You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@cloudstack.apache.org by ro...@apache.org on 2018/01/01 11:14:24 UTC
[cloudstack] branch master updated: CLOUDSTACK-10163: Component
tests sanity (#2344)
This is an automated email from the ASF dual-hosted git repository.
rohit pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/cloudstack.git
The following commit(s) were added to refs/heads/master by this push:
new 67f509d CLOUDSTACK-10163: Component tests sanity (#2344)
67f509d is described below
commit 67f509dc5768a1a7ad2a64dbe2e30c56fe8bd429
Author: Boris Stoyanov - a.k.a Bobby <bs...@gmail.com>
AuthorDate: Mon Jan 1 13:14:18 2018 +0200
CLOUDSTACK-10163: Component tests sanity (#2344)
Fixing some component tests and adding them in travis.
Signed-off-by: Rohit Yadav <ro...@shapeblue.com>
---
.travis.yml | 11 +-
.../component/test_VirtualRouter_alerts.py | 5 +-
.../component/test_add_remove_network.py | 8 +-
test/integration/component/test_affinity_groups.py | 2 +-
test/integration/component/test_assign_vm.py | 2 +-
test/integration/component/test_blocker_bugs.py | 4 +-
.../integration/component/test_browse_templates.py | 463 ++++++------
.../component/test_browse_templates2.py | 2 +-
test/integration/component/test_cpu_limits.py | 112 +--
test/integration/component/test_cpu_max_limits.py | 88 +--
.../component/test_cpu_project_limits.py | 77 +-
.../component/test_dynamic_compute_offering.py | 2 +-
test/integration/component/test_egress_fw_rules.py | 1 +
.../test_escalation_listTemplateDomainAdmin.py | 71 +-
.../component/test_escalations_instances.py | 11 +-
.../component/test_escalations_ipaddresses.py | 4 +-
.../component/test_escalations_networks.py | 20 +-
.../component/test_escalations_routers.py | 5 +-
.../component/test_escalations_templates.py | 155 ++--
test/integration/component/test_host_ha.py | 59 +-
test/integration/component/test_interop_xd_ccp.py | 2 +-
.../test_migrate_vol_to_maintained_pool.py | 3 +
.../component/test_multiple_public_interfaces.py | 18 +-
test/integration/component/test_overcommit.py | 23 +-
.../component/test_persistent_networks.py | 25 +-
test/integration/component/test_portable_ip.py | 3 +-
test/integration/component/test_project_configs.py | 155 +---
.../component/test_ps_resource_limits_volume.py | 2 +-
.../component/test_redundant_router_cleanups.py | 2 +-
.../component/test_rootvolume_resize.py | 10 +-
test/integration/component/test_routers.py | 13 +-
test/integration/component/test_secsr_mount.py | 3 +-
test/integration/component/test_ss_limits.py | 2 +
...emplate_from_snapshot_with_template_details.py} | 17 -
test/integration/component/test_volumes.py | 802 ++++++++++-----------
test/integration/component/test_vpc.py | 2 +
.../component/test_vpc_vm_life_cycle.py | 366 +++++-----
test/integration/component/test_vpn_service.py | 2 +-
tools/marvin/marvin/config/test_data.py | 22 +-
tools/marvin/marvin/lib/base.py | 2 +-
40 files changed, 1167 insertions(+), 1409 deletions(-)
diff --git a/.travis.yml b/.travis.yml
index b4749c0..d5fd173 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -32,6 +32,7 @@ env:
global:
- PATH=$HOME/.local/bin:$PATH
matrix:
+ # Keep the TESTS sorted by name and grouped by type
- TESTS="smoke/test_accounts
smoke/test_affinity_groups
smoke/test_affinity_groups_projects
@@ -108,8 +109,7 @@ env:
component/test_acl_listsnapshot
component/test_acl_listvm
component/test_acl_listvolume
- component/test_acl_sharednetwork
- component/test_affinity_groups_projects"
+ component/test_acl_sharednetwork"
- TESTS="component/test_allocation_states
component/test_acl_sharednetwork_deployVM-impersonation
@@ -118,7 +118,6 @@ env:
component/test_cpu_limits"
- TESTS="component/test_cpu_max_limits
- component/test_acl_isolatednetwork
component/test_cpu_project_limits
component/test_deploy_vm_userdata_multi_nic
component/test_egress_fw_rules
@@ -143,7 +142,10 @@ env:
component/test_snapshots
component/test_stopped_vm"
- - TESTS="component/test_resource_limits"
+ - TESTS="component/test_project_resources"
+
+ - TESTS="component/test_project_limits
+ component/test_resource_limits"
- TESTS="component/test_tags
component/test_templates
@@ -156,6 +158,7 @@ env:
component/test_vpn_users"
# FIXME: fix following tests and include them in Travis
+# - TESTS="component/test_vpc" Please add when PR: https://github.com/apache/cloudstack/pull/955 CLOUDSTACK-8969 is fixed
# - TESTS="component/test_organization_states" Please add when CLOUDSTACK-7735 is fixed
before_install: travis_wait 30 ./tools/travis/before_install.sh
diff --git a/test/integration/component/test_VirtualRouter_alerts.py b/test/integration/component/test_VirtualRouter_alerts.py
index 90f04d8..69262df 100644
--- a/test/integration/component/test_VirtualRouter_alerts.py
+++ b/test/integration/component/test_VirtualRouter_alerts.py
@@ -47,6 +47,7 @@ class TestVRServiceFailureAlerting(cloudstackTestCase):
cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = cls.testClient.getParsedTestDataConfig()
+ cls.hostConfig = cls.config.__dict__["zones"][0].__dict__["pods"][0].__dict__["clusters"][0].__dict__["hosts"][0].__dict__
# Get Zone, Domain and templates
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
@@ -173,8 +174,8 @@ class TestVRServiceFailureAlerting(cloudstackTestCase):
result = get_process_status(
host.ipaddress,
22,
- self.services["configurableData"]["host"]["username"],
- self.services["configurableData"]["host"]["password"],
+ self.hostConfig["username"],
+ self.hostConfig["password"],
router.linklocalip,
"service apache2 stop"
)
diff --git a/test/integration/component/test_add_remove_network.py b/test/integration/component/test_add_remove_network.py
index b76197d..38aeee4 100644
--- a/test/integration/component/test_add_remove_network.py
+++ b/test/integration/component/test_add_remove_network.py
@@ -596,10 +596,12 @@ class TestAddNetworkToVirtualMachine(cloudstackTestCase):
self.debug("Trying to %s network in account %s to a vm in account %s, This should fail" %
(network.type, account.name, self.account.name))
- with self.assertRaises(Exception) as e:
+ try:
self.virtual_machine.add_nic(self.apiclient, network.id)
- self.debug("Operation failed with exception %s" % e.exception)
-
+ except Exception:
+ pass
+ else:
+ self.fail("User was able to add NIC, test failed! This issue has been hit: CLOUDSTACK-10071")
return
@attr(tags = ["advanced", "dvs"])
diff --git a/test/integration/component/test_affinity_groups.py b/test/integration/component/test_affinity_groups.py
index 1bf7403..be0a6e5 100644
--- a/test/integration/component/test_affinity_groups.py
+++ b/test/integration/component/test_affinity_groups.py
@@ -1108,7 +1108,7 @@ class TestUpdateVMAffinityGroups(cloudstackTestCase):
vm1.update_affinity_group(self.api_client, affinitygroupids=[])
vm1.delete(self.api_client)
- # Can cleanup affinity groups since none are set on the VM
+ # Can cleanup affinity groups since none are set on the VM
for aff_grp in aff_grps:
aff_grp.delete(self.api_client)
diff --git a/test/integration/component/test_assign_vm.py b/test/integration/component/test_assign_vm.py
index 466fadf..363bbea 100644
--- a/test/integration/component/test_assign_vm.py
+++ b/test/integration/component/test_assign_vm.py
@@ -100,7 +100,7 @@ class TestVMOwnership(cloudstackTestCase):
cls._cleanup = []
cls.testClient = super(TestVMOwnership, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
-
+ cls.hypervisor = cls.testClient.getHypervisorInfo()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
diff --git a/test/integration/component/test_blocker_bugs.py b/test/integration/component/test_blocker_bugs.py
index c1aa1f0..dce6781 100644
--- a/test/integration/component/test_blocker_bugs.py
+++ b/test/integration/component/test_blocker_bugs.py
@@ -179,13 +179,13 @@ class TestTemplate(cloudstackTestCase):
self.services["template"]["url"] = builtin_info[0]
self.services["template"]["hypervisor"] = builtin_info[1]
self.services["template"]["format"] = builtin_info[2]
-
+ temp = self.services["template"]
self.debug("Registering a new template")
# Register new template
template = Template.register(
self.apiclient,
- self.services["template"],
+ temp,
zoneid=self.zone.id,
account=self.account.name,
domainid=self.account.domainid,
diff --git a/test/integration/component/test_browse_templates.py b/test/integration/component/test_browse_templates.py
index 0875d01..80e9a13 100644
--- a/test/integration/component/test_browse_templates.py
+++ b/test/integration/component/test_browse_templates.py
@@ -25,23 +25,12 @@ from marvin.cloudstackAPI import *
from marvin.lib.utils import *
from marvin.lib.base import *
from marvin.lib.common import *
-from marvin.codes import PASS,FAILED,SUCCESS,XEN_SERVER
-
-from marvin.sshClient import SshClient
+from marvin.codes import FAILED
import requests
-
-import wget
-
import random
-
import string
-
-import telnetlib
-import os
-import urllib
import time
-import tempfile
_multiprocess_shared_ = True
class TestBrowseUploadVolume(cloudstackTestCase):
@@ -58,38 +47,22 @@ class TestBrowseUploadVolume(cloudstackTestCase):
cls.hypervisor = cls.testClient.getHypervisorInfo()
cls._cleanup = []
cls.cleanup = []
- cls.uploadtemplateformat="VHD"
- cls.storagetype = 'shared'
-
+ cls.domain = get_domain(cls.apiclient)
+ cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
+ cls.unsupportedHypervisor = False
hosts = list_hosts(
cls.apiclient,
type="Routing"
)
+ if cls.hypervisor.lower() in ['lxc']:
+ # Template creation from root volume is not supported in LXC
+ cls.unsupportedHypervisor = True
+ return
+
if hosts is None:
raise unittest.SkipTest(
"There are no hypervisor's available.Check listhosts response")
- for hypervisorhost in hosts :
- if hypervisorhost.hypervisor == "XenServer":
- cls.uploadtemplateformat="VHD"
- break
- elif hypervisorhost.hypervisor== "VMware":
- cls.uploadtemplateformat="OVA"
- break
- elif hypervisorhost.hypervisor=="KVM":
- cls.uploadtemplateformat="QCOW2"
- break
- else:
- break
- cls.uploadurl=cls.testdata["configurableData"]["browser_upload_template"][cls.uploadtemplateformat]["url"]
- cls.templatename=cls.testdata["configurableData"]["browser_upload_template"][cls.uploadtemplateformat]["templatename"]
- cls.md5sum=cls.testdata["configurableData"]["browser_upload_template"][cls.uploadtemplateformat]["checksum"]
- cls.templatedisplaytext=cls.testdata["configurableData"]["browser_upload_template"][cls.uploadtemplateformat]["displaytext"]
- cls.templatehypervisor=cls.testdata["configurableData"]["browser_upload_template"][cls.uploadtemplateformat]["hypervisor"]
- cls.templateostypeid=cls.testdata["configurableData"]["browser_upload_template"][cls.uploadtemplateformat]["ostypeid"]
- cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
- cls.domain = get_domain(cls.apiclient)
- cls.pod = get_pod(cls.apiclient, cls.zone.id)
cls.account = Account.create(
cls.apiclient,
@@ -104,13 +77,14 @@ class TestBrowseUploadVolume(cloudstackTestCase):
if cls.template == FAILED:
raise unittest.SkipTest(
"Check for default cent OS template readiness ")
+
cls.service_offering = ServiceOffering.create(
cls.apiclient,
cls.testdata["service_offering"]
)
cls.disk_offering = DiskOffering.create(
cls.apiclient,
- cls.testdata["configurableData"]["browser_upload_volume"]["browser_resized_disk_offering"],
+ cls.testdata["resized_disk_offering"],
custom=True
)
cls.project = Project.create(
@@ -126,6 +100,58 @@ class TestBrowseUploadVolume(cloudstackTestCase):
cls.disk_offering
]
+ def setUp(self):
+
+ if "kvm" in self.hypervisor.lower():
+ self.test_template = registerTemplate.registerTemplateCmd()
+ self.test_template = registerTemplate.registerTemplateCmd()
+ self.test_template.checksum = "{SHA-1}" + "bf580a13f791d86acf3449a7b457a91a14389264"
+ self.test_template.hypervisor = self.hypervisor
+ self.test_template.zoneid = self.zone.id
+ self.test_template.name = 'test sha-1'
+ self.test_template.displaytext = 'test sha-1'
+ self.test_template.url = "http://dl.openvm.eu/cloudstack/macchinina/x86_64/macchinina-kvm.qcow2.bz2"
+ self.test_template.format = "QCOW2"
+ self.test_template.ostypeid = self.getOsType("Other Linux (64-bit)")
+ self.md5 = "ada77653dcf1e59495a9e1ac670ad95f"
+ self.sha256 = "0efc03633f2b8f5db08acbcc5dc1be9028572dfd8f1c6c8ea663f0ef94b458c5"
+
+ if "vmware" in self.hypervisor.lower():
+ self.test_template = registerTemplate.registerTemplateCmd()
+ self.test_template = registerTemplate.registerTemplateCmd()
+ self.test_template.checksum = "{SHA-1}" + "b25d404de8335b4348ff01e49a95b403c90df466"
+ self.test_template.hypervisor = self.hypervisor
+ self.test_template.zoneid = self.zone.id
+ self.test_template.name = 'test sha-2333'
+ self.test_template.displaytext = 'test sha-1'
+ self.test_template.url = "http://dl.openvm.eu/cloudstack/macchinina/x86_64/macchinina-vmware.ova"
+ self.test_template.format = "OVA"
+ self.test_template.ostypeid = self.getOsType(self, "Other Linux (64-bit)")
+ self.md5 = "d6d97389b129c7d898710195510bf4fb"
+ self.sha256 = "f57b59f118ab59284a70d6c63229d1de8f2d69bffc5a82b773d6c47e769c12d9"
+
+ if "xen" in self.hypervisor.lower():
+ self.test_template = registerTemplate.registerTemplateCmd()
+ self.test_template = registerTemplate.registerTemplateCmd()
+ self.test_template.checksum = "{SHA-1}" + "427fad501d0d8a1d63b8600a9a469fbf91191314"
+ self.test_template.hypervisor = self.hypervisor
+ self.test_template.zoneid = self.zone.id
+ self.test_template.name = 'test sha-2333'
+ self.test_template.displaytext = 'test sha-1'
+ self.test_template.url = "http://dl.openvm.eu/cloudstack/macchinina/x86_64/macchinina-xen.vhd.bz2"
+ self.test_template.format = "VHD"
+ self.test_template.ostypeid = self.getOsType("Other Linux (64-bit)")
+ self.md5 = "54ebc933e6e07ae58c0dc97dfd37c824"
+ self.sha256 = "bddd9876021d33df9792b71ae4b776598680ac68ecf55e9d9af33c80904cc1f3"
+
+ if self.unsupportedHypervisor:
+ self.skipTest("Skipping test because unsupported hypervisor\
+ %s" % self.hypervisor)
+
+ def getOsType(self, param):
+ cmd = listOsTypes.listOsTypesCmd()
+ cmd.description = param
+ return self.apiclient.listOsTypes(cmd)[0].id
def __verify_values(self, expected_vals, actual_vals):
@@ -148,36 +174,51 @@ class TestBrowseUploadVolume(cloudstackTestCase):
(exp_val, act_val))
return return_flag
- def validate_uploaded_template(self,up_templateid,templatestate,zid):
-
- config = Configurations.list(
- self.apiclient,
- name='upload.operation.timeout'
- )
-
- uploadtimeout = int(config[0].value)
- time.sleep(uploadtimeout*60)
-
- list_template_response = Template.list(
- self.apiclient,
- id=up_templateid,
- templatefilter="all",
- zoneid=zid)
-
- self.assertNotEqual(
- list_template_response,
- None,
- "Check if template exists in ListTemplates"
- )
-
- self.assertEqual(
- list_template_response[0].status,
- templatestate,
- "Check template status in List templates"
- )
- return
+ def validate_uploaded_template(self, apiclient, template_id, retries=70, interval=5):
+ """Check if template download will finish in 1 minute"""
+ while retries > -1:
+ time.sleep(interval)
+ template_response = Template.list(
+ apiclient,
+ id=template_id,
+ zoneid=self.zone.id,
+ templatefilter='self'
+ )
+ if isinstance(template_response, list):
+ template = template_response[0]
+ if not hasattr(template, 'status') or not template or not template.status:
+ retries = retries - 1
+ continue
+
+ # If template is ready,
+ # template.status = Download Complete
+ # Downloading - x% Downloaded
+ # if Failed
+ # Error - Any other string
+ if 'Failed' in template.status:
+ raise Exception(
+ "Failed to download template: status - %s" %
+ template.status)
+
+ elif template.status == 'Download Complete' and template.isready:
+ return
+
+ elif 'Downloaded' in template.status:
+ retries = retries - 1
+ continue
+
+ elif 'Installing' not in template.status:
+ if retries >= 0:
+ retries = retries - 1
+ continue
+ raise Exception(
+ "Error in downloading template: status - %s" %
+ template.status)
+ else:
+ retries = retries - 1
+ raise Exception("Template download failed exception.")
def gettemplatelimts(self):
@@ -189,7 +230,6 @@ class TestBrowseUploadVolume(cloudstackTestCase):
return(totaltemplates)
-
def getstoragelimts(self,rtype):
cmd=updateResourceCount.updateResourceCountCmd()
@@ -197,23 +237,22 @@ class TestBrowseUploadVolume(cloudstackTestCase):
cmd.domainid=self.domain.id
cmd.resourcetype=rtype
- responce=self.apiclient.updateResourceCount(cmd)
+ response=self.apiclient.updateResourceCount(cmd)
- totalstorage=responce[0].resourcecount
+ totalstorage=response[0].resourcecount
return(totalstorage)
-
def browse_upload_template(self):
cmd = getUploadParamsForTemplate.getUploadParamsForTemplateCmd()
- cmd.zoneid = self.zone.id
- cmd.format = self.uploadtemplateformat
- cmd.name=self.templatename+self.account.name+(random.choice(string.ascii_uppercase))
+ cmd.zoneid = self.test_template.zoneid
+ cmd.format = self.test_template.format
+ cmd.name=self.test_template.name + self.account.name + (random.choice(string.ascii_uppercase))
cmd.account=self.account.name
cmd.domainid=self.domain.id
- cmd.displaytext=self.templatename+self.account.name+(random.choice(string.ascii_uppercase))
- cmd.hypervisor=self.templatehypervisor
- cmd.ostypeid=self.templateostypeid
+ cmd.displaytext=self.test_template.name + self.account.name + (random.choice(string.ascii_uppercase))
+ cmd.hypervisor=self.test_template.hypervisor
+ cmd.ostypeid=self.test_template.ostypeid
#cmd.isdynamicallyscalable="false"
#cmd.type="template"
getuploadparamsresponce=self.apiclient.getUploadParamsForTemplate(cmd)
@@ -223,7 +262,7 @@ class TestBrowseUploadVolume(cloudstackTestCase):
metadata=getuploadparamsresponce.metadata
expiredata=getuploadparamsresponce.expires
#url = 'http://10.147.28.7/templates/rajani-thin-volume.vhd'
- url=self.uploadurl
+ url=self.test_template.url
uploadfile = url.split('/')[-1]
r = requests.get(url, stream=True)
@@ -249,21 +288,20 @@ class TestBrowseUploadVolume(cloudstackTestCase):
if results.status_code !=200:
self.fail("Upload is not fine")
- self.validate_uploaded_template(getuploadparamsresponce.id,'Download Complete',self.zone.id)
+ self.validate_uploaded_template(self.apiclient, getuploadparamsresponce.id)
return(getuploadparamsresponce)
-
def browse_upload_template_with_out_zoneid(self):
cmd = getUploadParamsForTemplate.getUploadParamsForTemplateCmd()
- cmd.format = self.uploadtemplateformat
- cmd.name=self.templatename+self.account.name+(random.choice(string.ascii_uppercase))
+ cmd.format = self.test_template.format
+ cmd.name=self.test_template.name+self.account.name+(random.choice(string.ascii_uppercase))
cmd.account=self.account.name
cmd.domainid=self.domain.id
- cmd.displaytext=self.templatename+self.account.name+(random.choice(string.ascii_uppercase))
- cmd.hypervisor=self.templatehypervisor
- cmd.ostypeid=self.templateostypeid
+ cmd.displaytext=self.test_template.name+self.account.name+(random.choice(string.ascii_uppercase))
+ cmd.hypervisor=self.test_template.hypervisor
+ cmd.ostypeid=self.test_template.ostypeid
success= False
try:
@@ -278,18 +316,17 @@ class TestBrowseUploadVolume(cloudstackTestCase):
return
-
def browse_upload_template_with_out_ostypeid(self):
cmd = getUploadParamsForTemplate.getUploadParamsForTemplateCmd()
cmd.zoneid = self.zone.id
- cmd.format = self.uploadtemplateformat
- cmd.name=self.templatename+self.account.name+(random.choice(string.ascii_uppercase))
+ cmd.format = self.test_template.format
+ cmd.name=self.test_template.name+self.account.name+(random.choice(string.ascii_uppercase))
cmd.account=self.account.name
cmd.domainid=self.domain.id
- cmd.displaytext=self.templatename+self.account.name+(random.choice(string.ascii_uppercase))
- cmd.hypervisor=self.templatehypervisor
+ cmd.displaytext=self.test_template.name+self.account.name+(random.choice(string.ascii_uppercase))
+ cmd.hypervisor=self.test_template.hypervisor
success= False
try:
@@ -304,17 +341,16 @@ class TestBrowseUploadVolume(cloudstackTestCase):
return
-
def browse_upload_template_with_projectid(self,projectid):
cmd = getUploadParamsForTemplate.getUploadParamsForTemplateCmd()
cmd.zoneid = self.zone.id
- cmd.format = self.uploadtemplateformat
- cmd.name=self.templatename+self.account.name+(random.choice(string.ascii_uppercase))
+ cmd.format = self.test_template.format
+ cmd.name=self.test_template.name+self.account.name+(random.choice(string.ascii_uppercase))
cmd.account=self.account.name
cmd.domainid=self.domain.id
- cmd.displaytext=self.templatename+self.account.name+(random.choice(string.ascii_uppercase))
- cmd.hypervisor=self.templatehypervisor
- cmd.ostypeid=self.templateostypeid
+ cmd.displaytext=self.test_template.name+self.account.name+(random.choice(string.ascii_uppercase))
+ cmd.hypervisor=self.test_template.hypervisor
+ cmd.ostypeid=self.test_template.ostypeid
cmd.projectid=projectid
#cmd.isdynamicallyscalable="false"
#cmd.type="template"
@@ -325,7 +361,7 @@ class TestBrowseUploadVolume(cloudstackTestCase):
metadata=getuploadparamsresponce.metadata
expiredata=getuploadparamsresponce.expires
#url = 'http://10.147.28.7/templates/rajani-thin-volume.vhd'
- url=self.uploadurl
+ url=self.test_template.url
uploadfile = url.split('/')[-1]
r = requests.get(url, stream=True)
@@ -345,7 +381,7 @@ class TestBrowseUploadVolume(cloudstackTestCase):
if results.status_code !=200:
self.fail("Upload is not fine")
- self.validate_uploaded_template(getuploadparamsresponce.id,'Download Complete',self.zone.id)
+ self.validate_uploaded_template(self.apiclient, getuploadparamsresponce.id)
return(getuploadparamsresponce)
@@ -353,13 +389,13 @@ class TestBrowseUploadVolume(cloudstackTestCase):
cmd = getUploadParamsForTemplate.getUploadParamsForTemplateCmd()
cmd.zoneid ="-1"
- cmd.format = self.uploadtemplateformat
- cmd.name=self.templatename+self.account.name+(random.choice(string.ascii_uppercase))
+ cmd.format = self.test_template.format
+ cmd.name=self.test_template.name+self.account.name+(random.choice(string.ascii_uppercase))
cmd.account=self.account.name
cmd.domainid=self.domain.id
- cmd.displaytext=self.templatename+self.account.name+(random.choice(string.ascii_uppercase))
- cmd.hypervisor=self.templatehypervisor
- cmd.ostypeid=self.templateostypeid
+ cmd.displaytext=self.test_template.name+self.account.name+(random.choice(string.ascii_uppercase))
+ cmd.hypervisor=self.test_template.hypervisor
+ cmd.ostypeid=self.test_template.ostypeid
#cmd.isdynamicallyscalable="false"
#cmd.type="template"
getuploadparamsresponce=self.apiclient.getUploadParamsForTemplate(cmd)
@@ -369,7 +405,7 @@ class TestBrowseUploadVolume(cloudstackTestCase):
metadata=getuploadparamsresponce.metadata
expiredata=getuploadparamsresponce.expires
#url = 'http://10.147.28.7/templates/rajani-thin-volume.vhd'
- url=self.uploadurl
+ url=self.test_template.url
uploadfile = url.split('/')[-1]
r = requests.get(url, stream=True)
@@ -396,20 +432,20 @@ class TestBrowseUploadVolume(cloudstackTestCase):
self.fail("Upload is not fine")
for z1 in lzones:
- self.validate_uploaded_template(getuploadparamsresponce.id,'Download Complete',z1.id)
+ self.validate_uploaded_template(self.apiclient, getuploadparamsresponce.id)
return(getuploadparamsresponce)
def uploadtemplate(self):
cmd = getUploadParamsForTemplate.getUploadParamsForTemplateCmd()
cmd.zoneid = self.zone.id
- cmd.format = self.uploadtemplateformat
- cmd.name=self.templatename+self.account.name+(random.choice(string.ascii_uppercase))
+ cmd.format = self.test_template.format
+ cmd.name=self.test_template.name+self.account.name+(random.choice(string.ascii_uppercase))
cmd.account=self.account.name
cmd.domainid=self.domain.id
- cmd.displaytext=self.templatename+self.account.name+(random.choice(string.ascii_uppercase))
- cmd.hypervisor=self.templatehypervisor
- cmd.ostypeid=self.templateostypeid
+ cmd.displaytext=self.test_template.name+self.account.name+(random.choice(string.ascii_uppercase))
+ cmd.hypervisor=self.test_template.hypervisor
+ cmd.ostypeid=self.test_template.ostypeid
#cmd.type="template"
getuploadparamsresponce=self.apiclient.getUploadParamsForTemplate(cmd)
@@ -418,7 +454,7 @@ class TestBrowseUploadVolume(cloudstackTestCase):
metadata=getuploadparamsresponce.metadata
expiredata=getuploadparamsresponce.expires
#url = 'http://10.147.28.7/templates/rajani-thin-volume.vhd'
- url=self.uploadurl
+ url=self.test_template.url
uploadfile = url.split('/')[-1]
r = requests.get(url, stream=True)
@@ -452,41 +488,43 @@ class TestBrowseUploadVolume(cloudstackTestCase):
templ1=self.uploadtemplate()
templ2=self.uploadtemplate()
templ3=self.uploadtemplate()
- time.sleep(600)
- self.validate_uploaded_template(templ1.id,'Download Complete')
- self.validate_uploaded_template(templ2.id,'Download Complete')
- self.validate_uploaded_template(templ3.id,'Download Complete')
+
+ self.validate_uploaded_template(self.apiclient, templ1.id)
+ self.validate_uploaded_template(self.apiclient, templ2.id)
+ self.validate_uploaded_template(self.apiclient, templ3.id)
+
self.delete_template(templ1)
self.delete_template(templ2)
self.delete_template(templ3)
return
- def validate_vm(self,vmdetails,vmstate):
+ def validate_vm(self, apiclient, vm_id, state, retries=72, interval=5):
- time.sleep(120 )
- vm_response = VirtualMachine.list(
- self.apiclient,
- id=vmdetails.id,
- )
- self.assertEqual(
- isinstance(vm_response, list),
- True,
- "Check list VM response for valid list"
+ """Check if vm will be running in 6 minute"""
+ while retries > -1:
+ time.sleep(interval)
+ vm_response = list_virtual_machines(
+ apiclient,
+ id=vm_id
)
- # Verify VM response to check whether VM deployment was successful
- self.assertNotEqual(
- len(vm_response),
- 0,
- "Check VMs available in List VMs response"
- )
+ if isinstance(vm_response, list):
+ vm = vm_response[0]
+ if not hasattr(vm, 'state'):
+ retries = retries - 1
+ continue
- deployedvm = vm_response[0]
- self.assertEqual(
- deployedvm.state,
- vmstate,
- "Check the state of VM"
- )
+ # If vm is Running for x number of retries
+ if vm.state == state:
+ return
+
+ else:
+ retries = retries - 1
+ continue
+
+ else:
+ retries = retries - 1
+ raise Exception("VM failed exception.")
def deploy_vm(self,template):
virtual_machine = VirtualMachine.create(
@@ -498,7 +536,7 @@ class TestBrowseUploadVolume(cloudstackTestCase):
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
)
- self.validate_vm(virtual_machine,'Running')
+ self.validate_vm(self.apiclient, vm_id = virtual_machine.id, state="Running")
return(virtual_machine)
def attach_volume(self,vmlist,volid):
@@ -528,18 +566,37 @@ class TestBrowseUploadVolume(cloudstackTestCase):
"Check list volumes response for valid list")
self.validate_uploaded_volume(volid,'Ready')
+ def validate_uploaded_volume(self, volid, status):
+ list_volume_response = Volume.list(
+ self.apiclient,
+ id=volid,
+ listall=True
+ )
+
+ self.assertNotEqual(
+ list_volume_response,
+ None,
+ "Check if volume exists in ListTemplates"
+ )
+
+ self.assertEqual(
+ list_volume_response[0].status,
+ status,
+ "Check volume status in List Volumes"
+ )
+ return
def reboot_vm(self,vmdetails):
vmdetails.reboot(self.apiclient)
- self.validate_vm(vmdetails,'Running')
+ self.validate_vm(self.apiclient, vm_id=vmdetails.id, state="Running")
def stop_vm(self,vmdetails):
vmdetails.stop(self.apiclient)
- self.validate_vm(vmdetails,'Stopped')
+ self.validate_vm(self.apiclient, vm_id=vmdetails.id, state="Stopped")
def start_vm(self,vmdetails):
vmdetails.start(self.apiclient)
- self.validate_vm(vmdetails,'Running')
+ self.validate_vm(self.apiclient, vm_id=vmdetails.id, state="Running")
def vmoperations(self,vmdetails):
self.reboot_vm(vmdetails)
@@ -548,7 +605,6 @@ class TestBrowseUploadVolume(cloudstackTestCase):
self.start_vm(vmdetails)
-
def detach_volume(self,vmdetails,volid):
"""Detach a Volume attached to a VM
"""
@@ -590,7 +646,6 @@ class TestBrowseUploadVolume(cloudstackTestCase):
)
return
-
def restore_vm(self,vmdetails):
"""Test recover Virtual Machine
"""
@@ -598,7 +653,7 @@ class TestBrowseUploadVolume(cloudstackTestCase):
cmd.virtualmachineid = vmdetails.id
self.apiclient.recoverVirtualMachine(cmd)
- list_vm_response = VirtualMachine.list(
+ list_vm_response = list_virtual_machines(
self.apiclient,
id=vmdetails.id
)
@@ -639,7 +694,6 @@ class TestBrowseUploadVolume(cloudstackTestCase):
return
-
def deletevolume(self,volumeid):
"""Delete a Volume attached to a VM
"""
@@ -661,12 +715,11 @@ class TestBrowseUploadVolume(cloudstackTestCase):
)
return
-
def destroy_vm(self,vmdetails):
vmdetails.delete(self.apiclient, expunge=False)
- list_vm_response = VirtualMachine.list(
+ list_vm_response = list_virtual_machines(
self.apiclient,
id=vmdetails.id
)
@@ -689,14 +742,13 @@ class TestBrowseUploadVolume(cloudstackTestCase):
)
return
-
def recover_destroyed_vm(self,vmdetails):
cmd = recoverVirtualMachine.recoverVirtualMachineCmd()
cmd.id = vmdetails.id
self.apiclient.recoverVirtualMachine(cmd)
- list_vm_response = VirtualMachine.list(
+ list_vm_response = list_virtual_machines(
self.apiclient,
id=vmdetails.id
)
@@ -745,7 +797,7 @@ class TestBrowseUploadVolume(cloudstackTestCase):
expunge_cycle = int(config[0].value)
wait_time = expunge_cycle * 4
while wait_time >= 0:
- list_vm_response = VirtualMachine.list(
+ list_vm_response = list_virtual_machines(
self.apiclient,
id=vmdetails.id
)
@@ -755,11 +807,17 @@ class TestBrowseUploadVolume(cloudstackTestCase):
time.sleep(expunge_cycle)
wait_time = wait_time - expunge_cycle
- self.debug("listVirtualMachines response: %s" % list_vm_response)
+ self.debug("listVirtualMachines response: %s" % list_vm_response)
- self.assertEqual(list_vm_response,None,"Check Expunged virtual machine is in listVirtualMachines response")
- return
+ self.assertEqual(list_vm_response,None,"Check Expunged virtual machine is in listVirtualMachines response")
+ return
+ list_vm_response = list_virtual_machines(
+ self.apiclient,
+ id=vmdetails.id
+ )
+ if isinstance(list_vm_response, list):
+ self.fail("VM has not been expunged")
def waitForSystemVMAgent(self, vmname):
timeout = self.testdata["timeout"]
@@ -779,7 +837,6 @@ class TestBrowseUploadVolume(cloudstackTestCase):
time.sleep(self.testdata["sleep"])
timeout = timeout - 1
-
def ssvm_internals(self):
list_ssvm_response = list_ssvms(
@@ -1059,7 +1116,6 @@ class TestBrowseUploadVolume(cloudstackTestCase):
# Call above tests to ensure SSVM is properly running
self.list_sec_storage_vm()
-
def reboot_ssvm(self):
list_ssvm_response = list_ssvms(
@@ -1319,7 +1375,6 @@ class TestBrowseUploadVolume(cloudstackTestCase):
)
return
-
def delete_template(self,templatedetails):
list_template_response = Template.list(
@@ -1349,8 +1404,6 @@ class TestBrowseUploadVolume(cloudstackTestCase):
)
return
-
-
def detach_data_volume(self,volume,vmdetails):
self.debug("Detach volume: %s to VM: %s" % (
@@ -1395,64 +1448,63 @@ class TestBrowseUploadVolume(cloudstackTestCase):
"""
Test Browser_template_Life_cycle
"""
- try:
+ #try:
- self.debug("========================= Test 1: Upload Browser based template and validate ========================= ")
- browseup_template=self.browse_upload_template()
+ self.debug("========================= Test 1: Upload Browser based template and validate ========================= ")
+ browseup_template=self.browse_upload_template()
- self.debug("========================= Test 2: Deploy a VM with uploaded template and validate VM Operations========================= ")
+ self.debug("========================= Test 2: Deploy a VM with uploaded template and validate VM Operations========================= ")
- vm1details=self.deploy_vm(browseup_template)
+ vm1details=self.deploy_vm(browseup_template)
- self.vmoperations(vm1details)
+ self.vmoperations(vm1details)
- self.debug("========================= Test 3: Attach DATA DISK to the VM ")
+ self.debug("========================= Test 3: Attach DATA DISK to the VM ")
- cvolume=self.create_data_volume()
- self.attach_data_volume(cvolume, vm1details)
- self.vmoperations(vm1details)
+ cvolume=self.create_data_volume()
+ self.attach_data_volume(cvolume, vm1details)
+ self.vmoperations(vm1details)
- self.debug("========================= Test 4: Restore VM created with Uploaded template========================= ")
+ self.debug("========================= Test 4: Restore VM created with Uploaded template========================= ")
- self.restore_vm(vm1details)
+ self.restore_vm(vm1details)
- self.debug("========================= Test 5: Detach DATA DISK to the VM ")
+ self.debug("========================= Test 5: Detach DATA DISK to the VM ")
- self.detach_data_volume(cvolume,vm1details)
- self.vmoperations(vm1details)
+ self.detach_data_volume(cvolume,vm1details)
+ self.vmoperations(vm1details)
- self.deletevolume(cvolume.id)
+ self.deletevolume(cvolume.id)
- self.debug("========================= Test 6: Expunge VM created with Uploaded template========================= ")
+ self.debug("========================= Test 6: Expunge VM created with Uploaded template========================= ")
- self.expunge_vm(vm1details)
+ self.expunge_vm(vm1details)
- self.debug("========================= Test 7: Destroy VM ========================= ")
+ self.debug("========================= Test 7: Destroy VM ========================= ")
- vm2details=self.deploy_vm(self.template)
+ vm2details=self.deploy_vm(self.template)
- vm2details=self.deploy_vm(browseup_template)
- self.destroy_vm(vm2details)
+ vm2details=self.deploy_vm(browseup_template)
+ self.destroy_vm(vm2details)
- self.debug("========================= Test 8: Recover destroyed VM which has Uploaded volumes attached========================= ")
+ self.debug("========================= Test 8: Recover destroyed VM which has Uploaded volumes attached========================= ")
- self.recover_destroyed_vm(vm2details)
- self.expunge_vm(vm2details)
+ self.recover_destroyed_vm(vm2details)
+ self.expunge_vm(vm2details)
- self.debug("========================= Test 9: Delete the Uploaded Template========================= ")
- self.debug(browseup_template)
- self.delete_template(browseup_template)
+ self.debug("========================= Test 9: Delete the Uploaded Template========================= ")
+ self.debug(browseup_template)
+ self.delete_template(browseup_template)
- self.debug("========================= Test 10: Upload Multiple templates========================= ")
+ self.debug("========================= Test 10: Upload Multiple templates========================= ")
- self.multiple_browse_upload_template()
+ self.multiple_browse_upload_template()
- except Exception as e:
- self.fail("Exception occurred : %s" % e)
+# except Exception as e:
+# self.fail("Exception occurred : %s" % e)
return
-
@attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
def test_02_SSVM_Life_Cycle_With_Browser_Template_TPath(self):
"""
@@ -1500,7 +1552,6 @@ class TestBrowseUploadVolume(cloudstackTestCase):
self.fail("Exception occurred : %s" % e)
return
-
@attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
def test_03_Browser_template_upload_multiple_zones(self):
"""
@@ -1520,8 +1571,6 @@ class TestBrowseUploadVolume(cloudstackTestCase):
self.fail("Exception occurred : %s" % e)
return
-
-
@attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
def test_04_Browser_template_ResetVM_With_Deleted_Template(self):
"""
@@ -1543,7 +1592,6 @@ class TestBrowseUploadVolume(cloudstackTestCase):
self.fail("Exception occurred : %s" % e)
return
-
@attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
def test_05_Browser_Upload_Template_with_all_API_parameters(self):
"""
@@ -1582,12 +1630,14 @@ class TestBrowseUploadVolume(cloudstackTestCase):
self.debug("========================= Test 21 Upload Template and verify Template limits========================")
initialtemplatelimit=self.gettemplatelimts()
browseup_template1=self.browse_upload_template()
+ # Adding time for limit to sync in background
+ time.sleep(120)
afteruploadtemplatelimit=self.gettemplatelimts()
if int(afteruploadtemplatelimit)!=(int(initialtemplatelimit)+1):
self.fail("Volume Resource Count is not updated")
- self.delete_template(browseup_template1.id)
+ self.delete_template(browseup_template1)
except Exception as e:
self.fail("Exception occurred : %s" % e)
@@ -1617,7 +1667,7 @@ class TestBrowseUploadVolume(cloudstackTestCase):
if afteruploadsecondarystoragelimit!=(initialsecondarystoragelimit+tmpldetails[0].size):
self.fail("Secondary Storage Resource Count is not updated")
- self.delete_template(browseup_template1.id)
+ self.delete_template(browseup_template1)
except Exception as e:
self.fail("Exception occurred : %s" % e)
@@ -1636,15 +1686,13 @@ class TestBrowseUploadVolume(cloudstackTestCase):
self.delete_template(browseup_template1)
aftertemplatelimit=self.gettemplatelimts()
- if afteruploadtemplatlimit!=(initialtemplatelimit-1):
+ if aftertemplatelimit!=(initialtemplatelimit-1):
self.fail("Template Resource Count is not updated after deletion")
except Exception as e:
self.fail("Exception occurred : %s" % e)
return
-
-
@attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
def test_09_Browser_Upload_Volume_secondary_storage_resource_limits_after_deletion(self):
"""
@@ -1666,7 +1714,7 @@ class TestBrowseUploadVolume(cloudstackTestCase):
afteruploadprimarystoragelimit=self.getstoragelimts(11)
- if afteruploadprimarystoragelimit!=(initialprimarystoragelimit-tempdetails[0].size):
+ if afteruploadprimarystoragelimit!=(initialuploadprimarystoragelimit-tmpldetails[0].size):
self.fail("Secondary Storage Resource Count is not updated after deletion")
except Exception as e:
@@ -1684,14 +1732,22 @@ class TestBrowseUploadVolume(cloudstackTestCase):
#Only register template, without uploading
cmd = getUploadParamsForTemplate.getUploadParamsForTemplateCmd()
+
+ if 'kvm' in self.hypervisor.lower():
+ cmd.url = 'http://dl.openvm.eu/cloudstack/centos/x86_64/centos-7-kvm.qcow2.bz2'
+ if 'vmware' in self.hypervisor.lower():
+ cmd.url = 'http://dl.openvm.eu/cloudstack/centos/x86_64/centos-7-vmware.ova'
+ if 'xenserver' in self.hypervisor.lower():
+ cmd.url = 'http://dl.openvm.eu/cloudstack/centos/x86_64/centos-7-xen.vhd.bz2'
+
cmd.zoneid = self.zone.id
- cmd.format = self.uploadtemplateformat
- cmd.name=self.templatename+self.account.name+(random.choice(string.ascii_uppercase))
+ cmd.format = self.test_template.format
+ cmd.name=self.test_template.name+self.account.name+(random.choice(string.ascii_uppercase))
cmd.account=self.account.name
cmd.domainid=self.domain.id
cmd.displaytext=cmd.name
- cmd.hypervisor=self.templatehypervisor
- cmd.ostypeid=self.templateostypeid
+ cmd.hypervisor=self.test_template.hypervisor
+ cmd.ostypeid=self.test_template.ostypeid
template_response=self.apiclient.getUploadParamsForTemplate(cmd)
#Destroy SSVM, and wait for new one to start
@@ -1709,7 +1765,6 @@ class TestBrowseUploadVolume(cloudstackTestCase):
self.fail("Exception occurred : %s" % e)
return
-
@classmethod
def tearDownClass(self):
try:
diff --git a/test/integration/component/test_browse_templates2.py b/test/integration/component/test_browse_templates2.py
index 9fdf3f0..927dd34 100755
--- a/test/integration/component/test_browse_templates2.py
+++ b/test/integration/component/test_browse_templates2.py
@@ -189,7 +189,7 @@ class TestBrowseUploadTemplate(cloudstackTestCase):
#Destroy SSVM, and wait for new one to start
self.destroy_ssvm()
-
+ wait_for_cleanup(self.apiclient, ["expunge.delay", "expunge.interval"])
#Verify that the template is cleaned up as part of sync-up during new SSVM start
list_template_response=Template.list(
self.apiclient,
diff --git a/test/integration/component/test_cpu_limits.py b/test/integration/component/test_cpu_limits.py
index 345d901..b9df649 100644
--- a/test/integration/component/test_cpu_limits.py
+++ b/test/integration/component/test_cpu_limits.py
@@ -37,56 +37,7 @@ from marvin.lib.utils import cleanup_resources
from marvin.codes import ERROR_NO_HOST_FOR_MIGRATION
-class Services:
- """Test resource limit services
- """
-
- def __init__(self):
- self.services = {
- "account": {
- "email": "test@test.com",
- "firstname": "Test",
- "lastname": "User",
- "username": "resource",
- # Random characters are appended for unique
- # username
- "password": "password",
- },
- "service_offering": {
- "name": "Tiny Instance",
- "displaytext": "Tiny Instance",
- "cpunumber": 4,
- "cpuspeed": 100, # in MHz
- "memory": 128, # In MBs
- },
- "virtual_machine": {
- "displayname": "TestVM",
- "username": "root",
- "password": "password",
- "ssh_port": 22,
- "hypervisor": 'KVM',
- "privateport": 22,
- "publicport": 22,
- "protocol": 'TCP',
- },
- "network": {
- "name": "Test Network",
- "displaytext": "Test Network",
- "netmask": '255.255.255.0'
- },
- "project": {
- "name": "Project",
- "displaytext": "Test project",
- },
- "domain": {
- "name": "Domain",
- },
- "ostype": 'CentOS 5.3 (64-bit)',
- "sleep": 60,
- "timeout": 10,
- "mode": 'advanced',
- # Networking mode: Advanced, Basic
- }
+
class TestCPULimits(cloudstackTestCase):
@@ -94,24 +45,24 @@ class TestCPULimits(cloudstackTestCase):
def setUpClass(cls):
cls.testClient = super(TestCPULimits, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
-
- cls.services = Services().services
+ cls.testdata = cls.testClient.getParsedTestDataConfig()
+ #cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
- cls.services["mode"] = cls.zone.networktype
+ cls.testdata["mode"] = cls.zone.networktype
+ #cls.services["mode"] = cls.zone.networktype
cls.template = get_template(
cls.api_client,
cls.zone.id,
- cls.services["ostype"]
+ cls.testdata["ostype"]
)
-
- cls.services["virtual_machine"]["zoneid"] = cls.zone.id
+ cls.testdata["virtual_machine"]["zoneid"] = cls.zone.id
cls.service_offering = ServiceOffering.create(
cls.api_client,
- cls.services["service_offering"]
+ cls.testdata["service_offering_multiple_cores"]
)
cls._cleanup = [cls.service_offering, ]
@@ -131,7 +82,7 @@ class TestCPULimits(cloudstackTestCase):
self.dbclient = self.testClient.getDbConnection()
self.account = Account.create(
self.apiclient,
- self.services["account"],
+ self.testdata["account"],
admin=True
)
@@ -161,7 +112,7 @@ class TestCPULimits(cloudstackTestCase):
try:
vm = VirtualMachine.create(
api_client,
- self.services["virtual_machine"],
+ self.testdata["virtual_machine"],
templateid=self.template.id,
accountid=self.account.name,
domainid=self.account.domainid,
@@ -194,7 +145,7 @@ class TestCPULimits(cloudstackTestCase):
)
resource_count = account_list[0].cputotal
- expected_resource_count = int(self.services["service_offering"]["cpunumber"])
+ expected_resource_count = int(self.service_offering.cpunumber)
self.assertEqual(resource_count, expected_resource_count,
"Resource count should match with the expected resource count")
@@ -251,7 +202,7 @@ class TestCPULimits(cloudstackTestCase):
)
resource_count = account_list[0].cputotal
- expected_resource_count = int(self.services["service_offering"]["cpunumber"])
+ expected_resource_count = int(self.service_offering.cpunumber)
self.assertEqual(resource_count, expected_resource_count,
"Resource count should match with the expected resource count")
@@ -292,7 +243,7 @@ class TestCPULimits(cloudstackTestCase):
)
resource_count = account_list[0].cputotal
- expected_resource_count = int(self.services["service_offering"]["cpunumber"])
+ expected_resource_count = int(self.service_offering.cpunumber)
self.assertEqual(resource_count, expected_resource_count,
"Resource count should match with the expected resource count")
@@ -329,7 +280,7 @@ class TestCPULimits(cloudstackTestCase):
self.debug("Creating service offering with 4 CPU cores")
self.service_offering = ServiceOffering.create(
self.apiclient,
- self.services["service_offering"]
+ self.testdata["service_offering_multiple_cores"]
)
# Adding to cleanup list after execution
self.cleanup.append(self.service_offering)
@@ -347,7 +298,7 @@ class TestCPULimits(cloudstackTestCase):
)
resource_count = account_list[0].cputotal
- expected_resource_count = int(self.services["service_offering"]["cpunumber"]) * 4 #Total 4 Vms
+ expected_resource_count = int(self.service_offering.cpunumber) * 4 #Total 4 Vms
self.assertTrue(resource_count == expected_resource_count,
"Resource count does not match the expected value")
return
@@ -358,25 +309,26 @@ class TestDomainCPULimitsConfiguration(cloudstackTestCase):
def setUpClass(cls):
cls.testClient = super(TestDomainCPULimitsConfiguration, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
+ cls.testdata = cls.testClient.getParsedTestDataConfig()
- cls.services = Services().services
+ #cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
- cls.services["mode"] = cls.zone.networktype
+ cls.testdata["mode"] = cls.zone.networktype
cls.template = get_template(
cls.api_client,
cls.zone.id,
- cls.services["ostype"]
+ cls.testdata["ostype"]
)
cls.service_offering = ServiceOffering.create(
cls.api_client,
- cls.services["service_offering"]
+ cls.testdata["service_offering_multiple_cores"]
)
- cls.services["virtual_machine"]["zoneid"] = cls.zone.id
+ cls.testdata["virtual_machine"]["zoneid"] = cls.zone.id
cls._cleanup = [cls.service_offering, ]
return
@@ -415,7 +367,7 @@ class TestDomainCPULimitsConfiguration(cloudstackTestCase):
try:
vm = VirtualMachine.create(
api_client,
- self.services["virtual_machine"],
+ self.testdata["virtual_machine"],
templateid=self.template.id,
accountid=self.account.name,
domainid=self.account.domainid,
@@ -436,12 +388,12 @@ class TestDomainCPULimitsConfiguration(cloudstackTestCase):
self.debug("Creating a sub-domain under: %s" % self.domain.name)
self.child_domain_1 = Domain.create(
self.apiclient,
- services=self.services["domain"],
+ services=self.testdata["domain"],
parentdomainid=self.domain.id
)
self.child_do_admin_1 = Account.create(
self.apiclient,
- self.services["account"],
+ self.testdata["account"],
admin=True,
domainid=self.child_domain_1.id
)
@@ -451,13 +403,13 @@ class TestDomainCPULimitsConfiguration(cloudstackTestCase):
self.child_domain_2 = Domain.create(
self.apiclient,
- services=self.services["domain"],
+ services=self.testdata["domain"],
parentdomainid=self.domain.id
)
self.child_do_admin_2 = Account.create(
self.apiclient,
- self.services["account"],
+ self.testdata["account"],
admin=True,
domainid=self.child_domain_2.id
)
@@ -502,7 +454,7 @@ class TestDomainCPULimitsConfiguration(cloudstackTestCase):
)
resource_count = account_list[0].cputotal
- expected_resource_count = int(self.services["service_offering"]["cpunumber"])
+ expected_resource_count = int(self.service_offering.cpunumber)
self.assertEqual(resource_count, expected_resource_count,
"Initial resource count should match with the expected resource count")
@@ -577,7 +529,7 @@ class TestDomainCPULimitsConfiguration(cloudstackTestCase):
)
resource_count = account_list[0].cputotal
- expected_resource_count = int(self.services["service_offering"]["cpunumber"])
+ expected_resource_count = int(self.service_offering.cpunumber)
self.assertEqual(resource_count, expected_resource_count,
"Initial resource count should with the expected resource count")
@@ -637,7 +589,7 @@ class TestDomainCPULimitsConfiguration(cloudstackTestCase):
)
resource_count = account_list[0].cputotal
- expected_resource_count = int(self.services["service_offering"]["cpunumber"])
+ expected_resource_count = int(self.service_offering.cpunumber)
self.assertEqual(resource_count, expected_resource_count,
"Initial resource count should match with the expected resource count")
@@ -671,7 +623,7 @@ class TestDomainCPULimitsConfiguration(cloudstackTestCase):
self.debug("Creating service offering with 4 CPU cores")
self.service_offering = ServiceOffering.create(
self.apiclient,
- self.services["service_offering"]
+ self.testdata["service_offering_multiple_cores"]
)
# Adding to cleanup list after execution
self.cleanup.append(self.service_offering)
@@ -716,7 +668,7 @@ class TestDomainCPULimitsConfiguration(cloudstackTestCase):
)
resource_count = account_list[0].cputotal
- expected_resource_count = int(self.services["service_offering"]["cpunumber"]) * 4 #Total 4 vms
+ expected_resource_count = int(self.service_offering.cpunumber) * 4 #Total 4 vms
self.assertEqual(resource_count, expected_resource_count,
"Initial resource count should with the expected resource count")
@@ -734,7 +686,7 @@ class TestDomainCPULimitsConfiguration(cloudstackTestCase):
)
resource_count_after_delete = account_list[0].cputotal
- expected_resource_count -= int(self.services["service_offering"]["cpunumber"])
+ expected_resource_count -= int(self.service_offering.cpunumber)
self.assertEqual(resource_count_after_delete, expected_resource_count,
"Resource count should be less than before after deleting the instance")
diff --git a/test/integration/component/test_cpu_max_limits.py b/test/integration/component/test_cpu_max_limits.py
index 14dcad0..16341e5 100644
--- a/test/integration/component/test_cpu_max_limits.py
+++ b/test/integration/component/test_cpu_max_limits.py
@@ -34,76 +34,25 @@ from marvin.lib.common import (get_domain,
)
from marvin.lib.utils import cleanup_resources
-class Services:
- """Test resource limit services
- """
-
- def __init__(self):
- self.services = {
- "account": {
- "email": "test@test.com",
- "firstname": "Test",
- "lastname": "User",
- "username": "resource",
- # Random characters are appended for unique
- # username
- "password": "password",
- },
- "service_offering": {
- "name": "Tiny Instance",
- "displaytext": "Tiny Instance",
- "cpunumber": 5,
- "cpuspeed": 100, # in MHz
- "memory": 128, # In MBs
- },
- "virtual_machine": {
- "displayname": "TestVM",
- "username": "root",
- "password": "password",
- "ssh_port": 22,
- "hypervisor": 'KVM',
- "privateport": 22,
- "publicport": 22,
- "protocol": 'TCP',
- },
- "network": {
- "name": "Test Network",
- "displaytext": "Test Network",
- "netmask": '255.255.255.0'
- },
- "project": {
- "name": "Project",
- "displaytext": "Test project",
- },
- "domain": {
- "name": "Domain",
- },
- "ostype": 'CentOS 5.3 (64-bit)',
- "sleep": 60,
- "timeout": 10,
- "mode": 'advanced',
- # Networking mode: Advanced, Basic
- }
-
class TestMaxCPULimits(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestMaxCPULimits, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
+ cls.testdata = cls.testClient.getParsedTestDataConfig()
- cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
- cls.services["mode"] = cls.zone.networktype
+ cls.testdata["mode"] = cls.zone.networktype
cls.template = get_template(
cls.api_client,
cls.zone.id,
- cls.services["ostype"]
+ cls.testdata["ostype"]
)
- cls.services["virtual_machine"]["zoneid"] = cls.zone.id
+ cls.testdata["virtual_machine"]["zoneid"] = cls.zone.id
cls._cleanup = []
return
@@ -143,7 +92,7 @@ class TestMaxCPULimits(cloudstackTestCase):
if account:
vm = VirtualMachine.create(
api_client,
- self.services["virtual_machine"],
+ self.testdata["virtual_machine"],
templateid=self.template.id,
accountid=account.name,
domainid=account.domainid,
@@ -152,7 +101,7 @@ class TestMaxCPULimits(cloudstackTestCase):
elif project:
vm = VirtualMachine.create(
api_client,
- self.services["virtual_machine"],
+ self.testdata["virtual_machine"],
templateid=self.template.id,
projectid=project.id,
networkids=networks,
@@ -171,13 +120,13 @@ class TestMaxCPULimits(cloudstackTestCase):
self.debug("Creating a domain under: %s" % self.domain.name)
self.child_domain = Domain.create(self.apiclient,
- services=self.services["domain"],
+ services=self.testdata["domain"],
parentdomainid=self.domain.id)
self.debug("domain crated with domain id %s" % self.child_domain.id)
self.child_do_admin = Account.create(self.apiclient,
- self.services["account"],
+ self.testdata["account"],
admin=True,
domainid=self.child_domain.id)
@@ -186,7 +135,7 @@ class TestMaxCPULimits(cloudstackTestCase):
# Create project as a domain admin
self.project = Project.create(self.apiclient,
- self.services["project"],
+ self.testdata["project"],
account=self.child_do_admin.name,
domainid=self.child_do_admin.domainid)
# Cleanup created project at end of test
@@ -241,10 +190,12 @@ class TestMaxCPULimits(cloudstackTestCase):
self.debug("Creating service offering with 3 CPU cores")
- self.services["service_offering"]["cpunumber"] = 3
+ so = self.testdata["service_offering"]
+ so["cpunumber"] = 3
+
self.service_offering = ServiceOffering.create(
self.apiclient,
- self.services["service_offering"]
+ so
)
# Adding to cleanup list after execution
self.cleanup.append(self.service_offering)
@@ -274,10 +225,9 @@ class TestMaxCPULimits(cloudstackTestCase):
self.debug("Creating service offering with 4 CPU cores")
- self.services["service_offering"]["cpunumber"] = 4
self.service_offering = ServiceOffering.create(
self.apiclient,
- self.services["service_offering"]
+ self.testdata["service_offering_multiple_cores"]
)
# Adding to cleanup list after execution
self.cleanup.append(self.service_offering)
@@ -315,10 +265,11 @@ class TestMaxCPULimits(cloudstackTestCase):
self.debug("Creating service offering with 3 CPU cores")
- self.services["service_offering"]["cpunumber"] = 3
+ so = self.testdata["service_offering"]
+ so["cpunumber"] = 3
self.service_offering = ServiceOffering.create(
self.apiclient,
- self.services["service_offering"]
+ so
)
# Adding to cleanup list after execution
self.cleanup.append(self.service_offering)
@@ -350,16 +301,15 @@ class TestMaxCPULimits(cloudstackTestCase):
self.debug("Creating service offering with 4 CPU cores")
- self.services["service_offering"]["cpunumber"] = 4
self.service_offering = ServiceOffering.create(
self.apiclient,
- self.services["service_offering"]
+ self.testdata["service_offering_multiple_cores"]
)
# Adding to cleanup list after execution
self.cleanup.append(self.service_offering)
self.debug("Setting up account and domain hierarchy")
- self.setupAccounts(account_limit=6, domain_limit=6, project_limit=6)
+ self.setupAccounts(account_limit=5, domain_limit=5, project_limit=5)
api_client_admin = self.testClient.getUserApiClient(
UserName=self.child_do_admin.name,
diff --git a/test/integration/component/test_cpu_project_limits.py b/test/integration/component/test_cpu_project_limits.py
index 4bb0cff..8a43b76 100644
--- a/test/integration/component/test_cpu_project_limits.py
+++ b/test/integration/component/test_cpu_project_limits.py
@@ -36,80 +36,29 @@ from marvin.lib.common import (get_domain,
from marvin.lib.utils import cleanup_resources
from marvin.codes import ERROR_NO_HOST_FOR_MIGRATION
-class Services:
- """Test resource limit services
- """
-
- def __init__(self):
- self.services = {
- "account": {
- "email": "test@test.com",
- "firstname": "Test",
- "lastname": "User",
- "username": "resource",
- # Random characters are appended for unique
- # username
- "password": "password",
- },
- "service_offering": {
- "name": "Tiny Instance",
- "displaytext": "Tiny Instance",
- "cpunumber": 4,
- "cpuspeed": 100, # in MHz
- "memory": 128, # In MBs
- },
- "virtual_machine": {
- "displayname": "TestVM",
- "username": "root",
- "password": "password",
- "ssh_port": 22,
- "hypervisor": 'KVM',
- "privateport": 22,
- "publicport": 22,
- "protocol": 'TCP',
- },
- "network": {
- "name": "Test Network",
- "displaytext": "Test Network",
- "netmask": '255.255.255.0'
- },
- "project": {
- "name": "Project",
- "displaytext": "Test project",
- },
- "domain": {
- "name": "Domain",
- },
- "ostype": 'CentOS 5.3 (64-bit)',
- "sleep": 60,
- "timeout": 10,
- "mode": 'advanced',
- # Networking mode: Advanced, Basic
- }
-
class TestProjectsCPULimits(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestProjectsCPULimits, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
+ cls.testdata = cls.testClient.getParsedTestDataConfig()
- cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
- cls.services["mode"] = cls.zone.networktype
+ cls.testdata["mode"] = cls.zone.networktype
cls.template = get_template(
cls.api_client,
cls.zone.id,
- cls.services["ostype"]
+ cls.testdata["ostype"]
)
- cls.services["virtual_machine"]["zoneid"] = cls.zone.id
+ cls.testdata["virtual_machine"]["zoneid"] = cls.zone.id
cls.service_offering = ServiceOffering.create(
cls.api_client,
- cls.services["service_offering"]
+ cls.testdata["service_offering_multiple_cores"]
)
cls._cleanup = [cls.service_offering, ]
@@ -129,7 +78,7 @@ class TestProjectsCPULimits(cloudstackTestCase):
self.dbclient = self.testClient.getDbConnection()
self.account = Account.create(
self.apiclient,
- self.services["account"],
+ self.testdata["account"],
admin=True
)
@@ -167,7 +116,7 @@ class TestProjectsCPULimits(cloudstackTestCase):
try:
self.vm = VirtualMachine.create(
api_client,
- self.services["virtual_machine"],
+ self.testdata["virtual_machine"],
templateid=self.template.id,
projectid=project.id,
networkids=networks,
@@ -187,18 +136,18 @@ class TestProjectsCPULimits(cloudstackTestCase):
self.debug("Creating a domain under: %s" % self.domain.name)
self.domain = Domain.create(self.apiclient,
- services=self.services["domain"],
+ services=self.testdata["domain"],
parentdomainid=self.domain.id)
self.admin = Account.create(
self.apiclient,
- self.services["account"],
+ self.testdata["account"],
admin=True,
domainid=self.domain.id
)
# Create project as a domain admin
self.project = Project.create(self.apiclient,
- self.services["project"],
+ self.testdata["project"],
account=self.admin.name,
domainid=self.admin.domainid)
# Cleanup created project at end of test
@@ -235,7 +184,7 @@ class TestProjectsCPULimits(cloudstackTestCase):
)
resource_count = project_list[0].cputotal
- expected_resource_count = int(self.services["service_offering"]["cpunumber"])
+ expected_resource_count = int(self.service_offering.cpunumber)
self.assertEqual(resource_count, expected_resource_count,
"Resource count should match with the expected resource count")
@@ -292,7 +241,7 @@ class TestProjectsCPULimits(cloudstackTestCase):
)
resource_count = project_list[0].cputotal
- expected_resource_count = int(self.services["service_offering"]["cpunumber"])
+ expected_resource_count = int(self.service_offering.cpunumber)
self.assertEqual(resource_count, expected_resource_count,
"Resource count should match with the expected resource count")
@@ -334,7 +283,7 @@ class TestProjectsCPULimits(cloudstackTestCase):
)
resource_count = project_list[0].cputotal
- expected_resource_count = int(self.services["service_offering"]["cpunumber"])
+ expected_resource_count = int(self.service_offering.cpunumber)
self.assertEqual(resource_count, expected_resource_count,
"Resource count should match with the expected resource count")
diff --git a/test/integration/component/test_dynamic_compute_offering.py b/test/integration/component/test_dynamic_compute_offering.py
index eb1f070..1889fbf 100644
--- a/test/integration/component/test_dynamic_compute_offering.py
+++ b/test/integration/component/test_dynamic_compute_offering.py
@@ -62,7 +62,7 @@ class TestDynamicServiceOffering(cloudstackTestCase):
cls.domain = get_domain(cls.apiclient)
cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
cls.services['mode'] = cls.zone.networktype
- cls.hypervisor = self.testClient.getHypervisorInfo()
+ cls.hypervisor = testClient.getHypervisorInfo()
if cls.hypervisor.lower() in ['lxc']:
raise unittest.SkipTest("dynamic scaling feature is not supported on %s" % cls.hypervisor.lower())
diff --git a/test/integration/component/test_egress_fw_rules.py b/test/integration/component/test_egress_fw_rules.py
index 9d13a23..f3bce20 100755
--- a/test/integration/component/test_egress_fw_rules.py
+++ b/test/integration/component/test_egress_fw_rules.py
@@ -424,6 +424,7 @@ class TestEgressFWRules(cloudstackTestCase):
"| grep -oP \'\d+(?=% packet loss)\'",
"['100']",
negative_test=False)
+ # If failed probably we've hit this issue: CLOUDSTACK-10075
@attr(tags=["advanced"], required_hardware="true")
def test_02_egress_fr2(self):
diff --git a/test/integration/component/test_escalation_listTemplateDomainAdmin.py b/test/integration/component/test_escalation_listTemplateDomainAdmin.py
index 087e45b..f0c4929 100644
--- a/test/integration/component/test_escalation_listTemplateDomainAdmin.py
+++ b/test/integration/component/test_escalation_listTemplateDomainAdmin.py
@@ -102,23 +102,8 @@ class TestlistTemplatesDomainAdmin(cloudstackTestCase):
domainid=self.domain1.id)
template_register.download(self.apiclient)
- # self.cleanup.append(self.template_register)
-
- time.sleep(self.testdata["sleep"])
- timeout = self.testdata["timeout"]
- while True:
- listTemplateResponse = Template.list(
- self.apiclient,
- templatefilter="all",
- id=template_register.id,
- account=self.account1.name,
- domainid=self.domain1.id
- )
- status = validateList(listTemplateResponse)
- self.assertEquals(
- PASS,
- status[0],
- "Template creation failed")
+
+ self.download(self.apiclient, template_register.id)
listtemplate = Template.list(
self.apiclient,
@@ -126,13 +111,57 @@ class TestlistTemplatesDomainAdmin(cloudstackTestCase):
hypervisor=self.hypervisor,
account=self.account2.name,
domainid=self.account2.domainid,
- templatefilter="all"
-
- )
+ templatefilter="executable")
self.assertEqual(
listtemplate,
None,
- "Check templates are not listed"
+ "Check templates are not listed - CLOUDSTACK-10149"
)
return
+
+ def download(self, apiclient, template_id, retries=12, interval=5):
+ """Check if template download will finish in 1 minute"""
+ while retries > -1:
+ time.sleep(interval)
+ template_response = Template.list(
+ apiclient,
+ id=template_id,
+ zoneid=self.zone.id,
+ templatefilter='self'
+ )
+
+ if isinstance(template_response, list):
+ template = template_response[0]
+ if not hasattr(template, 'status') or not template or not template.status:
+ retries = retries - 1
+ continue
+
+ # If template is ready,
+ # template.status = Download Complete
+ # Downloading - x% Downloaded
+ # if Failed
+ # Error - Any other string
+ if 'Failed' in template.status:
+ raise Exception(
+ "Failed to download template: status - %s" %
+ template.status)
+
+ elif template.status == 'Download Complete' and template.isready:
+ return
+
+ elif 'Downloaded' in template.status:
+ retries = retries - 1
+ continue
+
+ elif 'Installing' not in template.status:
+ if retries >= 0:
+ retries = retries - 1
+ continue
+ raise Exception(
+ "Error in downloading template: status - %s" %
+ template.status)
+
+ else:
+ retries = retries - 1
+ raise Exception("Template download failed exception.")
\ No newline at end of file
diff --git a/test/integration/component/test_escalations_instances.py b/test/integration/component/test_escalations_instances.py
index bbff907..9e0d27e 100644
--- a/test/integration/component/test_escalations_instances.py
+++ b/test/integration/component/test_escalations_instances.py
@@ -2293,11 +2293,12 @@ class TestInstances(cloudstackTestCase):
6. Issue "reset VM" command on CCP
7. check disk sequence on hypervisor remains same and VM starts successfully
"""
+ if self.hypervisor.lower() in ['kvm', 'hyperv', 'lxc', 'vmware']:
+ self.skipTest(
+ "This test not applicable on existing hypervisor. Hence,\
+ skipping the test")
try:
- if self.hypervisor.lower() in ['kvm', 'hyperv', 'lxc', 'vmware']:
- self.skipTest(
- "This test not applicable on existing hypervisor. Hence,\
- skipping the test")
+
template = Template.register(self.apiClient,
self.services["Windows 7 (64-bit)"],
zoneid=self.zone.id,
@@ -2469,7 +2470,6 @@ class TestInstances(cloudstackTestCase):
serviceofferingid=self.service_offering.id
)
- self.cleanup.append(virtual_machine)
self.debug("creating an instance with template ID: %s" % self.template.id)
vm_response = VirtualMachine.list(self.apiClient,
id=virtual_machine.id,
@@ -2505,7 +2505,6 @@ class TestInstances(cloudstackTestCase):
domainid=self.account.domainid,
diskofferingid=disk_offering.id
)
- self.cleanup.append(volume)
# Check List Volume response for newly created volume
list_volume_response = Volume.list(
self.apiClient,
diff --git a/test/integration/component/test_escalations_ipaddresses.py b/test/integration/component/test_escalations_ipaddresses.py
index 03e36b96..4a971f0 100644
--- a/test/integration/component/test_escalations_ipaddresses.py
+++ b/test/integration/component/test_escalations_ipaddresses.py
@@ -2396,8 +2396,8 @@ class TestIpAddresses(cloudstackTestCase):
# Creating expected and actual values dictionaries
expected_dict = {
"ipaddressid": associated_ipaddress.ipaddress.id,
- "startport": "22",
- "endport": "2222",
+ "startport": 22,
+ "endport": 2222,
"protocol": "tcp",
"cidrlist": "10.1.1.1/16"
}
diff --git a/test/integration/component/test_escalations_networks.py b/test/integration/component/test_escalations_networks.py
index e966b94..8be177b 100644
--- a/test/integration/component/test_escalations_networks.py
+++ b/test/integration/component/test_escalations_networks.py
@@ -329,7 +329,7 @@ class TestNetworks_1(cloudstackTestCase):
if self.hypervisor.lower() in ['hyperv']:
raise unittest.SkipTest("This feature is not supported on existing hypervisor. Hence, skipping the test")
# List VPC Offering
- vpc_offs_list = VpcOffering.list(self.userapiclient, isdefault="true")
+ vpc_offs_list = VpcOffering.list(self.userapiclient, isdefault="true", name='Default VPC offering')
if vpc_offs_list is None:
self.fail("Default VPC offerings not found")
else:
@@ -444,7 +444,7 @@ class TestNetworks_1(cloudstackTestCase):
if self.hypervisor.lower() in ['hyperv']:
raise unittest.SkipTest("This feature is not supported on existing hypervisor. Hence, skipping the test")
# List VPC Offering
- vpc_offs_list = VpcOffering.list(self.userapiclient, isdefault="true")
+ vpc_offs_list = VpcOffering.list(self.userapiclient, isdefault="true", name='Default VPC offering')
if vpc_offs_list is None:
self.fail("Default VPC offerings not found")
else:
@@ -619,9 +619,8 @@ class TestNetworks_1(cloudstackTestCase):
if self.hypervisor.lower() in ['hyperv']:
raise unittest.SkipTest("This feature is not supported on existing hypervisor. Hence, skipping the test")
# List VPC Offering
- vpc_offs_list = VpcOffering.list(self.userapiclient,
- isdefault="true",
- )
+ vpc_offs_list = VpcOffering.list(self.userapiclient, isdefault="true", name='Default VPC offering')
+
if vpc_offs_list is None:
self.fail("Default VPC offerings not found")
else:
@@ -1144,7 +1143,7 @@ class TestNetworks_1(cloudstackTestCase):
if self.hypervisor.lower() in ['hyperv']:
raise unittest.SkipTest("This feature is not supported on existing hypervisor. Hence, skipping the test")
# List VPC Offering
- vpc_offs_list = VpcOffering.list(self.userapiclient, isdefault="true")
+ vpc_offs_list = VpcOffering.list(self.userapiclient, isdefault="true", name='Default VPC offering')
if vpc_offs_list is None:
self.fail("Default VPC offerings not found")
else:
@@ -1287,7 +1286,7 @@ class TestNetworks_1(cloudstackTestCase):
if self.hypervisor.lower() in ['hyperv']:
raise unittest.SkipTest("This feature is not supported on existing hypervisor. Hence, skipping the test")
# List VPC Offering
- vpc_offs_list = VpcOffering.list(self.userapiclient, isdefault="true")
+ vpc_offs_list = VpcOffering.list(self.userapiclient, isdefault="true", name='Default VPC offering')
if vpc_offs_list is None:
self.fail("Default VPC offerings not found")
else:
@@ -1391,9 +1390,8 @@ class TestNetworks_1(cloudstackTestCase):
if self.hypervisor.lower() in ['hyperv']:
raise unittest.SkipTest("This feature is not supported on existing hypervisor. Hence, skipping the test")
# List VPC Offering
- vpc_offs_list = VpcOffering.list(self.userapiclient,
- isdefault="true",
- )
+ vpc_offs_list = VpcOffering.list(self.userapiclient, isdefault="true", name='Default VPC offering')
+
if vpc_offs_list is None:
self.fail("Default VPC offerings not found")
else:
@@ -2146,7 +2144,7 @@ class TestNetworks_2(cloudstackTestCase):
)
# Verifying the details of the EgressFirewall Rule
expected_dict = {
- "cidrlist": self.test_data["ingress_rule"]["cidrlist"],
+ "cidrlist": self.zone.guestcidraddress,
"id": egressfirewallrule_created.id,
"networkid": egressfirewallrule_created.networkid,
"protocol": self.test_data["ingress_rule"]["protocol"]
diff --git a/test/integration/component/test_escalations_routers.py b/test/integration/component/test_escalations_routers.py
index 73e4bc7..455be14 100644
--- a/test/integration/component/test_escalations_routers.py
+++ b/test/integration/component/test_escalations_routers.py
@@ -46,6 +46,7 @@ class TestVR(cloudstackTestCase):
cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = cls.testClient.getParsedTestDataConfig()
+ cls.hostConfig = cls.config.__dict__["zones"][0].__dict__["pods"][0].__dict__["clusters"][0].__dict__["hosts"][0].__dict__
# Get Zone, Domain and templates
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
@@ -173,8 +174,8 @@ class TestVR(cloudstackTestCase):
result = get_process_status(
host.ipaddress,
22,
- self.services["configurableData"]["host"]["username"],
- self.services["configurableData"]["host"]["password"],
+ self.hostConfig["username"],
+ self.hostConfig["password"],
router.linklocalip,
"lsmod | grep ftp"
)
diff --git a/test/integration/component/test_escalations_templates.py b/test/integration/component/test_escalations_templates.py
index 4cffda3..12f98ac 100644
--- a/test/integration/component/test_escalations_templates.py
+++ b/test/integration/component/test_escalations_templates.py
@@ -131,6 +131,52 @@ class TestTemplates(cloudstackTestCase):
(exp_val, act_val))
return return_flag
+ def download(self, apiclient, template_id, retries=72, interval=5):
+ """Check if template download will finish in 6 minutes"""
+ while retries > -1:
+ time.sleep(interval)
+ template_response = Template.list(
+ apiclient,
+ id=template_id,
+ zoneid=self.zone.id,
+ templatefilter='self'
+ )
+
+ if isinstance(template_response, list):
+ template = template_response[0]
+ if not hasattr(template, 'status') or not template or not template.status:
+ retries = retries - 1
+ continue
+
+ # If template is ready,
+ # template.status = Download Complete
+ # Downloading - x% Downloaded
+ # if Failed
+ # Error - Any other string
+ if 'Failed' in template.status:
+ raise Exception(
+ "Failed to download template: status - %s" %
+ template.status)
+
+ elif template.status == 'Download Complete' and template.isready:
+ return
+
+ elif 'Downloaded' in template.status:
+ retries = retries - 1
+ continue
+
+ elif 'Installing' not in template.status:
+ if retries >= 0:
+ retries = retries - 1
+ continue
+ raise Exception(
+ "Error in downloading template: status - %s" %
+ template.status)
+
+ else:
+ retries = retries - 1
+ raise Exception("Template download failed exception.")
+
@attr(tags=["advanced", "basic"], required_hardware="true")
def test_01_list_templates_pagination(self):
"""
@@ -238,32 +284,7 @@ class TestTemplates(cloudstackTestCase):
)
# Verifying the state of the template to be ready. If not waiting for
# state to become ready
- template_ready = False
- count = 0
- while template_ready is False:
- list_template = Template.list(
- self.userapiclient,
- id=template_created.id,
- listall=self.services["listall"],
- templatefilter=self.services["templatefilter"],
- )
- status = validateList(list_template)
- self.assertEquals(
- PASS,
- status[0],
- "Failed to list Templates by Id"
- )
- if list_template[0].isready is True:
- template_ready = True
- elif (str(list_template[0].status) == "Error"):
- self.fail("Created Template is in Errored state")
- break
- elif count > 10:
- self.fail("Timed out before Template came into ready state")
- break
- else:
- time.sleep(self.services["sleep"])
- count = count + 1
+ self.download(self.apiClient, template_created.id)
# Deleting the Template present in page 2
Template.delete(
@@ -346,32 +367,7 @@ class TestTemplates(cloudstackTestCase):
)
# Verifying the state of the template to be ready. If not waiting for
# state to become ready till time out
- template_ready = False
- count = 0
- while template_ready is False:
- list_template = Template.list(
- self.userapiclient,
- id=template_created.id,
- listall=self.services["listall"],
- templatefilter=self.services["templatefilter"],
- )
- status = validateList(list_template)
- self.assertEquals(
- PASS,
- status[0],
- "Failed to list Templates by Id"
- )
- if list_template[0].isready is True:
- template_ready = True
- elif (str(list_template[0].status) == "Error"):
- self.fail("Created Template is in Errored state")
- break
- elif count > 10:
- self.fail("Timed out before Template came into ready state")
- break
- else:
- time.sleep(self.services["sleep"])
- count = count + 1
+ self.download(self.apiClient, template_created.id)
# Downloading the Template name
download_template = Template.extract(
@@ -470,32 +466,7 @@ class TestTemplates(cloudstackTestCase):
)
# Verifying the state of the template to be ready. If not waiting for
# state to become ready till time out
- template_ready = False
- count = 0
- while template_ready is False:
- list_template = Template.list(
- self.userapiclient,
- id=template_created.id,
- listall=self.services["listall"],
- templatefilter=self.services["templatefilter"],
- )
- status = validateList(list_template)
- self.assertEquals(
- PASS,
- status[0],
- "Failed to list Templates by Id"
- )
- if list_template[0].isready is True:
- template_ready = True
- elif (str(list_template[0].status) == "Error"):
- self.fail("Created Template is in Errored state")
- break
- elif count > 10:
- self.fail("Timed out before Template came into ready state")
- break
- else:
- time.sleep(self.services["sleep"])
- count = count + 1
+ self.download(self.apiClient, template_created.id)
# Editing the Template name
edited_template = Template.update(
@@ -834,33 +805,7 @@ class TestTemplates(cloudstackTestCase):
)
# Verifying the state of the template to be ready. If not waiting
# for state to become ready till time out
- template_ready = False
- count = 0
- while template_ready is False:
- list_template = Template.list(
- self.userapiclient,
- id=template_created.id,
- listall=self.services["listall"],
- templatefilter=self.services["templatefilter"],
- )
- status = validateList(list_template)
- self.assertEquals(
- PASS,
- status[0],
- "Failed to list Templates by Id"
- )
- if list_template[0].isready is True:
- template_ready = True
- elif (str(list_template[0].status) == "Error"):
- self.fail("Created Template is in Errored state")
- break
- elif count > 10:
- self.fail(
- "Timed out before Template came into ready state")
- break
- else:
- time.sleep(self.services["sleep"])
- count = count + 1
+ self.download(self.apiClient, template_created.id)
# Copying the Template from Zone1 to Zone2
copied_template = template_created.copy(
diff --git a/test/integration/component/test_host_ha.py b/test/integration/component/test_host_ha.py
index 2af5ea9..cb2a869 100644
--- a/test/integration/component/test_host_ha.py
+++ b/test/integration/component/test_host_ha.py
@@ -45,6 +45,8 @@ class TestHostHA(cloudstackTestCase):
self.services = self.testClient.getParsedTestDataConfig()
self.zone = get_zone(self.apiclient, self.testClient.getZoneForTests())
self.pod = get_pod(self.apiclient, self.zone.id)
+ self.hostConfig = self.config.__dict__["zones"][0].__dict__["pods"][0].__dict__["clusters"][0].__dict__["hosts"][0].__dict__
+
self.cleanup = []
self.services = {
"service_offering": {
@@ -98,6 +100,7 @@ class TestHostHA(cloudstackTestCase):
def createVMs(self, hostId, number, local):
+
self.template = get_template(
self.apiclient,
self.zone.id,
@@ -154,7 +157,8 @@ class TestHostHA(cloudstackTestCase):
def checkHostDown(self, fromHostIp, testHostIp):
try:
- ssh = SshClient(fromHostIp, 22, "root", "password")
+ ssh = SshClient(fromHostIp, 22, self.hostConfig["username"],
+ self.hostConfig["password"])
res = ssh.execute("ping -c 1 %s" % testHostIp)
result = str(res)
if result.count("100% packet loss") == 1:
@@ -167,7 +171,8 @@ class TestHostHA(cloudstackTestCase):
def checkHostUp(self, fromHostIp, testHostIp):
try:
- ssh = SshClient(fromHostIp, 22, "root", "password")
+ ssh = SshClient(fromHostIp, 22, self.hostConfig["username"],
+ self.hostConfig["password"])
res = ssh.execute("ping -c 1 %s" % testHostIp)
result = str(res)
if result.count(" 0% packet loss") == 1:
@@ -273,9 +278,10 @@ class TestHostHA(cloudstackTestCase):
srcFile = os.path.dirname(os.path.realpath(__file__)) + "/test_host_ha.sh"
if not(os.path.isfile(srcFile)):
self.logger.debug("File %s not found" % srcFile)
- raise unittest.SkipTest("Script file %s required for HA not found" % srcFile);
+ raise unittest.SkipTest("Script file %s required for HA not found" % srcFile)
- ssh = SshClient(hostIp, 22, "root", "password")
+ ssh = SshClient(hostIp, 22, self.hostConfig["username"],
+ self.hostConfig["password"])
ssh.scp(srcFile, "/root/test_host_ha.sh")
ssh.execute("nohup sh /root/test_host_ha.sh -t %s -d all > /dev/null 2>&1 &\n" % timeout)
return
@@ -284,14 +290,13 @@ class TestHostHA(cloudstackTestCase):
srcFile = os.path.dirname(os.path.realpath(__file__)) + "/test_host_ha.sh"
if not(os.path.isfile(srcFile)):
self.logger.debug("File %s not found" % srcFile)
- raise unittest.SkipTest("Script file %s required for HA not found" % srcFile);
+ raise unittest.SkipTest("Script file %s required for HA not found" % srcFile)
- ssh = SshClient(hostIp, 22, "root", "password")
+ ssh = SshClient(hostIp, 22, self.hostConfig["username"], self.hostConfig["password"])
ssh.scp(srcFile, "/root/test_host_ha.sh")
ssh.execute("nohup sh /root/test_host_ha.sh -t %s -d agent > /dev/null 2>&1 &\n" % timeout)
return
-
@attr(
tags=[
"advanced",
@@ -302,12 +307,9 @@ class TestHostHA(cloudstackTestCase):
"sg"],
required_hardware="true")
def test_01_host_ha_with_nfs_storagepool_with_vm(self):
- raise unittest.SkipTest("Skipping this test as this is for NFS store only.");
- return
if not(self.isOnlyNFSStorageAvailable()):
- raise unittest.SkipTest("Skipping this test as this is for NFS store only.");
- return
+ raise unittest.SkipTest("Skipping this test as this is for NFS store only.")
listHost = Host.list(
self.apiclient,
@@ -320,9 +322,9 @@ class TestHostHA(cloudstackTestCase):
if len(listHost) != 2:
- self.logger.debug("Host HA can be tested with two host only %s, found" % len(listHost));
- raise unittest.SkipTest("Host HA can be tested with two host only %s, found" % len(listHost));
- return
+ self.logger.debug("Host HA can be tested with two host only %s, found" % len(listHost))
+ raise unittest.SkipTest("Host HA can be tested with two host only %s, found" % len(listHost))
+
no_of_vms = self.noOfVMsOnHost(listHost[0].id)
@@ -382,11 +384,9 @@ class TestHostHA(cloudstackTestCase):
"sg"],
required_hardware="true")
def test_02_host_ha_with_local_storage_and_nfs(self):
- raise unittest.SkipTest("Skipping this test as this is for NFS store only.");
- return
+
if not(self.isLocalAndNFSStorageAvailable()):
- raise unittest.SkipTest("Skipping this test as this is for Local storage and NFS storage only.");
- return
+ raise unittest.SkipTest("Skipping this test as this is for Local storage and NFS storage only.")
listHost = Host.list(
self.apiclient,
@@ -399,9 +399,8 @@ class TestHostHA(cloudstackTestCase):
if len(listHost) != 2:
- self.logger.debug("Host HA can be tested with two host only %s, found" % len(listHost));
- raise unittest.SkipTest("Host HA can be tested with two host only %s, found" % len(listHost));
- return
+ self.logger.debug("Host HA can be tested with two host only %s, found" % len(listHost))
+ raise unittest.SkipTest("Host HA can be tested with two host only %s, found" % len(listHost))
no_of_vms = self.noOfVMsOnHost(listHost[0].id)
@@ -462,12 +461,9 @@ class TestHostHA(cloudstackTestCase):
"sg"],
required_hardware="true")
def test_03_host_ha_with_only_local_storage(self):
- raise unittest.SkipTest("Skipping this test as this is for NFS store only.");
- return
if not(self.isOnlyLocalStorageAvailable()):
- raise unittest.SkipTest("Skipping this test as this is for Local storage only.");
- return
+ raise unittest.SkipTest("Skipping this test as this is for Local storage only.")
listHost = Host.list(
self.apiclient,
@@ -480,9 +476,8 @@ class TestHostHA(cloudstackTestCase):
if len(listHost) != 2:
- self.logger.debug("Host HA can be tested with two host only %s, found" % len(listHost));
- raise unittest.SkipTest("Host HA can be tested with two host only %s, found" % len(listHost));
- return
+ self.logger.debug("Host HA can be tested with two host only %s, found" % len(listHost))
+ raise unittest.SkipTest("Host HA can be tested with two host only %s, found" % len(listHost))
no_of_vms = self.noOfVMsOnHost(listHost[0].id)
@@ -543,8 +538,7 @@ class TestHostHA(cloudstackTestCase):
def test_04_host_ha_vmactivity_check(self):
if not(self.isOnlyNFSStorageAvailable()):
- raise unittest.SkipTest("Skipping this test as this is for NFS store only.");
- return
+ raise unittest.SkipTest("Skipping this test as this is for NFS store only.")
listHost = Host.list(
self.apiclient,
@@ -557,9 +551,8 @@ class TestHostHA(cloudstackTestCase):
if len(listHost) != 2:
- self.logger.debug("Host HA can be tested with two host only %s, found" % len(listHost));
- raise unittest.SkipTest("Host HA can be tested with two host only %s, found" % len(listHost));
- return
+ self.logger.debug("Host HA can be tested with two host only %s, found" % len(listHost))
+ raise unittest.SkipTest("Host HA can be tested with two host only %s, found" % len(listHost))
no_of_vms = self.noOfVMsOnHost(listHost[0].id)
diff --git a/test/integration/component/test_interop_xd_ccp.py b/test/integration/component/test_interop_xd_ccp.py
index 5d38df2..4b6b15d 100644
--- a/test/integration/component/test_interop_xd_ccp.py
+++ b/test/integration/component/test_interop_xd_ccp.py
@@ -95,7 +95,7 @@ class TestXDCCPInterop(cloudstackTestCase):
break
if cls.uploadtemplateformat=="KVM":
- assert False, "Interop is not supported on KVM"
+ raise unittest.SkipTest("Interop is not supported on KVM")
cls.uploadurl=cls.services["interop"][cls.uploadtemplateformat]["url"]
diff --git a/test/integration/component/test_migrate_vol_to_maintained_pool.py b/test/integration/component/test_migrate_vol_to_maintained_pool.py
index 6c83470..5941d67 100644
--- a/test/integration/component/test_migrate_vol_to_maintained_pool.py
+++ b/test/integration/component/test_migrate_vol_to_maintained_pool.py
@@ -116,6 +116,9 @@ class TestMigrationMaintainedPool(cloudstackTestCase):
if len(storage_pools_response) < 2 :
self.skipTest("Atleast two storage pools are need to test Storage migration")
+ if self.hypervisor.lower() in ['kvm']:
+ self.virtual_machine.stop(self.apiclient)
+
list_volumes_response = list_volumes(
self.apiclient,
virtualmachineid=self.virtual_machine.id,
diff --git a/test/integration/component/test_multiple_public_interfaces.py b/test/integration/component/test_multiple_public_interfaces.py
index 50ec552..3116465 100644
--- a/test/integration/component/test_multiple_public_interfaces.py
+++ b/test/integration/component/test_multiple_public_interfaces.py
@@ -20,8 +20,7 @@
interface on VR (eth3, eth4 etc) and iptable
"""
# Import Local Modules
-from marvin.codes import (FAILED, STATIC_NAT_RULE, LB_RULE,
- NAT_RULE, PASS)
+from marvin.codes import (FAILED)
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.cloudstackException import CloudstackAPIException
from marvin.cloudstackAPI import rebootRouter
@@ -47,13 +46,7 @@ from marvin.lib.common import (get_domain,
get_zone,
get_template,
list_hosts,
- list_publicIP,
- list_nat_rules,
- list_routers,
- list_virtual_machines,
- list_lb_rules,
- list_configurations,
- verifyGuestTrafficPortGroups)
+ list_routers)
from nose.plugins.attrib import attr
from ddt import ddt, data
# Import System modules
@@ -312,7 +305,6 @@ class TestStaticNat(cloudstackTestCase):
self.apiclient,
self.services["publiciprange"]
)
-
logger.debug("Dedicating Public IP range to the account");
dedicate_public_ip_range_response = PublicIpRange.dedicate(
self.apiclient,
@@ -477,6 +469,7 @@ class TestRouting(cloudstackTestCase):
self.apiclient,
self.services["publiciprange"]
)
+ self._cleanup.append(self.public_ip_range)
logger.debug("Dedicating Public IP range to the account");
dedicate_public_ip_range_response = PublicIpRange.dedicate(
@@ -734,6 +727,7 @@ class TestIptables(cloudstackTestCase):
self.apiclient,
self.services["publiciprange"]
)
+ self._cleanup.append(self.public_ip_range)
logger.debug("Dedicating Public IP range to the account");
dedicate_public_ip_range_response = PublicIpRange.dedicate(
@@ -750,7 +744,6 @@ class TestIptables(cloudstackTestCase):
self.services["virtual_machine"]
)
self.cleanup.append(ip_address)
- self.cleanup.append(self.public_ip_range)
# Check if VM is in Running state before creating NAT and firewall rules
vm_response = VirtualMachine.list(
self.apiclient,
@@ -783,7 +776,6 @@ class TestIptables(cloudstackTestCase):
startport=self.services["natrule"]["publicport"],
endport=self.services["natrule"]["publicport"]
)
-
# Get the router details associated with account
routers = list_routers(
self.apiclient,
@@ -838,7 +830,7 @@ class TestIptables(cloudstackTestCase):
1,
"Check to ensure there is a iptable rule to accept the RELATED,ESTABLISHED traffic"
)
-
+ firewall_rule.delete(self.apiclient)
class TestVPCPortForwarding(cloudstackTestCase):
diff --git a/test/integration/component/test_overcommit.py b/test/integration/component/test_overcommit.py
index db04316..14b76f5 100644
--- a/test/integration/component/test_overcommit.py
+++ b/test/integration/component/test_overcommit.py
@@ -37,13 +37,13 @@ from marvin.sshClient import SshClient
from nose.plugins.attrib import attr
-def ssh_xen_host(password, ipaddr, instance_name):
+def ssh_xen_host(password, username, ipaddr, instance_name):
"""Ssh into xen host and get vm mem details"""
mem = []
sshClient = SshClient(
ipaddr,
22,
- "root",
+ username,
password
)
command = "xe vm-list params=all name-label=%s" % instance_name
@@ -57,13 +57,14 @@ def ssh_xen_host(password, ipaddr, instance_name):
return mem
-def ssh_kvm_host(password, ipaddr, instance_name):
+def ssh_kvm_host(password, user, ipaddr, instance_name):
"""Ssh into kvm host and get vm mem details"""
mem = []
+
sshClient = SshClient(
ipaddr,
22,
- "root",
+ user,
password
)
@@ -99,8 +100,8 @@ class Overcommit (cloudstackTestCase):
cls.domain = get_domain(cls.apiclient)
cls.zone = get_zone(cls.apiclient)
cls.testdata["mode"] = cls.zone.networktype
- cls.testdata["configurableData"]["password"] = "xenroot"
cls.hypervisor = testClient.getHypervisorInfo()
+ cls.hostConfig = cls.config.__dict__["zones"][0].__dict__["pods"][0].__dict__["clusters"][0].__dict__["hosts"][0].__dict__
cls.template = get_template(
cls.apiclient,
@@ -253,14 +254,16 @@ class Overcommit (cloudstackTestCase):
if listHost[0].hypervisor.lower() == 'xenserver':
k = ssh_xen_host(
- self.testdata["configurableData"]["password"],
+ self.hostConfig["password"],
+ self.hostConfig["username"],
listHost[0].ipaddress,
self.deployVmResponse.instancename)
elif listHost[0].hypervisor.lower() == 'kvm':
k = ssh_kvm_host(
- self.testdata["configurableData"]["password"],
+ self.hostConfig["password"],
+ self.hostConfig["username"],
listHost[0].ipaddress,
self.deployVmResponse.instancename)
@@ -278,14 +281,16 @@ class Overcommit (cloudstackTestCase):
if listHost[0].hypervisor.lower() == 'xenserver':
k1 = ssh_xen_host(
- self.testdata["configurableData"]["password"],
+ self.hostConfig["password"],
+ self.hostConfig["username"],
listHost[0].ipaddress,
self.deployVmResponse.instancename)
elif listHost[0].hypervisor.lower() == 'kvm':
time.sleep(200)
k1 = ssh_kvm_host(
- self.testdata["configurableData"]["password"],
+ self.hostConfig["password"],
+ self.hostConfig["username"],
listHost[0].ipaddress,
self.deployVmResponse.instancename)
self.assertEqual(k1[0],
diff --git a/test/integration/component/test_persistent_networks.py b/test/integration/component/test_persistent_networks.py
index f8beed0..69ed493 100644
--- a/test/integration/component/test_persistent_networks.py
+++ b/test/integration/component/test_persistent_networks.py
@@ -62,7 +62,8 @@ class TestPersistentNetworks(cloudstackTestCase):
# Fill services from the external config file
cls.services = cls.testClient.getParsedTestDataConfig()
-
+ cls.hostConfig = cls.config.__dict__["zones"][0].__dict__["pods"][0].__dict__["clusters"][0].__dict__["hosts"][
+ 0].__dict__
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
@@ -174,8 +175,8 @@ class TestPersistentNetworks(cloudstackTestCase):
sshClient = SshClient(
host=sourceip,
port=self.services['configurableData']['host']["publicport"],
- user=self.services['configurableData']['host']["username"],
- passwd=self.services['configurableData']['host']["password"])
+ user=self.hostConfig['username'],
+ passwd=self.hostConfig["password"])
res = sshClient.execute("ping -c 1 %s" % (
router.linklocalip
))
@@ -429,7 +430,8 @@ class TestPersistentNetworks(cloudstackTestCase):
isolated_network.update(
self.apiclient,
networkofferingid=self.isolated_persistent_network_offering.id,
- changecidr=changecidr)
+ changecidr=changecidr,
+ forced=True)
try:
virtual_machine = VirtualMachine.create(
@@ -1539,9 +1541,12 @@ class TestPersistentNetworks(cloudstackTestCase):
self.assertEqual(validateList(hosts)[0],PASS,"Check list host returns a valid list")
host = hosts[0]
result = get_process_status(
- host.ipaddress,22, self.services["acl"]["host"]["username"],self.services["acl"]["host"]["password"], router.linklocalip,
+ host.ipaddress,22,
+ self.hostConfig["username"],
+ self.hostConfig["password"],
+ router.linklocalip,
"iptables -I INPUT 1 -j DROP"
- )
+ )
except Exception as e:
raise Exception("Exception raised in accessing/running the command on hosts : %s " % e)
except Exception as e:
@@ -2043,7 +2048,8 @@ class TestRestartPersistentNetwork(cloudstackTestCase):
# Fill services from the external config file
cls.services = cls.testClient.getParsedTestDataConfig()
-
+ cls.hostConfig = cls.config.__dict__["zones"][0].__dict__["pods"][0].__dict__["clusters"][0].__dict__["hosts"][
+ 0].__dict__
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
@@ -2153,8 +2159,8 @@ class TestRestartPersistentNetwork(cloudstackTestCase):
sshClient = SshClient(
host=sourceip,
port=self.services['configurableData']['host']["publicport"],
- user=self.services['configurableData']['host']["username"],
- passwd=self.services['configurableData']['host']["password"])
+ user=self.hostConfig['username'],
+ passwd=self.hostConfig['password'])
res = sshClient.execute("ping -c 1 %s" % (
router.linklocalip
))
@@ -3094,6 +3100,7 @@ class TestVPCNetworkOperations(cloudstackTestCase):
persistent_network_2.id
)
+ # CLOUDSTACK-8451 needs to be fixed in order to work
self.CheckIngressEgressConnectivityofVM(
virtual_machine_1,
ipaddress_1.ipaddress.ipaddress)
diff --git a/test/integration/component/test_portable_ip.py b/test/integration/component/test_portable_ip.py
index 32e7b75..d329e64 100644
--- a/test/integration/component/test_portable_ip.py
+++ b/test/integration/component/test_portable_ip.py
@@ -1367,7 +1367,8 @@ class TestPortableIpTransferAcrossNetworks(cloudstackTestCase):
self.virtual_machine2.password
)
except Exception as e:
- self.fail("Exception while SSHing : %s" % e)
+ self.fail("Probably hit issue: CLOUDSTACK-10078, "
+ "Exception while SSHing : %s" % e)
finally:
self.debug("disassociating portable ip: %s" % portableip.ipaddress.ipaddress)
diff --git a/test/integration/component/test_project_configs.py b/test/integration/component/test_project_configs.py
index c7028ee..e2925ec 100644
--- a/test/integration/component/test_project_configs.py
+++ b/test/integration/component/test_project_configs.py
@@ -27,84 +27,6 @@ from marvin.lib.common import *
from marvin.sshClient import SshClient
import datetime
-
-class Services:
- """Test Project Services
- """
-
- def __init__(self):
- self.services = {
- "domain": {
- "name": "Domain",
- },
- "project": {
- "name": "Project",
- "displaytext": "Test project",
- },
- "mgmt_server": {
- "ipaddress": '192.168.100.21',
- "username": 'root',
- "password": 'password',
- "port": 22,
- },
- "account": {
- "email": "administrator@clogeny.com",
- "firstname": "Test",
- "lastname": "User",
- "username": "test",
- # Random characters are appended for unique
- # username
- "password": "password",
- },
- "user": {
- "email": "administrator@clogeny.com",
- "firstname": "User",
- "lastname": "User",
- "username": "User",
- # Random characters are appended for unique
- # username
- "password": "password",
- },
- "service_offering": {
- "name": "Tiny Instance",
- "displaytext": "Tiny Instance",
- "cpunumber": 1,
- "cpuspeed": 100, # in MHz
- "memory": 128, # In MBs
- },
- "virtual_machine": {
- "displayname": "Test VM",
- "username": "root",
- "password": "password",
- "ssh_port": 22,
- "hypervisor": 'XenServer',
- # Hypervisor type should be same as
- # hypervisor type of cluster
- "privateport": 22,
- "publicport": 22,
- "protocol": 'TCP',
- },
- "template": {
- "displaytext": "Public Template",
- "name": "Public template",
- "ostype": 'CentOS 5.3 (64-bit)',
- "url": "http://download.cloudstack.org/releases/2.0.0/UbuntuServer-10-04-64bit.vhd.bz2",
- "hypervisor": 'XenServer',
- "format": 'VHD',
- "isfeatured": True,
- "ispublic": True,
- "isextractable": True,
- },
- "configs": {
- "project.invite.timeout": 300,
- },
- "ostype": 'CentOS 5.3 (64-bit)',
- # Cent OS 5.3 (64 bit)
- "sleep": 60,
- "timeout": 10,
- }
-
-
class TestUserProjectCreation(cloudstackTestCase):
@classmethod
@@ -112,9 +34,9 @@ class TestUserProjectCreation(cloudstackTestCase):
cls.testClient = super(TestUserProjectCreation, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
- cls.services = Services().services
+ cls.testdata = cls.testClient.getParsedTestDataConfig()
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
- cls.services['mode'] = cls.zone.networktype
+ cls.testdata['mode'] = cls.zone.networktype
configs = Configurations.list(
cls.api_client,
@@ -129,19 +51,19 @@ class TestUserProjectCreation(cloudstackTestCase):
# Create domains, account etc.
cls.domain = Domain.create(
cls.api_client,
- cls.services["domain"]
+ cls.testdata["domain"]
)
cls.account = Account.create(
cls.api_client,
- cls.services["account"],
+ cls.testdata["account"],
admin=True,
domainid=cls.domain.id
)
cls.user = Account.create(
cls.api_client,
- cls.services["account"],
+ cls.testdata["account"],
admin=True,
domainid=cls.domain.id
)
@@ -203,7 +125,7 @@ class TestUserProjectCreation(cloudstackTestCase):
project = Project.create(
self.apiclient,
- self.services["project"],
+ self.testdata["project"],
account=self.account.name,
domainid=self.account.domainid
)
@@ -239,7 +161,7 @@ class TestUserProjectCreation(cloudstackTestCase):
# Create project as a domain admin
project = Project.create(
self.apiclient,
- self.services["project"],
+ self.testdata["project"],
account=self.user.name,
domainid=self.user.domainid
)
@@ -276,9 +198,9 @@ class TestProjectCreationNegative(cloudstackTestCase):
cls.testClient = super(TestProjectCreationNegative, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
- cls.services = Services().services
+ cls.testdata = cls.testClient.getParsedTestDataConfig()
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
- cls.services['mode'] = cls.zone.networktype
+ cls.testdata['mode'] = cls.zone.networktype
# Checking for prereqisits - global configs
configs = Configurations.list(
@@ -294,19 +216,19 @@ class TestProjectCreationNegative(cloudstackTestCase):
# Create domains, account etc.
cls.domain = Domain.create(
cls.api_client,
- cls.services["domain"]
+ cls.testdata["domain"]
)
cls.account = Account.create(
cls.api_client,
- cls.services["account"],
+ cls.testdata["account"],
admin=True,
domainid=cls.domain.id
)
cls.user = Account.create(
cls.api_client,
- cls.services["account"],
+ cls.testdata["account"],
admin=True,
domainid=cls.domain.id
)
@@ -367,7 +289,7 @@ class TestProjectCreationNegative(cloudstackTestCase):
# Create project as a domain admin
project = Project.create(
self.apiclient,
- self.services["project"],
+ self.testdata["project"],
account=self.account.name,
domainid=self.account.domainid
)
@@ -403,7 +325,7 @@ class TestProjectCreationNegative(cloudstackTestCase):
with self.assertRaises(Exception):
project = Project.create(
self.apiclient,
- self.services["project"],
+ self.testdata["project"],
account=self.user.name,
domainid=self.user.domainid
)
@@ -419,9 +341,9 @@ class TestProjectInviteRequired(cloudstackTestCase):
cls.testClient = super(TestProjectInviteRequired, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
- cls.services = Services().services
+ cls.testdata = cls.testClient.getParsedTestDataConfig()
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
- cls.services['mode'] = cls.zone.networktype
+ cls.testdata['mode'] = cls.zone.networktype
# Create domains, account etc.
cls.domain = get_domain(cls.api_client)
@@ -439,14 +361,14 @@ class TestProjectInviteRequired(cloudstackTestCase):
cls.account = Account.create(
cls.api_client,
- cls.services["account"],
+ cls.testdata["account"],
admin=True,
domainid=cls.domain.id
)
cls.user = Account.create(
cls.api_client,
- cls.services["user"],
+ cls.testdata["user"],
admin=True,
domainid=cls.domain.id
)
@@ -490,7 +412,7 @@ class TestProjectInviteRequired(cloudstackTestCase):
# Create project as a domain admin
project = Project.create(
self.apiclient,
- self.services["project"],
+ self.testdata["project"],
account=self.account.name,
domainid=self.account.domainid
)
@@ -566,12 +488,13 @@ class TestProjectInviteRequiredTrue(cloudstackTestCase):
@classmethod
def setUpClass(cls):
+
cls.testClient = super(TestProjectInviteRequiredTrue, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
- cls.services = Services().services
+ cls.testdata = cls.testClient.getParsedTestDataConfig()
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
- cls.services['mode'] = cls.zone.networktype
+ cls.testdata['mode'] = cls.zone.networktype
# Create domains, account etc.
cls.domain = get_domain(cls.api_client)
@@ -589,14 +512,14 @@ class TestProjectInviteRequiredTrue(cloudstackTestCase):
cls.account = Account.create(
cls.api_client,
- cls.services["account"],
+ cls.testdata["account"],
admin=True,
domainid=cls.domain.id
)
cls.user = Account.create(
cls.api_client,
- cls.services["user"],
+ cls.testdata["user"],
admin=True,
domainid=cls.domain.id
)
@@ -640,7 +563,7 @@ class TestProjectInviteRequiredTrue(cloudstackTestCase):
# Create project as a domain admin
project = Project.create(
self.apiclient,
- self.services["project"],
+ self.testdata["project"],
account=self.account.name,
domainid=self.account.domainid
)
@@ -720,9 +643,9 @@ class TestProjectInviteTimeout(cloudstackTestCase):
cls.testClient = super(TestProjectInviteTimeout, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
- cls.services = Services().services
+ cls.testdata = cls.testClient.getParsedTestDataConfig()
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
- cls.services['mode'] = cls.zone.networktype
+ cls.testdata['mode'] = cls.zone.networktype
# Create domains, account etc.
cls.domain = get_domain(cls.api_client)
@@ -746,21 +669,21 @@ class TestProjectInviteTimeout(cloudstackTestCase):
if not isinstance(configs, list):
raise unittest.SkipTest("The 'project.invite.timeout' is not found in global configs")
- elif int(configs[0].value) != cls.services["configs"]["project.invite.timeout"]:
+ elif int(configs[0].value) != cls.testdata["configs"]["project.invite.timeout"]:
raise unittest.SkipTest("'project.invite.timeout' should be: %s " %
- cls.services["configs"]["project.invite.timeout"])
+ cls.testdata["configs"]["project.invite.timeout"])
cls.config = configs[0]
cls.account = Account.create(
cls.api_client,
- cls.services["account"],
+ cls.testdata["account"],
admin=True,
domainid=cls.domain.id
)
cls.user = Account.create(
cls.api_client,
- cls.services["user"],
+ cls.testdata["user"],
admin=True,
domainid=cls.domain.id
)
@@ -807,7 +730,7 @@ class TestProjectInviteTimeout(cloudstackTestCase):
# Create project as a domain admin
project = Project.create(
self.apiclient,
- self.services["project"],
+ self.testdata["project"],
account=self.account.name,
domainid=self.account.domainid
)
@@ -933,7 +856,7 @@ class TestProjectInviteTimeout(cloudstackTestCase):
# Create project as a domain admin
project = Project.create(
self.apiclient,
- self.services["project"],
+ self.testdata["project"],
account=self.account.name,
domainid=self.account.domainid
)
@@ -1064,7 +987,7 @@ class TestProjectInviteTimeout(cloudstackTestCase):
# Create project as a domain admin
project = Project.create(
self.apiclient,
- self.services["project"],
+ self.testdata["project"],
account=self.account.name,
domainid=self.account.domainid
)
@@ -1193,7 +1116,7 @@ class TestProjectInviteTimeout(cloudstackTestCase):
# Create project as a domain admin
project = Project.create(
self.apiclient,
- self.services["project"],
+ self.testdata["project"],
account=self.account.name,
domainid=self.account.domainid
)
@@ -1312,15 +1235,15 @@ class TestProjectInviteTimeout(cloudstackTestCase):
config = configs[0]
self.assertEqual(
int(config.value),
- self.services["configs"]["project.invite.timeout"],
+ self.testdata["configs"]["project.invite.timeout"],
"'project.invite.timeout' should be %s" %
- self.services["configs"]["project.invite.timeout"]
+ self.testdata["configs"]["project.invite.timeout"]
)
# Create project as a domain admin
project = Project.create(
self.apiclient,
- self.services["project"],
+ self.testdata["project"],
account=self.account.name,
domainid=self.account.domainid
)
@@ -1367,7 +1290,7 @@ class TestProjectInviteTimeout(cloudstackTestCase):
# Fetch the latest mail sent to user
mail_content = fetch_latest_mail(
- self.services["mail_account"],
+ self.testdata["mail_account"],
from_mail=self.user.user[0].email
)
return
diff --git a/test/integration/component/test_ps_resource_limits_volume.py b/test/integration/component/test_ps_resource_limits_volume.py
index 1e86557..753f3f1 100644
--- a/test/integration/component/test_ps_resource_limits_volume.py
+++ b/test/integration/component/test_ps_resource_limits_volume.py
@@ -136,7 +136,7 @@ class TestPrimaryResourceLimitsVolume(cloudstackTestCase):
# upload volume and verify that the volume is uploaded
volume = Volume.upload(self.apiclient, self.services["configurableData"]["upload_volume"],
zoneid=self.zone.id, account=self.account.name,
- domainid=self.account.domainid, url="http://people.apache.org/~sanjeev/rajani-thin-volume.vhd")
+ domainid=self.account.domainid)
volume.wait_for_upload(self.apiclient)
volumes = Volume.list(self.apiclient, id=volume.id,
diff --git a/test/integration/component/test_redundant_router_cleanups.py b/test/integration/component/test_redundant_router_cleanups.py
index 04f24a5..34b1fb3 100644
--- a/test/integration/component/test_redundant_router_cleanups.py
+++ b/test/integration/component/test_redundant_router_cleanups.py
@@ -757,7 +757,7 @@ class TestRedundantRouterNetworkCleanups(cloudstackTestCase):
master_router = routers[1]
backup_router = routers[0]
else:
- self.fail("Both the routers in RVR are in BackupState")
+ self.fail("Both the routers in RVR are in BackupState - CLOUDSTACK-9015")
Router.stop(
self.apiclient,
diff --git a/test/integration/component/test_rootvolume_resize.py b/test/integration/component/test_rootvolume_resize.py
index 59488e6..94fc53b 100644
--- a/test/integration/component/test_rootvolume_resize.py
+++ b/test/integration/component/test_rootvolume_resize.py
@@ -633,6 +633,8 @@ class TestResizeVolume(cloudstackTestCase):
result = self.chk_volume_resize(self.apiclient, vm)
if result:
try:
+ if 'kvm' in self.hypervisor.lower():
+ self.virtual_machine.stop(self.apiclient)
virtualmachine_snapshot = VmSnapshot.create \
(self.apiclient, self.virtual_machine.id)
virtulmachine_snapshot_list = VmSnapshot. \
@@ -647,7 +649,7 @@ class TestResizeVolume(cloudstackTestCase):
virtulmachine_snapshot_list[0].id,
"Virtual Machine Snapshot id do not match")
except Exception as e:
- raise Exception("Exception while performing"
+ raise Exception("Issue CLOUDSTACK-10080: Exception while performing"
" vmsnapshot: %s" % e)
else:
self.debug("volume resize failed for root volume")
@@ -760,7 +762,7 @@ class TestResizeVolume(cloudstackTestCase):
restorerootvolume = list_restorevolume_response[0]
self.assertEqual(rootvolume.size, restorerootvolume.size,
"root volume and restore root"
- " volume size differs")
+ " volume size differs - CLOUDSTACK-10079")
except Exception as e:
raise Exception("Warning: Exception "
"during VM migration: %s" % e)
@@ -924,7 +926,7 @@ class TestResizeVolume(cloudstackTestCase):
rootvol = Volume(rootvolume.__dict__)
newsize = (rootvolume.size >> 30) - 1
success = False
- if rootvolume is not None:
+ if rootvolume is not None and 'vmware' in vm.hypervisor.lower():
try:
rootvol.resize(self.apiclient, size=newsize)
except Exception as e:
@@ -935,6 +937,8 @@ class TestResizeVolume(cloudstackTestCase):
raise Exception("Warning: Exception "
"during executing test resize"
" volume with less value : %s" % e)
+ if rootvol is not None and ('kvm' in vm.hypervisor.lower() or 'xenserver' in vm.hypervisor.lower()):
+ rootvol.resize(self.apiclient, size=newsize)
@attr(tags=["advanced"], required_hrdware="true")
def test_7_usage_events_after_rootvolume_resized_(self):
diff --git a/test/integration/component/test_routers.py b/test/integration/component/test_routers.py
index 3ea4a7c..45e2853 100644
--- a/test/integration/component/test_routers.py
+++ b/test/integration/component/test_routers.py
@@ -1055,6 +1055,8 @@ class TestRouterStopCreateFW(cloudstackTestCase):
cls.services["account"],
domainid=cls.domain.id
)
+ cls.hostConfig = cls.config.__dict__["zones"][0].__dict__["pods"][0].__dict__["clusters"][0].__dict__["hosts"][0].__dict__
+
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
@@ -1219,13 +1221,13 @@ class TestRouterStopCreateFW(cloudstackTestCase):
)
self.assertEqual(
fw_rules[0].startport,
- str(self.services["fwrule"]["startport"]),
+ self.services["fwrule"]["startport"],
"Check start port of firewall rule"
)
self.assertEqual(
fw_rules[0].endport,
- str(self.services["fwrule"]["endport"]),
+ self.services["fwrule"]["endport"],
"Check end port of firewall rule"
)
# For DNS and DHCP check 'dnsmasq' process status
@@ -1250,15 +1252,14 @@ class TestRouterStopCreateFW(cloudstackTestCase):
True,
"Check for list hosts response return valid data"
)
+
host = hosts[0]
- host.user = self.services["configurableData"]["host"]["username"]
- host.passwd = self.services["configurableData"]["host"]["password"]
try:
result = get_process_status(
host.ipaddress,
22,
- host.user,
- host.passwd,
+ self.hostConfig['username'],
+ self.hostConfig['password'],
router.linklocalip,
'iptables -t nat -L'
)
diff --git a/test/integration/component/test_secsr_mount.py b/test/integration/component/test_secsr_mount.py
index d19c36b..71b45cc 100644
--- a/test/integration/component/test_secsr_mount.py
+++ b/test/integration/component/test_secsr_mount.py
@@ -67,6 +67,7 @@ class TestSecSRMount(cloudstackTestCase):
"timeout": 10,
}
+
def tearDown(self):
try:
# Clean up, terminate the created templates
@@ -80,7 +81,7 @@ class TestSecSRMount(cloudstackTestCase):
def isOnlyLocalStorageAvailable(self):
if not self.zone.localstorageenabled:
self.skipTest("Local Storage not enabled")
-
+
storage_pools = StoragePool.list(
self.apiclient,
zoneid=self.zone.id,
diff --git a/test/integration/component/test_ss_limits.py b/test/integration/component/test_ss_limits.py
index 0c8aae3..bd61398 100644
--- a/test/integration/component/test_ss_limits.py
+++ b/test/integration/component/test_ss_limits.py
@@ -165,6 +165,7 @@ class TestSecondaryStorageLimits(cloudstackTestCase):
except Exception as e:
self.fail("Failed to register template: %s" % e)
+ time.sleep(120)
templates = Template.list(apiclient,
templatefilter=\
self.services["template_2"]["templatefilter"],
@@ -243,6 +244,7 @@ class TestSecondaryStorageLimits(cloudstackTestCase):
except Exception as e:
self.fail("Failed to create template: %s" % e)
+ time.sleep(120)
templates = Template.list(apiclient,
templatefilter=\
self.services["template_2"]["templatefilter"],
diff --git a/test/integration/component/test_template_from_snapshot_with_template_details b/test/integration/component/test_template_from_snapshot_with_template_details.py
similarity index 96%
rename from test/integration/component/test_template_from_snapshot_with_template_details
rename to test/integration/component/test_template_from_snapshot_with_template_details.py
index 3b56713..4e20261 100644
--- a/test/integration/component/test_template_from_snapshot_with_template_details
+++ b/test/integration/component/test_template_from_snapshot_with_template_details.py
@@ -179,7 +179,6 @@ class TestCreateTemplate(cloudstackTestCase):
@attr(tags=["advanced", "advancedns"], required_hardware="true")
def test_01_create_template_snampshot(self):
-
builtin_info = get_builtin_template_info(self.apiclient, self.zone.id)
self.services["templates"][0]["url"] = builtin_info[0]
self.services["templates"][0]["hypervisor"] = builtin_info[1]
@@ -337,22 +336,6 @@ class TestCreateTemplate(cloudstackTestCase):
"Check VM avaliable in List Virtual Machines"
)
- list_vm_response = VirtualMachine.list(
- self.apiclient,
- id=self.small_virtual_machine.id
- )
- self.assertEqual(
- isinstance(list_vm_response, list),
- True,
- "Check list response returns a valid list"
- )
-
- self.assertNotEqual(
- len(list_vm_response),
- 0,
- "Check VM avaliable in List Virtual Machines"
- )
-
template = Template.create_from_snapshot(
self.apiclient,
snapshot,
diff --git a/test/integration/component/test_volumes.py b/test/integration/component/test_volumes.py
index 0dccc46..9fce1ef 100644
--- a/test/integration/component/test_volumes.py
+++ b/test/integration/component/test_volumes.py
@@ -21,7 +21,8 @@ from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.cloudstackAPI import (listHypervisorCapabilities,
attachIso,
- deleteVolume)
+ deleteVolume
+ )
from marvin.lib.utils import cleanup_resources, validateList
from marvin.lib.base import (Account,
ServiceOffering,
@@ -37,83 +38,26 @@ from marvin.lib.common import (get_domain,
get_zone,
get_template,
get_pod,
- find_storage_pool_type)
+ find_storage_pool_type,
+ update_resource_limit)
from marvin.codes import PASS
# Import System modules
import time
-class Services:
-
- """Test Volume Services
- """
-
- def __init__(self):
- self.services = {
- "account": {
- "email": "test@test.com",
- "firstname": "Test",
- "lastname": "User",
- "username": "test",
- # Random characters are appended for unique
- # username
- "password": "password",
- },
- "service_offering": {
- "name": "Tiny Instance",
- "displaytext": "Tiny Instance",
- "cpunumber": 1,
- "cpuspeed": 100, # in MHz
- "memory": 128, # In MBs
- },
- "disk_offering": {
- "displaytext": "Small",
- "name": "Small",
- "disksize": 1
- },
- "volume": {
- "diskname": "TestDiskServ",
- },
- "virtual_machine": {
- "displayname": "testVM",
- "hypervisor": 'XenServer',
- "protocol": 'TCP',
- "ssh_port": 22,
- "username": "root",
- "password": "password",
- "privateport": 22,
- "publicport": 22,
- },
- "iso": # ISO settings for Attach/Detach ISO tests
- {
- "displaytext": "Test ISO",
- "name": "testISO",
- "url": "http://people.apache.org/~tsp/dummy.iso",
- # Source URL where ISO is located
- "ostype": 'CentOS 5.3 (64-bit)',
- },
- "custom_volume": {
- "customdisksize": 2,
- "diskname": "Custom disk",
- },
- "sleep": 50,
- "ostype": 'CentOS 5.3 (64-bit)',
- }
-
-
class TestAttachVolume(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestAttachVolume, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
+ cls.testdata = cls.testClient.getParsedTestDataConfig()
- cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.pod = get_pod(cls.api_client, cls.zone.id)
- cls.services['mode'] = cls.zone.networktype
+ cls.testdata['mode'] = cls.zone.networktype
cls._cleanup = []
cls.unsupportedStorageType = False
cls.hypervisor = cls.testClient.getHypervisorInfo()
@@ -123,17 +67,14 @@ class TestAttachVolume(cloudstackTestCase):
return
cls.disk_offering = DiskOffering.create(
cls.api_client,
- cls.services["disk_offering"]
+ cls.testdata["disk_offering"]
)
cls._cleanup.append(cls.disk_offering)
- template = get_template(
+ cls.template = get_template(
cls.api_client,
cls.zone.id,
- cls.services["ostype"]
+ cls.testdata["ostype"]
)
- cls.services["zoneid"] = cls.zone.id
- cls.services["virtual_machine"]["zoneid"] = cls.zone.id
- cls.services["virtual_machine"]["template"] = template.id
# get max data volumes limit based on the hypervisor type and version
listHost = Host.list(
cls.api_client,
@@ -151,27 +92,43 @@ class TestAttachVolume(cloudstackTestCase):
if res[i].hypervisorversion == ver:
break
cls.max_data_volumes = int(res[i].maxdatavolumeslimit)
+ if 'kvm' in cls.hypervisor.lower():
+ cls.max_data_volumes = 24
cls.debug('max data volumes:{}'.format(cls.max_data_volumes))
- cls.services["volume"]["max"] = cls.max_data_volumes
+ cls.testdata["volume"]["max"] = cls.max_data_volumes
# Create VMs, NAT Rules etc
cls.account = Account.create(
cls.api_client,
- cls.services["account"],
+ cls.testdata["account"],
domainid=cls.domain.id
)
+ update_resource_limit(
+ cls.api_client,
+ 2, # Volume (CloudStack resource type 2 = volume, not instance)
+ account=cls.account.name,
+ domainid=cls.account.domainid,
+ max=cls.max_data_volumes + 1
+ )
+
cls._cleanup.append(cls.account)
+ cls.debug('max data volumes:{}'.format(cls.max_data_volumes))
+ #cls.services["volume"]["max"] = cls.max_data_volumes
+ # Create VMs, NAT Rules etc
+
cls.service_offering = ServiceOffering.create(
cls.api_client,
- cls.services["service_offering"]
+ cls.testdata["service_offering"]
)
cls._cleanup.append(cls.service_offering)
cls.virtual_machine = VirtualMachine.create(
cls.api_client,
- cls.services["virtual_machine"],
+ cls.testdata["virtual_machine"],
accountid=cls.account.name,
domainid=cls.account.domainid,
serviceofferingid=cls.service_offering.id,
+ templateid=cls.template.id,
+ zoneid=cls.zone.id
)
def setUp(self):
@@ -213,111 +170,109 @@ class TestAttachVolume(cloudstackTestCase):
# 5. Start The VM. Start VM should be successful
# Create 5 volumes and attach to VM
- try:
- for i in range(self.max_data_volumes):
- volume = Volume.create(
- self.apiclient,
- self.services["volume"],
- zoneid=self.zone.id,
- account=self.account.name,
- domainid=self.account.domainid,
- diskofferingid=self.disk_offering.id
- )
- # Check List Volume response for newly created volume
- list_volume_response = Volume.list(
- self.apiclient,
- id=volume.id
- )
- self.assertNotEqual(
- list_volume_response,
- None,
- "Check if volume exists in ListVolumes"
- )
- # Attach volume to VM
- self.virtual_machine.attach_volume(
- self.apiclient,
- volume
- )
- # Check all volumes attached to same VM
+
+ for i in range(self.max_data_volumes):
+ volume = Volume.create(
+ self.apiclient,
+ self.testdata["volume"],
+ zoneid=self.zone.id,
+ account=self.account.name,
+ domainid=self.account.domainid,
+ diskofferingid=self.disk_offering.id
+ )
+ # Check List Volume response for newly created volume
list_volume_response = Volume.list(
self.apiclient,
- virtualmachineid=self.virtual_machine.id,
- type='DATADISK',
- listall=True
+ id=volume.id
)
self.assertNotEqual(
list_volume_response,
None,
- "Check if volume exists in ListVolumes")
- self.assertEqual(
- isinstance(list_volume_response, list),
- True,
- "Check list volumes response for valid list")
- self.assertEqual(
- len(list_volume_response),
- self.max_data_volumes,
- "Volumes attached to the VM %s. Expected %s" %
- (len(list_volume_response),
- self.max_data_volumes))
- self.debug("Rebooting the VM: %s" % self.virtual_machine.id)
- # Reboot VM
- self.virtual_machine.reboot(self.apiclient)
-
- vm_response = VirtualMachine.list(
- self.apiclient,
- id=self.virtual_machine.id,
- )
- # Verify VM response to check whether VM deployment was successful
- self.assertNotEqual(
- len(vm_response),
- 0,
- "Check VMs available in List VMs response"
- )
- self.assertEqual(
- isinstance(vm_response, list),
- True,
- "Check list VM response for valid list"
+ "Check if volume exists in ListVolumes"
)
- vm = vm_response[0]
- self.assertEqual(
- vm.state,
- 'Running',
- "Check the state of VM"
+ # Attach volume to VM
+ self.virtual_machine.attach_volume(
+ self.apiclient,
+ volume
)
+ # Check all volumes attached to same VM
+ list_volume_response = Volume.list(
+ self.apiclient,
+ virtualmachineid=self.virtual_machine.id,
+ type='DATADISK',
+ listall=True
+ )
+ self.assertNotEqual(
+ list_volume_response,
+ None,
+ "Check if volume exists in ListVolumes")
+ self.assertEqual(
+ isinstance(list_volume_response, list),
+ True,
+ "Check list volumes response for valid list")
+ self.assertEqual(
+ len(list_volume_response),
+ self.max_data_volumes,
+ "Volumes attached to the VM %s. Expected %s" %
+ (len(list_volume_response),
+ self.max_data_volumes))
+ self.debug("Rebooting the VM: %s" % self.virtual_machine.id)
+ # Reboot VM
+ self.virtual_machine.reboot(self.apiclient)
+
+ vm_response = VirtualMachine.list(
+ self.apiclient,
+ id=self.virtual_machine.id,
+ )
+ # Verify VM response to check whether VM deployment was successful
+ self.assertNotEqual(
+ len(vm_response),
+ 0,
+ "Check VMs available in List VMs response"
+ )
+ self.assertEqual(
+ isinstance(vm_response, list),
+ True,
+ "Check list VM response for valid list"
+ )
+ vm = vm_response[0]
+ self.assertEqual(
+ vm.state,
+ 'Running',
+ "Check the state of VM"
+ )
- # Stop VM
- self.virtual_machine.stop(self.apiclient)
+ # Stop VM
+ self.virtual_machine.stop(self.apiclient)
- # Start VM
- self.virtual_machine.start(self.apiclient)
- # Sleep to ensure that VM is in ready state
- time.sleep(self.services["sleep"])
+ # Start VM
+ self.virtual_machine.start(self.apiclient)
+ # Sleep to ensure that VM is in ready state
+ time.sleep(self.testdata["sleep"])
- vm_response = VirtualMachine.list(
- self.apiclient,
- id=self.virtual_machine.id,
- )
- self.assertEqual(
- isinstance(vm_response, list),
- True,
- "Check list VM response for valid list"
- )
+ vm_response = VirtualMachine.list(
+ self.apiclient,
+ id=self.virtual_machine.id,
+ )
+ self.assertEqual(
+ isinstance(vm_response, list),
+ True,
+ "Check list VM response for valid list"
+ )
- # Verify VM response to check whether VM deployment was successful
- self.assertNotEqual(
- len(vm_response),
- 0,
- "Check VMs available in List VMs response"
- )
+ # Verify VM response to check whether VM deployment was successful
+ self.assertNotEqual(
+ len(vm_response),
+ 0,
+ "Check VMs available in List VMs response"
+ )
- vm = vm_response[0]
- self.assertEqual(
- vm.state,
- 'Running',
- "Check the state of VM"
- )
- except Exception as e:
- self.fail("Exception occured: %s" % e)
+ vm = vm_response[0]
+ self.assertEqual(
+ vm.state,
+ 'Running',
+ "Check the state of VM"
+ )
return
@attr(tags=["advanced", "advancedns"])
@@ -330,43 +285,42 @@ class TestAttachVolume(cloudstackTestCase):
# 2. Attach volume should fail
# Create a volume and attach to VM
- volume = Volume.create(
- self.apiclient,
- self.services["volume"],
- zoneid=self.zone.id,
+
+ # Update limit so that account could create one more volume
+ if 'kvm' in self.hypervisor.lower():
+ update_resource_limit(
+ self.api_client,
+ 2, # Volume (CloudStack resource type 2 = volume, not instance)
account=self.account.name,
domainid=self.account.domainid,
- diskofferingid=self.disk_offering.id
- )
- self.debug("Created volume: %s for account: %s" % (
- volume.id,
- self.account.name
- ))
- # Check List Volume response for newly created volume
- list_volume_response = Volume.list(
- self.apiclient,
- id=volume.id
- )
- self.assertNotEqual(
- list_volume_response,
- None,
- "Check if volume exists in ListVolumes"
- )
- self.assertEqual(
- isinstance(list_volume_response, list),
- True,
- "Check list volumes response for valid list"
+ max=32
)
# Attach volume to VM
with self.assertRaises(Exception):
- self.debug("Trying to Attach volume: %s to VM: %s" % (
- volume.id,
- self.virtual_machine.id
- ))
- self.virtual_machine.attach_volume(
- self.apiclient,
- volume
- )
+ for i in range(self.max_data_volumes):
+ volume = Volume.create(
+ self.apiclient,
+ self.testdata["volume"],
+ zoneid=self.zone.id,
+ account=self.account.name,
+ domainid=self.account.domainid,
+ diskofferingid=self.disk_offering.id
+ )
+ # Check List Volume response for newly created volume
+ list_volume_response = Volume.list(
+ self.apiclient,
+ id=volume.id
+ )
+ self.assertNotEqual(
+ list_volume_response,
+ None,
+ "Check if volume exists in ListVolumes"
+ )
+ # Attach volume to VM
+ self.virtual_machine.attach_volume(
+ self.apiclient,
+ volume
+ )
return
@@ -377,12 +331,12 @@ class TestAttachDetachVolume(cloudstackTestCase):
cls.testClient = super(TestAttachDetachVolume, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
- cls.services = Services().services
+ cls.testdata = cls.testClient.getParsedTestDataConfig()
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.pod = get_pod(cls.api_client, cls.zone.id)
- cls.services['mode'] = cls.zone.networktype
+ cls.testdata['mode'] = cls.zone.networktype
cls._cleanup = []
cls.unsupportedStorageType = False
cls.hypervisor = cls.testClient.getHypervisorInfo()
@@ -392,17 +346,17 @@ class TestAttachDetachVolume(cloudstackTestCase):
return
cls.disk_offering = DiskOffering.create(
cls.api_client,
- cls.services["disk_offering"]
+ cls.testdata["disk_offering"]
)
cls._cleanup.append(cls.disk_offering)
template = get_template(
cls.api_client,
cls.zone.id,
- cls.services["ostype"]
+ cls.testdata["ostype"]
)
- cls.services["zoneid"] = cls.zone.id
- cls.services["virtual_machine"]["zoneid"] = cls.zone.id
- cls.services["virtual_machine"]["template"] = template.id
+ cls.testdata["zoneid"] = cls.zone.id
+ cls.testdata["virtual_machine"]["zoneid"] = cls.zone.id
+ cls.testdata["virtual_machine"]["template"] = template.id
# get max data volumes limit based on the hypervisor type and version
listHost = Host.list(
cls.api_client,
@@ -420,24 +374,33 @@ class TestAttachDetachVolume(cloudstackTestCase):
if res[i].hypervisorversion == ver:
break
cls.max_data_volumes = int(res[i].maxdatavolumeslimit)
+ if 'kvm' in cls.hypervisor.lower():
+ cls.max_data_volumes = 24
cls.debug('max data volumes:{}'.format(cls.max_data_volumes))
- cls.services["volume"]["max"] = cls.max_data_volumes
+ cls.testdata["volume"]["max"] = cls.max_data_volumes
# Create VMs, NAT Rules etc
cls.account = Account.create(
cls.api_client,
- cls.services["account"],
+ cls.testdata["account"],
domainid=cls.domain.id
)
+ update_resource_limit(
+ cls.api_client,
+ 2, # Volume (CloudStack resource type 2 = volume, not instance)
+ account=cls.account.name,
+ domainid=cls.account.domainid,
+ max=cls.max_data_volumes + 1
+ )
cls._cleanup.append(cls.account)
cls.service_offering = ServiceOffering.create(
cls.api_client,
- cls.services["service_offering"]
+ cls.testdata["service_offering"]
)
cls._cleanup.append(cls.service_offering)
cls.virtual_machine = VirtualMachine.create(
cls.api_client,
- cls.services["virtual_machine"],
+ cls.testdata["virtual_machine"],
accountid=cls.account.name,
domainid=cls.account.domainid,
serviceofferingid=cls.service_offering.id,
@@ -480,130 +443,128 @@ class TestAttachDetachVolume(cloudstackTestCase):
# 5. Stop the VM. Stop VM should be successful
# 6. Start The VM. Start VM should be successful
- try:
- volumes = []
- # Create 5 volumes and attach to VM
- for i in range(self.max_data_volumes):
- volume = Volume.create(
- self.apiclient,
- self.services["volume"],
- zoneid=self.zone.id,
- account=self.account.name,
- domainid=self.account.domainid,
- diskofferingid=self.disk_offering.id
- )
- self.cleanup.append(volume)
- volumes.append(volume)
-
- # Check List Volume response for newly created volume
- list_volume_response = Volume.list(
- self.apiclient,
- id=volume.id
- )
- self.assertNotEqual(
- list_volume_response,
- None,
- "Check if volume exists in ListVolumes")
- self.assertEqual(
- isinstance(list_volume_response, list),
- True,
- "Check list volumes response for valid list")
- # Attach volume to VM
- self.virtual_machine.attach_volume(
- self.apiclient,
- volume
- )
+ volumes = []
+ # Create 5 volumes and attach to VM
+ for i in range(self.max_data_volumes):
+ volume = Volume.create(
+ self.apiclient,
+ self.testdata["volume"],
+ zoneid=self.zone.id,
+ account=self.account.name,
+ domainid=self.account.domainid,
+ diskofferingid=self.disk_offering.id
+ )
+ self.cleanup.append(volume)
+ volumes.append(volume)
- # Check all volumes attached to same VM
+ # Check List Volume response for newly created volume
list_volume_response = Volume.list(
self.apiclient,
- virtualmachineid=self.virtual_machine.id,
- type='DATADISK',
- listall=True
+ id=volume.id
)
self.assertNotEqual(
list_volume_response,
None,
- "Check if volume exists in ListVolumes"
- )
+ "Check if volume exists in ListVolumes")
self.assertEqual(
isinstance(list_volume_response, list),
True,
- "Check list volumes response for valid list"
+ "Check list volumes response for valid list")
+ # Attach volume to VM
+ self.virtual_machine.attach_volume(
+ self.apiclient,
+ volume
)
- self.assertEqual(
- len(list_volume_response),
- self.max_data_volumes,
- "Volumes attached to the VM %s. Expected %s" %
- (len(list_volume_response),
- self.max_data_volumes))
-
- # Detach all volumes from VM
- for volume in volumes:
- self.virtual_machine.detach_volume(
- self.apiclient,
- volume
- )
- # Reboot VM
- self.debug("Rebooting the VM: %s" % self.virtual_machine.id)
- self.virtual_machine.reboot(self.apiclient)
- # Sleep to ensure that VM is in ready state
- time.sleep(self.services["sleep"])
- vm_response = VirtualMachine.list(
+ # Check all volumes attached to same VM
+ list_volume_response = Volume.list(
+ self.apiclient,
+ virtualmachineid=self.virtual_machine.id,
+ type='DATADISK',
+ listall=True
+ )
+ self.assertNotEqual(
+ list_volume_response,
+ None,
+ "Check if volume exists in ListVolumes"
+ )
+ self.assertEqual(
+ isinstance(list_volume_response, list),
+ True,
+ "Check list volumes response for valid list"
+ )
+ self.assertEqual(
+ len(list_volume_response),
+ self.max_data_volumes,
+ "Volumes attached to the VM %s. Expected %s" %
+ (len(list_volume_response),
+ self.max_data_volumes))
+
+ # Detach all volumes from VM
+ for volume in volumes:
+ self.virtual_machine.detach_volume(
self.apiclient,
- id=self.virtual_machine.id,
- )
- # Verify VM response to check whether VM deployment was successful
- self.assertEqual(
- isinstance(vm_response, list),
- True,
- "Check list VM response for valid list"
+ volume
)
+ # Reboot VM
+ self.debug("Rebooting the VM: %s" % self.virtual_machine.id)
+ self.virtual_machine.reboot(self.apiclient)
+ # Sleep to ensure that VM is in ready state
+ time.sleep(self.testdata["sleep"])
- self.assertNotEqual(
- len(vm_response),
- 0,
- "Check VMs available in List VMs response"
- )
- vm = vm_response[0]
- self.assertEqual(
- vm.state,
- 'Running',
- "Check the state of VM"
- )
+ vm_response = VirtualMachine.list(
+ self.apiclient,
+ id=self.virtual_machine.id,
+ )
+ # Verify VM response to check whether VM deployment was successful
+ self.assertEqual(
+ isinstance(vm_response, list),
+ True,
+ "Check list VM response for valid list"
+ )
- # Stop VM
- self.virtual_machine.stop(self.apiclient)
+ self.assertNotEqual(
+ len(vm_response),
+ 0,
+ "Check VMs available in List VMs response"
+ )
+ vm = vm_response[0]
+ self.assertEqual(
+ vm.state,
+ 'Running',
+ "Check the state of VM"
+ )
- # Start VM
- self.virtual_machine.start(self.apiclient)
- # Sleep to ensure that VM is in ready state
- time.sleep(self.services["sleep"])
+ # Stop VM
+ self.virtual_machine.stop(self.apiclient)
+
+ # Start VM
+ self.virtual_machine.start(self.apiclient)
+ # Sleep to ensure that VM is in ready state
+ time.sleep(self.testdata["sleep"])
+
+ vm_response = VirtualMachine.list(
+ self.apiclient,
+ id=self.virtual_machine.id,
+ )
+ # Verify VM response to check whether VM deployment was successful
+ self.assertEqual(
+ isinstance(vm_response, list),
+ True,
+ "Check list VM response for valid list"
+ )
+ self.assertNotEqual(
+ len(vm_response),
+ 0,
+ "Check VMs available in List VMs response"
+ )
+ vm = vm_response[0]
+ self.assertEqual(
+ vm.state,
+ 'Running',
+ "Check the state of VM"
+ )
- vm_response = VirtualMachine.list(
- self.apiclient,
- id=self.virtual_machine.id,
- )
- # Verify VM response to check whether VM deployment was successful
- self.assertEqual(
- isinstance(vm_response, list),
- True,
- "Check list VM response for valid list"
- )
- self.assertNotEqual(
- len(vm_response),
- 0,
- "Check VMs available in List VMs response"
- )
- vm = vm_response[0]
- self.assertEqual(
- vm.state,
- 'Running',
- "Check the state of VM"
- )
- except Exception as e:
- self.fail("Exception occurred: %s" % e)
return
@attr(tags=["advanced", "advancedns"], required_hardware="false")
@@ -626,8 +587,7 @@ class TestAttachDetachVolume(cloudstackTestCase):
or self.hypervisor.lower() == 'kvm'
or self.hypervisor.lower() == 'simulator'
or self.hypervisor.lower() == 'xenserver'):
-
- try:
+
# Check for root volume
root_volume_response = Volume.list(
self.apiclient,
@@ -635,45 +595,45 @@ class TestAttachDetachVolume(cloudstackTestCase):
type='ROOT',
listall=True
)
-
+
self.assertEqual(
validateList(root_volume_response)[0],
PASS,
"Invalid response returned for root volume list"
)
-
+
# Grab the root volume for later use
root_volume = root_volume_response[0]
-
+
# Stop VM
self.debug("Stopping the VM: %s" % self.virtual_machine.id)
self.virtual_machine.stop(self.apiclient)
-
+
vm_response = VirtualMachine.list(
self.apiclient,
id=self.virtual_machine.id,
)
-
+
# Ensure that vm_response is a valid list
self.assertEqual(
validateList(vm_response)[0],
PASS,
"Invalid response returned for vm_response list"
)
-
+
vm = vm_response[0]
self.assertEqual(
vm.state,
'Stopped',
"Check the state of VM"
)
-
+
# Detach root volume from VM
self.virtual_machine.detach_volume(
self.apiclient,
root_volume
)
-
+
# Verify that root disk is gone
no_root_volume_response = Volume.list(
self.apiclient,
@@ -681,20 +641,20 @@ class TestAttachDetachVolume(cloudstackTestCase):
type='ROOT',
listall=True
)
-
+
self.assertEqual(
no_root_volume_response,
None,
"Check if root volume exists in ListVolumes"
)
-
+
# Attach root volume to VM
self.virtual_machine.attach_volume(
self.apiclient,
root_volume,
0
)
-
+
# Check for root volume
new_root_volume_response = Volume.list(
self.apiclient,
@@ -702,29 +662,29 @@ class TestAttachDetachVolume(cloudstackTestCase):
type='ROOT',
listall=True
)
-
+
# Ensure that new_root_volume_response is a valid list
self.assertEqual(
validateList(new_root_volume_response)[0],
PASS,
"Invalid response returned for new_root_volume_response list"
)
-
+
# Start VM
self.virtual_machine.start(self.apiclient)
-
+
vm_response = VirtualMachine.list(
self.apiclient,
id=self.virtual_machine.id,
)
-
+
# Verify VM response to check whether VM deployment was successful
self.assertEqual(
validateList(vm_response)[0],
PASS,
"Invalid response returned for vm_response list during VM start up"
)
-
+
vm = vm_response[0]
self.assertEqual(
vm.state,
@@ -732,9 +692,6 @@ class TestAttachDetachVolume(cloudstackTestCase):
"Ensure the state of VM is running"
)
- except Exception as e:
- self.fail("Exception occurred: %s" % e)
-
else:
self.skipTest("Root Volume attach/detach is not supported on %s " % self.hypervisor)
return
@@ -747,12 +704,12 @@ class TestAttachVolumeISO(cloudstackTestCase):
cls.testClient = super(TestAttachVolumeISO, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
- cls.services = Services().services
+ cls.testdata = cls.testClient.getParsedTestDataConfig()
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.pod = get_pod(cls.api_client, cls.zone.id)
- cls.services['mode'] = cls.zone.networktype
+ cls.testdata['mode'] = cls.zone.networktype
cls._cleanup = []
cls.unsupportedStorageType = False
cls.hypervisor = cls.testClient.getHypervisorInfo()
@@ -762,18 +719,18 @@ class TestAttachVolumeISO(cloudstackTestCase):
return
cls.disk_offering = DiskOffering.create(
cls.api_client,
- cls.services["disk_offering"]
+ cls.testdata["disk_offering"]
)
cls._cleanup.append(cls.disk_offering)
template = get_template(
cls.api_client,
cls.zone.id,
- cls.services["ostype"]
+ cls.testdata["ostype"]
)
- cls.services["zoneid"] = cls.zone.id
- cls.services["virtual_machine"]["zoneid"] = cls.zone.id
- cls.services["iso"]["zoneid"] = cls.zone.id
- cls.services["virtual_machine"]["template"] = template.id
+ cls.testdata["zoneid"] = cls.zone.id
+ cls.testdata["virtual_machine"]["zoneid"] = cls.zone.id
+ cls.testdata["iso"]["zoneid"] = cls.zone.id
+ cls.testdata["virtual_machine"]["template"] = template.id
# get max data volumes limit based on the hypervisor type and version
listHost = Host.list(
cls.api_client,
@@ -791,24 +748,37 @@ class TestAttachVolumeISO(cloudstackTestCase):
if res[i].hypervisorversion == ver:
break
cls.max_data_volumes = int(res[i].maxdatavolumeslimit)
+ if 'kvm' in cls.hypervisor:
+ cls.max_data_volumes = 24
cls.debug('max data volumes:{}'.format(cls.max_data_volumes))
- cls.services["volume"]["max"] = cls.max_data_volumes
+ cls.testdata["volume"]["max"] = cls.max_data_volumes
# Create VMs, NAT Rules etc
cls.account = Account.create(
cls.api_client,
- cls.services["account"],
+ cls.testdata["account"],
domainid=cls.domain.id
)
+ update_resource_limit(
+ cls.api_client,
+ 2, # Instance
+ account=cls.account.name,
+ domainid=cls.account.domainid,
+ max=cls.max_data_volumes + 1
+ )
+ cls.debug('max data volumes:{}'.format(cls.max_data_volumes))
+ cls.testdata["volume"]["max"] = cls.max_data_volumes
+ # Create VMs, NAT Rules etc
+
cls._cleanup.append(cls.account)
cls.service_offering = ServiceOffering.create(
cls.api_client,
- cls.services["service_offering"]
+ cls.testdata["service_offering"]
)
cls._cleanup.append(cls.service_offering)
cls.virtual_machine = VirtualMachine.create(
cls.api_client,
- cls.services["virtual_machine"],
+ cls.testdata["virtual_machine"],
accountid=cls.account.name,
domainid=cls.account.domainid,
serviceofferingid=cls.service_offering.id,
@@ -854,7 +824,7 @@ class TestAttachVolumeISO(cloudstackTestCase):
for i in range(self.max_data_volumes):
volume = Volume.create(
self.apiclient,
- self.services["volume"],
+ self.testdata["volume"],
zoneid=self.zone.id,
account=self.account.name,
domainid=self.account.domainid,
@@ -911,7 +881,7 @@ class TestAttachVolumeISO(cloudstackTestCase):
# Create an ISO and attach it to VM
iso = Iso.create(
self.apiclient,
- self.services["iso"],
+ self.testdata["iso"],
account=self.account.name,
domainid=self.account.domainid,
)
@@ -970,11 +940,11 @@ class TestVolumes(cloudstackTestCase):
cls.testClient = super(TestVolumes, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
- cls.services = Services().services
+ cls.testdata = cls.testClient.getParsedTestDataConfig()
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
- cls.services['mode'] = cls.zone.networktype
+ cls.testdata['mode'] = cls.zone.networktype
cls._cleanup = []
cls.unsupportedStorageType = False
cls.hypervisor = cls.testClient.getHypervisorInfo()
@@ -984,36 +954,36 @@ class TestVolumes(cloudstackTestCase):
return
cls.disk_offering = DiskOffering.create(
cls.api_client,
- cls.services["disk_offering"]
+ cls.testdata["disk_offering"]
)
cls._cleanup.append(cls.disk_offering)
template = get_template(
cls.api_client,
cls.zone.id,
- cls.services["ostype"]
+ cls.testdata["ostype"]
)
- cls.services["zoneid"] = cls.zone.id
- cls.services["virtual_machine"]["zoneid"] = cls.zone.id
- cls.services["virtual_machine"]["template"] = template.id
- cls.services["virtual_machine"][
+ cls.testdata["zoneid"] = cls.zone.id
+ cls.testdata["virtual_machine"]["zoneid"] = cls.zone.id
+ cls.testdata["virtual_machine"]["template"] = template.id
+ cls.testdata["virtual_machine"][
"diskofferingid"] = cls.disk_offering.id
# Create VMs, VMs etc
cls.account = Account.create(
cls.api_client,
- cls.services["account"],
+ cls.testdata["account"],
domainid=cls.domain.id
)
cls._cleanup.append(cls.account)
cls.service_offering = ServiceOffering.create(
cls.api_client,
- cls.services["service_offering"]
+ cls.testdata["service_offering"]
)
cls._cleanup.append(cls.service_offering)
cls.virtual_machine = VirtualMachine.create(
cls.api_client,
- cls.services["virtual_machine"],
+ cls.testdata["virtual_machine"],
accountid=cls.account.name,
domainid=cls.account.domainid,
serviceofferingid=cls.service_offering.id,
@@ -1021,7 +991,7 @@ class TestVolumes(cloudstackTestCase):
cls.volume = Volume.create(
cls.api_client,
- cls.services["volume"],
+ cls.testdata["volume"],
zoneid=cls.zone.id,
account=cls.account.name,
domainid=cls.account.domainid,
@@ -1148,7 +1118,7 @@ class TestVolumes(cloudstackTestCase):
self.virtual_machine.detach_volume(self.apiclient, self.volume)
# Sleep to ensure the current state will reflected in other calls
- time.sleep(self.services["sleep"])
+ time.sleep(self.testdata["sleep"])
list_volume_response = Volume.list(
self.apiclient,
@@ -1192,7 +1162,7 @@ class TestVolumes(cloudstackTestCase):
self.apiclient.deleteVolume(cmd)
# Sleep to ensure the current state will reflected in other calls
- time.sleep(self.services["sleep"])
+ time.sleep(self.testdata["sleep"])
list_volume_response = Volume.list(
self.apiclient,
@@ -1234,7 +1204,7 @@ class TestVolumes(cloudstackTestCase):
domuser = Account.create(
apiclient=self.apiclient,
- services=self.services["account"],
+ services=self.testdata["account"],
admin=False,
domainid=dom.id
)
@@ -1257,7 +1227,7 @@ class TestVolumes(cloudstackTestCase):
vol = Volume.create(
domapiclient,
- services=self.services["volume"],
+ services=self.testdata["volume"],
zoneid=self.zone.id,
account=domuser.name,
domainid=dom.id,
@@ -1291,11 +1261,11 @@ class TestDeployVmWithCustomDisk(cloudstackTestCase):
TestDeployVmWithCustomDisk,
cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
- cls.services = Services().services
+ cls.testdata = cls.testClient.getParsedTestDataConfig()
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
- cls.services['mode'] = cls.zone.networktype
+ cls.testdata['mode'] = cls.zone.networktype
cls._cleanup = []
cls.unsupportedStorageType = False
cls.hypervisor = cls.testClient.getHypervisorInfo()
@@ -1305,30 +1275,30 @@ class TestDeployVmWithCustomDisk(cloudstackTestCase):
return
cls.disk_offering = DiskOffering.create(
cls.api_client,
- cls.services["disk_offering"],
+ cls.testdata["disk_offering"],
custom=True
)
cls._cleanup.append(cls.disk_offering)
template = get_template(
cls.api_client,
cls.zone.id,
- cls.services["ostype"]
+ cls.testdata["ostype"]
)
- cls.services["zoneid"] = cls.zone.id
- cls.services["virtual_machine"]["zoneid"] = cls.zone.id
- cls.services["virtual_machine"]["template"] = template.id
+ cls.testdata["zoneid"] = cls.zone.id
+ cls.testdata["virtual_machine"]["zoneid"] = cls.zone.id
+ cls.testdata["virtual_machine"]["template"] = template.id
# Create VMs, NAT Rules etc
cls.account = Account.create(
cls.api_client,
- cls.services["account"],
+ cls.testdata["account"],
domainid=cls.domain.id
)
cls._cleanup.append(cls.account)
cls.service_offering = ServiceOffering.create(
cls.api_client,
- cls.services["service_offering"]
+ cls.testdata["service_offering"]
)
cls._cleanup.append(cls.service_offering)
@@ -1384,12 +1354,12 @@ class TestDeployVmWithCustomDisk(cloudstackTestCase):
self.debug("custom.diskoffering.size.max: %s" % max_size)
self.debug("Creating a volume with size less than min cust disk size")
- self.services["custom_volume"]["customdisksize"] = (min_size - 1)
- self.services["custom_volume"]["zoneid"] = self.zone.id
+ self.testdata["custom_volume"]["customdisksize"] = (min_size - 1)
+ self.testdata["custom_volume"]["zoneid"] = self.zone.id
with self.assertRaises(Exception):
Volume.create_custom_disk(
self.apiclient,
- self.services["custom_volume"],
+ self.testdata["custom_volume"],
account=self.account.name,
domainid=self.account.domainid,
diskofferingid=self.disk_offering.id
@@ -1397,11 +1367,11 @@ class TestDeployVmWithCustomDisk(cloudstackTestCase):
self.debug("Create volume failed!")
self.debug("Creating a volume with size more than max cust disk size")
- self.services["custom_volume"]["customdisksize"] = (max_size + 1)
+ self.testdata["custom_volume"]["customdisksize"] = (max_size + 1)
with self.assertRaises(Exception):
Volume.create_custom_disk(
self.apiclient,
- self.services["custom_volume"],
+ self.testdata["custom_volume"],
account=self.account.name,
domainid=self.account.domainid,
diskofferingid=self.disk_offering.id
@@ -1411,18 +1381,16 @@ class TestDeployVmWithCustomDisk(cloudstackTestCase):
self.debug("Creating a volume with size more than min cust disk " +
"but less than max cust disk size"
)
- self.services["custom_volume"]["customdisksize"] = (min_size + 1)
- try:
- Volume.create_custom_disk(
- self.apiclient,
- self.services["custom_volume"],
- account=self.account.name,
- domainid=self.account.domainid,
- diskofferingid=self.disk_offering.id
- )
- self.debug("Create volume of cust disk size succeeded")
- except Exception as e:
- self.fail("Create volume failed with exception: %s" % e)
+ self.testdata["custom_volume"]["customdisksize"] = (min_size + 1)
+ Volume.create_custom_disk(
+ self.apiclient,
+ self.testdata["custom_volume"],
+ account=self.account.name,
+ domainid=self.account.domainid,
+ diskofferingid=self.disk_offering.id
+ )
+ self.debug("Create volume of cust disk size succeeded")
+
return
@@ -1433,11 +1401,11 @@ class TestMigrateVolume(cloudstackTestCase):
cls.testClient = super(TestMigrateVolume, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
- cls.services = Services().services
+ cls.testdata = cls.testClient.getParsedTestDataConfig()
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
- cls.services['mode'] = cls.zone.networktype
+ cls.testdata['mode'] = cls.zone.networktype
cls._cleanup = []
cls.unsupportedStorageType = False
cls.hypervisor = cls.testClient.getHypervisorInfo()
@@ -1447,36 +1415,36 @@ class TestMigrateVolume(cloudstackTestCase):
return
cls.disk_offering = DiskOffering.create(
cls.api_client,
- cls.services["disk_offering"]
+ cls.testdata["disk_offering"]
)
template = get_template(
cls.api_client,
cls.zone.id,
- cls.services["ostype"]
+ cls.testdata["ostype"]
)
- cls.services["zoneid"] = cls.zone.id
- cls.services["virtual_machine"]["zoneid"] = cls.zone.id
- cls.services["virtual_machine"]["template"] = template.id
- cls.services["virtual_machine"][
+ cls.testdata["zoneid"] = cls.zone.id
+ cls.testdata["virtual_machine"]["zoneid"] = cls.zone.id
+ cls.testdata["virtual_machine"]["template"] = template.id
+ cls.testdata["virtual_machine"][
"diskofferingid"] = cls.disk_offering.id
# Create VMs, VMs etc
cls.account = Account.create(
cls.api_client,
- cls.services["account"],
+ cls.testdata["account"],
domainid=cls.domain.id
)
cls.small_offering = ServiceOffering.create(
cls.api_client,
- cls.services["service_offering"]
+ cls.testdata["service_offering"]
)
cls.virtual_machine = VirtualMachine.create(
cls.api_client,
- cls.services["virtual_machine"],
+ cls.testdata["virtual_machine"],
accountid=cls.account.name,
domainid=cls.account.domainid,
serviceofferingid=cls.small_offering.id,
- mode=cls.services["mode"]
+ mode=cls.testdata["mode"]
)
cls._cleanup = [
cls.small_offering,
@@ -1519,7 +1487,7 @@ class TestMigrateVolume(cloudstackTestCase):
"""
vol = Volume.create(
self.apiclient,
- self.services["volume"],
+ self.testdata["volume"],
diskofferingid=self.disk_offering.id,
zoneid=self.zone.id,
account=self.account.name,
@@ -1535,13 +1503,11 @@ class TestMigrateVolume(cloudstackTestCase):
PASS,
"Invalid response returned for list volumes")
vol_uuid = vol_res[0].id
- try:
- self.virtual_machine.attach_volume(
- self.apiclient,
- vol
- )
- except Exception as e:
- self.fail("Attaching data disk to vm failed with error %s" % e)
+ self.virtual_machine.attach_volume(
+ self.apiclient,
+ vol
+ )
+
pools = StoragePool.listForMigration(
self.apiclient,
id=vol.id
@@ -1556,15 +1522,13 @@ class TestMigrateVolume(cloudstackTestCase):
"invalid pool response from findStoragePoolsForMigration")
pool = pools[0]
self.debug("Migrating Volume-ID: %s to Pool: %s" % (vol.id, pool.id))
- try:
- Volume.migrate(
- self.apiclient,
- volumeid=vol.id,
- storageid=pool.id,
- livemigrate='true'
- )
- except Exception as e:
- self.fail("Volume migration failed with error %s" % e)
+ Volume.migrate(
+ self.apiclient,
+ volumeid=vol.id,
+ storageid=pool.id,
+ livemigrate='true'
+ )
+
migrated_vols = Volume.list(
self.apiclient,
virtualmachineid=self.virtual_machine.id,
diff --git a/test/integration/component/test_vpc.py b/test/integration/component/test_vpc.py
index 335a4c9..2587d78 100644
--- a/test/integration/component/test_vpc.py
+++ b/test/integration/component/test_vpc.py
@@ -2517,6 +2517,8 @@ class TestVPC(cloudstackTestCase):
self.cleanup.append(vpnGw)
except Exception as e:
self.fail("Creating vpn customer gateway with hostname\
+ PR: https://github.com/apache/cloudstack/pull/955\
+ JIRA: https://issues.apache.org/jira/browse/CLOUDSTACK-8969\
Failed with error :%s" % e)
vpn_cgw_res = VpnCustomerGateway.list(
self.apiclient,
diff --git a/test/integration/component/test_vpc_vm_life_cycle.py b/test/integration/component/test_vpc_vm_life_cycle.py
index 6dc79cd..ae8ee2b 100644
--- a/test/integration/component/test_vpc_vm_life_cycle.py
+++ b/test/integration/component/test_vpc_vm_life_cycle.py
@@ -2683,226 +2683,222 @@ class TestVMLifeCycleDiffHosts(cloudstackTestCase):
@classmethod
def setUpClass(cls):
- try:
-
- cls.testClient = super(TestVMLifeCycleDiffHosts, cls).getClsTestClient()
- cls.api_client = cls.testClient.getApiClient()
+ cls.testClient = super(TestVMLifeCycleDiffHosts, cls).getClsTestClient()
+ cls.api_client = cls.testClient.getApiClient()
- cls.services = Services().services
- # Get Zone, Domain and templates
- cls.domain = get_domain(cls.api_client)
- cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
- cls.template = get_template(
- cls.api_client,
- cls.zone.id,
- cls.services["ostype"]
- )
- cls.services["virtual_machine"]["zoneid"] = cls.zone.id
- cls.services["virtual_machine"]["template"] = cls.template.id
+ cls.services = Services().services
+ # Get Zone, Domain and templates
+ cls.domain = get_domain(cls.api_client)
+ cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
+ cls.template = get_template(
+ cls.api_client,
+ cls.zone.id,
+ cls.services["ostype"]
+ )
+ cls.services["virtual_machine"]["zoneid"] = cls.zone.id
+ cls.services["virtual_machine"]["template"] = cls.template.id
# 2 hosts are needed within cluster to run the test cases and
# 3rd host is needed to run the migrate test case
# Even if only 2 hosts are present, remaining test cases will be run and
# migrate test will be skipped automatically
- cluster = cls.FindClusterWithSufficientHosts(numberofhosts = 3)
- if cluster is None:
- raise unittest.SkipTest("Skipping as unable to find a cluster with\
- sufficient number of hosts")
+ cluster = cls.FindClusterWithSufficientHosts(numberofhosts = 3)
+ if cluster is None:
+ raise unittest.SkipTest("Skipping as unable to find a cluster with\
+ sufficient number of hosts")
- hosts = list_hosts(cls.api_client, type="Routing", listall=True, clusterid=cluster.id)
- assert isinstance(hosts, list), "list_hosts should return a list response,\
- instead got %s" % hosts
+ hosts = list_hosts(cls.api_client, type="Routing", listall=True, clusterid=cluster.id)
- Host.update(cls.api_client, id=hosts[0].id, hosttags="host1")
- Host.update(cls.api_client, id=hosts[1].id, hosttags="host2")
+ assert isinstance(hosts, list), "list_hosts should return a list response,\
+ instead got %s" % hosts
- if len(hosts) > 2:
- Host.update(cls.api_client, id=hosts[2].id, hosttags="host1")
+ Host.update(cls.api_client, id=hosts[0].id, hosttags="host1")
+ Host.update(cls.api_client, id=hosts[1].id, hosttags="host2")
- cls.service_offering_1 = ServiceOffering.create(
- cls.api_client,
- cls.services["service_offering_1"]
- )
- cls.service_offering_2 = ServiceOffering.create(
- cls.api_client,
- cls.services["service_offering_2"]
- )
+ if len(hosts) > 2:
+ Host.update(cls.api_client, id=hosts[2].id, hosttags="host1")
- cls.account = Account.create(
- cls.api_client,
- cls.services["account"],
- admin=True,
- domainid=cls.domain.id
- )
+ cls.service_offering_1 = ServiceOffering.create(
+ cls.api_client,
+ cls.services["service_offering_1"]
+ )
+ cls.service_offering_2 = ServiceOffering.create(
+ cls.api_client,
+ cls.services["service_offering_2"]
+ )
- cls.vpc_off = VpcOffering.create(
- cls.api_client,
- cls.services["vpc_offering"]
- )
+ cls.account = Account.create(
+ cls.api_client,
+ cls.services["account"],
+ admin=True,
+ domainid=cls.domain.id
+ )
- cls.vpc_off.update(cls.api_client, state='Enabled')
+ cls.vpc_off = VpcOffering.create(
+ cls.api_client,
+ cls.services["vpc_offering"]
+ )
- cls.services["vpc"]["cidr"] = '10.1.1.1/16'
- cls.vpc = VPC.create(
- cls.api_client,
- cls.services["vpc"],
- vpcofferingid=cls.vpc_off.id,
- zoneid=cls.zone.id,
- account=cls.account.name,
- domainid=cls.account.domainid
- )
+ cls.vpc_off.update(cls.api_client, state='Enabled')
- cls.nw_off = NetworkOffering.create(
- cls.api_client,
- cls.services["network_offering"],
- conservemode=False
- )
- # Enable Network offering
- cls.nw_off.update(cls.api_client, state='Enabled')
+ cls.services["vpc"]["cidr"] = '10.1.1.1/16'
+ cls.vpc = VPC.create(
+ cls.api_client,
+ cls.services["vpc"],
+ vpcofferingid=cls.vpc_off.id,
+ zoneid=cls.zone.id,
+ account=cls.account.name,
+ domainid=cls.account.domainid
+ )
- # Creating network using the network offering created
- cls.network_1 = Network.create(
- cls.api_client,
- cls.services["network"],
- accountid=cls.account.name,
- domainid=cls.account.domainid,
- networkofferingid=cls.nw_off.id,
- zoneid=cls.zone.id,
- gateway='10.1.1.1',
- vpcid=cls.vpc.id
- )
- cls.nw_off_no_lb = NetworkOffering.create(
- cls.api_client,
- cls.services["network_offering_no_lb"],
- conservemode=False
- )
- # Enable Network offering
- cls.nw_off_no_lb.update(cls.api_client, state='Enabled')
+ cls.nw_off = NetworkOffering.create(
+ cls.api_client,
+ cls.services["network_offering"],
+ conservemode=False
+ )
+ # Enable Network offering
+ cls.nw_off.update(cls.api_client, state='Enabled')
- # Creating network using the network offering created
- cls.network_2 = Network.create(
+ # Creating network using the network offering created
+ cls.network_1 = Network.create(
+ cls.api_client,
+ cls.services["network"],
+ accountid=cls.account.name,
+ domainid=cls.account.domainid,
+ networkofferingid=cls.nw_off.id,
+ zoneid=cls.zone.id,
+ gateway='10.1.1.1',
+ vpcid=cls.vpc.id
+ )
+ cls.nw_off_no_lb = NetworkOffering.create(
cls.api_client,
- cls.services["network"],
- accountid=cls.account.name,
- domainid=cls.account.domainid,
- networkofferingid=cls.nw_off_no_lb.id,
- zoneid=cls.zone.id,
- gateway='10.1.2.1',
- vpcid=cls.vpc.id
+ cls.services["network_offering_no_lb"],
+ conservemode=False
)
- # Spawn an instance in that network
- cls.vm_1 = VirtualMachine.create(
- cls.api_client,
- cls.services["virtual_machine"],
- accountid=cls.account.name,
- domainid=cls.account.domainid,
- serviceofferingid=cls.service_offering_1.id,
- networkids=[str(cls.network_1.id)]
- )
- # Spawn an instance in that network
- cls.vm_2 = VirtualMachine.create(
- cls.api_client,
- cls.services["virtual_machine"],
- accountid=cls.account.name,
- domainid=cls.account.domainid,
- serviceofferingid=cls.service_offering_1.id,
- networkids=[str(cls.network_1.id)]
- )
+ # Enable Network offering
+ cls.nw_off_no_lb.update(cls.api_client, state='Enabled')
- cls.vm_3 = VirtualMachine.create(
- cls.api_client,
- cls.services["virtual_machine"],
- accountid=cls.account.name,
- domainid=cls.account.domainid,
- serviceofferingid=cls.service_offering_2.id,
- networkids=[str(cls.network_2.id)]
- )
+ # Creating network using the network offering created
+ cls.network_2 = Network.create(
+ cls.api_client,
+ cls.services["network"],
+ accountid=cls.account.name,
+ domainid=cls.account.domainid,
+ networkofferingid=cls.nw_off_no_lb.id,
+ zoneid=cls.zone.id,
+ gateway='10.1.2.1',
+ vpcid=cls.vpc.id
+ )
+ # Spawn an instance in that network
+ cls.vm_1 = VirtualMachine.create(
+ cls.api_client,
+ cls.services["virtual_machine"],
+ accountid=cls.account.name,
+ domainid=cls.account.domainid,
+ serviceofferingid=cls.service_offering_1.id,
+ networkids=[str(cls.network_1.id)]
+ )
+ # Spawn an instance in that network
+ cls.vm_2 = VirtualMachine.create(
+ cls.api_client,
+ cls.services["virtual_machine"],
+ accountid=cls.account.name,
+ domainid=cls.account.domainid,
+ serviceofferingid=cls.service_offering_1.id,
+ networkids=[str(cls.network_1.id)]
+ )
- cls.public_ip_static = PublicIPAddress.create(
- cls.api_client,
- accountid=cls.account.name,
- zoneid=cls.zone.id,
- domainid=cls.account.domainid,
- networkid=cls.network_1.id,
- vpcid=cls.vpc.id
- )
- StaticNATRule.enable(
+ cls.vm_3 = VirtualMachine.create(
cls.api_client,
- ipaddressid=cls.public_ip_static.ipaddress.id,
- virtualmachineid=cls.vm_1.id,
- networkid=cls.network_1.id
+ cls.services["virtual_machine"],
+ accountid=cls.account.name,
+ domainid=cls.account.domainid,
+ serviceofferingid=cls.service_offering_2.id,
+ networkids=[str(cls.network_2.id)]
)
- cls.public_ip_1 = PublicIPAddress.create(
- cls.api_client,
- accountid=cls.account.name,
- zoneid=cls.zone.id,
- domainid=cls.account.domainid,
- networkid=cls.network_1.id,
- vpcid=cls.vpc.id
- )
+ cls.public_ip_static = PublicIPAddress.create(
+ cls.api_client,
+ accountid=cls.account.name,
+ zoneid=cls.zone.id,
+ domainid=cls.account.domainid,
+ networkid=cls.network_1.id,
+ vpcid=cls.vpc.id
+ )
+ StaticNATRule.enable(
+ cls.api_client,
+ ipaddressid=cls.public_ip_static.ipaddress.id,
+ virtualmachineid=cls.vm_1.id,
+ networkid=cls.network_1.id
+ )
- cls.nat_rule = NATRule.create(
- cls.api_client,
- cls.vm_1,
- cls.services["natrule"],
- ipaddressid=cls.public_ip_1.ipaddress.id,
- openfirewall=False,
- networkid=cls.network_1.id,
- vpcid=cls.vpc.id
- )
+ cls.public_ip_1 = PublicIPAddress.create(
+ cls.api_client,
+ accountid=cls.account.name,
+ zoneid=cls.zone.id,
+ domainid=cls.account.domainid,
+ networkid=cls.network_1.id,
+ vpcid=cls.vpc.id
+ )
- cls.public_ip_2 = PublicIPAddress.create(
+ cls.nat_rule = NATRule.create(
+ cls.api_client,
+ cls.vm_1,
+ cls.services["natrule"],
+ ipaddressid=cls.public_ip_1.ipaddress.id,
+ openfirewall=False,
+ networkid=cls.network_1.id,
+ vpcid=cls.vpc.id
+ )
+
+ cls.public_ip_2 = PublicIPAddress.create(
+ cls.api_client,
+ accountid=cls.account.name,
+ zoneid=cls.zone.id,
+ domainid=cls.account.domainid,
+ networkid=cls.network_1.id,
+ vpcid=cls.vpc.id
+ )
+
+ cls.lb_rule = LoadBalancerRule.create(
cls.api_client,
+ cls.services["lbrule"],
+ ipaddressid=cls.public_ip_2.ipaddress.id,
accountid=cls.account.name,
- zoneid=cls.zone.id,
- domainid=cls.account.domainid,
networkid=cls.network_1.id,
- vpcid=cls.vpc.id
+ vpcid=cls.vpc.id,
+ domainid=cls.account.domainid
+ )
+ cls.lb_rule.assign(cls.api_client, [cls.vm_1, cls.vm_2])
+
+ # Opening up the ports in VPC
+ cls.nwacl_nat = NetworkACL.create(
+ cls.api_client,
+ networkid=cls.network_1.id,
+ services=cls.services["natrule"],
+ traffictype='Ingress'
)
- cls.lb_rule = LoadBalancerRule.create(
+ cls.nwacl_lb = NetworkACL.create(
+ cls.api_client,
+ networkid=cls.network_1.id,
+ services=cls.services["lbrule"],
+ traffictype='Ingress'
+ )
+ cls.services["icmp_rule"]["protocol"] = "all"
+ cls.nwacl_internet = NetworkACL.create(
cls.api_client,
- cls.services["lbrule"],
- ipaddressid=cls.public_ip_2.ipaddress.id,
- accountid=cls.account.name,
networkid=cls.network_1.id,
- vpcid=cls.vpc.id,
- domainid=cls.account.domainid
- )
- cls.lb_rule.assign(cls.api_client, [cls.vm_1, cls.vm_2])
-
- # Opening up the ports in VPC
- cls.nwacl_nat = NetworkACL.create(
- cls.api_client,
- networkid=cls.network_1.id,
- services=cls.services["natrule"],
- traffictype='Ingress'
+ services=cls.services["icmp_rule"],
+ traffictype='Egress'
)
-
- cls.nwacl_lb = NetworkACL.create(
- cls.api_client,
- networkid=cls.network_1.id,
- services=cls.services["lbrule"],
- traffictype='Ingress'
- )
- cls.services["icmp_rule"]["protocol"] = "all"
- cls.nwacl_internet = NetworkACL.create(
- cls.api_client,
- networkid=cls.network_1.id,
- services=cls.services["icmp_rule"],
- traffictype='Egress'
- )
- cls._cleanup = [
- cls.service_offering_1,
- cls.service_offering_2,
- cls.nw_off,
- cls.nw_off_no_lb,
- ]
-
- except Exception as e:
- raise Exception("Warning: Exception during setup : %s" % e)
+ cls._cleanup = [
+ cls.service_offering_1,
+ cls.service_offering_2,
+ cls.nw_off,
+ cls.nw_off_no_lb,
+ ]
return
diff --git a/test/integration/component/test_vpn_service.py b/test/integration/component/test_vpn_service.py
index 8d27624..b5b886d 100644
--- a/test/integration/component/test_vpn_service.py
+++ b/test/integration/component/test_vpn_service.py
@@ -36,7 +36,7 @@ from marvin.lib.common import (get_domain,
get_template
)
from marvin.lib.utils import cleanup_resources
-
+import subprocess
class Services:
"""Test VPN Service
diff --git a/tools/marvin/marvin/config/test_data.py b/tools/marvin/marvin/config/test_data.py
index 7e7a2fd..e9389e2 100644
--- a/tools/marvin/marvin/config/test_data.py
+++ b/tools/marvin/marvin/config/test_data.py
@@ -42,13 +42,12 @@ test_data = {
"displaytext": "Test project"
},
"publiciprange": {
- "gateway": "",
- "netmask": "",
- "startip": "",
- "endip": "",
+ "gateway": "10.6.0.254",
+ "netmask": "255.255.255.0",
+ "startip": "10.6.0.2",
+ "endip": "10.6.0.20",
"forvirtualnetwork": "true",
- "vlan": "",
- "zoneid": ""
+ "vlan": "300"
},
"private_gateway": {
"ipaddress": "172.16.1.2",
@@ -1131,12 +1130,9 @@ test_data = {
"cidrlist": '0.0.0.0/0',
},
"vpncustomergateway": {
- "ipsecpsk": "secreatKey",
- "ikepolicy": "aes128-sha1",
- "ikelifetime": "86400",
- "esppolicy": "aes128-sha1",
- "epslifetime": "3600",
- "dpd": "false"
+ "esppolicy": "3des-md5;modp1536",
+ "ikepolicy": "3des-md5;modp1536",
+ "ipsecpsk": "ipsecpsk"
},
"vlan_ip_range": {
"startip": "",
@@ -1939,7 +1935,7 @@ test_data = {
"bootable": True,
"ispublic": False,
"url": "http://dl.openvm.eu/cloudstack/iso/TinyCore-8.0.iso",
- "ostype": 'CentOS 6.3 (64-bit)',
+ "ostype": 'Other Linux (64-bit)',
"mode": 'HTTP_DOWNLOAD'
},
"setHostConfigurationForIngressRule": False,
diff --git a/tools/marvin/marvin/lib/base.py b/tools/marvin/marvin/lib/base.py
index f66a209..2c5f534 100755
--- a/tools/marvin/marvin/lib/base.py
+++ b/tools/marvin/marvin/lib/base.py
@@ -999,7 +999,7 @@ class Volume:
cmd.zoneid = services["zoneid"]
if "size" in services:
cmd.size = services["size"]
- if services["ispublic"]:
+ if "ispublic" in services:
cmd.ispublic = services["ispublic"]
else:
cmd.ispublic = False
--
To stop receiving notification emails like this one, please contact
['"commits@cloudstack.apache.org" <co...@cloudstack.apache.org>'].