Posted to commits@cloudstack.apache.org by wi...@apache.org on 2015/01/02 15:35:53 UTC

[01/50] [abbrv] git commit: updated refs/heads/reporter to b26f3fc

Repository: cloudstack
Updated Branches:
  refs/heads/reporter 74f4d8248 -> b26f3fcb0 (forced update)


CLOUDSTACK-6744 > UI > zone wizard > baremetal hypervisor > support EIP ELB feature.


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/923c65d7
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/923c65d7
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/923c65d7

Branch: refs/heads/reporter
Commit: 923c65d7ce791e86d2e949a9d55bfc666bda1cfe
Parents: ad258cc
Author: Jessica Wang <je...@apache.org>
Authored: Thu Dec 18 18:13:14 2014 -0800
Committer: Jessica Wang <je...@apache.org>
Committed: Thu Dec 18 18:15:06 2014 -0800

----------------------------------------------------------------------
 ui/scripts/zoneWizard.js | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/923c65d7/ui/scripts/zoneWizard.js
----------------------------------------------------------------------
diff --git a/ui/scripts/zoneWizard.js b/ui/scripts/zoneWizard.js
index 32ffb9d..13bc3ab 100755
--- a/ui/scripts/zoneWizard.js
+++ b/ui/scripts/zoneWizard.js
@@ -597,7 +597,7 @@
                                         });
 
                                         if (thisNetworkOffering.havingEIP == true && thisNetworkOffering.havingELB == true) { //EIP ELB
-                                            if (args.hypervisor == "VMware" || args.hypervisor == "BareMetal") { //VMware, BareMetal don't support EIP ELB
+                                            if (args.hypervisor == "VMware") { //VMware does not support EIP ELB
                                                 return true; //move to next item in $.each() loop
                                             }
                                             if (args.context.zones[0]["network-model"] == "Advanced" && args.context.zones[0]["zone-advanced-sg-enabled"] == "on") { // Advanced SG-enabled zone doesn't support EIP ELB


[18/50] [abbrv] git commit: updated refs/heads/reporter to b26f3fc

Posted by wi...@apache.org.
CLOUDSTACK-8107. Failed to create a snapshot from a volume when the task is performed repeatedly on zone-wide primary storage.
While taking a snapshot of a volume, CS chooses the endpoint for the backup snapshot operation by selecting any host that has the storage containing the volume mounted on it.
Instead, if the volume is attached to a VM, the endpoint chosen by CS should be the host on which that VM resides.
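
A minimal sketch of the selection order this change describes, using simplified illustrative types (Vm, Volume and selectBackupSnapshotHost below are hypothetical stand-ins, not CloudStack classes): prefer the host of the VM the volume is attached to, and only fall back to any host that has the primary store mounted.

    /** Illustrative sketch of the endpoint-selection preference; not CloudStack code. */
    final class EndpointSelectionSketch {

        record Vm(Long hostId, Long lastHostId, boolean running) {}
        record Volume(Vm attachedVm, java.util.List<Long> hostsWithStoreMounted) {}

        /** Prefer the host that runs (or last ran) the attached VM; otherwise any host with the store mounted. */
        static Long selectBackupSnapshotHost(Volume volume, String hypervisor) {
            Vm vm = volume.attachedVm();
            if ("KVM".equals(hypervisor) && vm != null && vm.running()) {
                return vm.hostId();                        // running KVM VM: use its current host
            }
            if ("VMware".equals(hypervisor) && vm != null) {
                Long hostId = vm.hostId() != null ? vm.hostId() : vm.lastHostId();
                if (hostId != null) {
                    return hostId;                         // VMware: current host, else last known host
                }
            }
            // Fall back to any host that has the zone-wide primary store mounted.
            return volume.hostsWithStoreMounted().isEmpty() ? null : volume.hostsWithStoreMounted().get(0);
        }
    }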


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/a75a4313
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/a75a4313
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/a75a4313

Branch: refs/heads/reporter
Commit: a75a43137316a60b20760aa5015d97f55520fd16
Parents: ff37fa5
Author: Likitha Shetty <li...@citrix.com>
Authored: Mon Oct 13 16:26:47 2014 +0530
Committer: Sanjay Tripathi <sa...@citrix.com>
Committed: Tue Dec 23 09:48:35 2014 +0530

----------------------------------------------------------------------
 .../storage/endpoint/DefaultEndPointSelector.java       | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/a75a4313/engine/storage/src/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java
----------------------------------------------------------------------
diff --git a/engine/storage/src/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java b/engine/storage/src/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java
index 18c8991..7067b8c 100644
--- a/engine/storage/src/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java
+++ b/engine/storage/src/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java
@@ -205,13 +205,21 @@ public class DefaultEndPointSelector implements EndPointSelector {
     public EndPoint select(DataObject srcData, DataObject destData, StorageAction action) {
         if (action == StorageAction.BACKUPSNAPSHOT && srcData.getDataStore().getRole() == DataStoreRole.Primary) {
             SnapshotInfo srcSnapshot = (SnapshotInfo)srcData;
+            VolumeInfo volumeInfo = srcSnapshot.getBaseVolume();
+            VirtualMachine vm = volumeInfo.getAttachedVM();
             if (srcSnapshot.getHypervisorType() == Hypervisor.HypervisorType.KVM) {
-                VolumeInfo volumeInfo = srcSnapshot.getBaseVolume();
-                VirtualMachine vm = volumeInfo.getAttachedVM();
                 if (vm != null && vm.getState() == VirtualMachine.State.Running) {
                     return getEndPointFromHostId(vm.getHostId());
                 }
             }
+            if (srcSnapshot.getHypervisorType() == Hypervisor.HypervisorType.VMware) {
+                if (vm != null) {
+                    Long hostId = vm.getHostId() != null ? vm.getHostId() : vm.getLastHostId();
+                    if (hostId != null) {
+                        return getEndPointFromHostId(hostId);
+                    }
+                }
+            }
         }
         return select(srcData, destData);
     }


[42/50] [abbrv] git commit: updated refs/heads/reporter to b26f3fc

Posted by wi...@apache.org.
CLOUDSTACK-8125. VM fails to start on the first attempt after a cold migration.
Update the volume chain_info to NULL during cold migration.
Otherwise, during VM start, CCP will configure and try to power on the VM with the wrong disk information.
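
A hedged sketch of the idea behind the fix, with a simplified VolumeRecord type (not the actual VolumeVO API): the disk-chain description recorded for the source datastore has no meaning on the destination pool, so the copy created for the destination must drop it rather than carry it over.

    /** Illustrative sketch: building the destination-pool copy of a cold-migrated volume record. */
    final class ColdMigrationSketch {

        static final class VolumeRecord {
            Long instanceId;
            Long poolId;
            String folder;
            String chainInfo;   // hypervisor-specific disk-chain description, valid only for one datastore

            VolumeRecord copy() {
                VolumeRecord c = new VolumeRecord();
                c.instanceId = instanceId;
                c.poolId = poolId;
                c.folder = folder;
                c.chainInfo = chainInfo;
                return c;
            }
        }

        static VolumeRecord forDestinationPool(VolumeRecord src, long destPoolId, String destFolder) {
            VolumeRecord dest = src.copy();
            dest.instanceId = null;     // not attached until the VM is started again
            dest.chainInfo = null;      // chain info described the source datastore; stale after a cold migration
            dest.poolId = destPoolId;
            dest.folder = destFolder;
            return dest;
        }
    }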


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/7b32b8a2
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/7b32b8a2
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/7b32b8a2

Branch: refs/heads/reporter
Commit: 7b32b8a26808a546d1dbfdf4728c6228a41852b6
Parents: 1c0bf32
Author: Likitha Shetty <li...@citrix.com>
Authored: Sat Dec 6 14:43:03 2014 +0530
Committer: Likitha Shetty <li...@citrix.com>
Committed: Wed Dec 24 14:33:51 2014 +0530

----------------------------------------------------------------------
 .../org/apache/cloudstack/storage/volume/VolumeServiceImpl.java    | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/7b32b8a2/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
----------------------------------------------------------------------
diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
index c00785e..5527c3d 100644
--- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
+++ b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
@@ -830,7 +830,7 @@ public class VolumeServiceImpl implements VolumeService {
 
         VolumeVO newVol = new VolumeVO(volume);
         newVol.setInstanceId(null);
-        newVol.setPoolId(pool.getId());
+        newVol.setChainInfo(null);
         newVol.setFolder(folder);
         newVol.setPodId(pool.getPodId());
         newVol.setPoolId(pool.getId());


[33/50] [abbrv] git commit: updated refs/heads/reporter to b26f3fc

Posted by wi...@apache.org.
CLOUDSTACK-8118. Root volume migration fails with 'No such disk device' on a vCenter 5.5 setup.
When an exact match is requested while locating a disk chain by name, do not trim the snapshot postfix appended to the disk name.
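
A small sketch of the matching rule the fix enforces. The "-000001"-style delta postfix pattern used by trimSnapshotDeltaPostfix below is an assumption for illustration, not the exact CloudStack implementation: when an exact match is requested, compare against the untrimmed base name; only the substring match uses the trimmed name.

    /** Illustrative sketch of exact vs. trimmed disk-name matching. */
    final class DiskMatchSketch {

        /** Assumed form of the snapshot delta postfix, e.g. "ROOT-12-000002" -> "ROOT-12". */
        static String trimSnapshotDeltaPostfix(String baseName) {
            return baseName.replaceAll("-\\d{6}$", "");
        }

        static boolean matches(String backingBaseName, String srcBaseName, boolean matchExactly) {
            if (matchExactly) {
                return backingBaseName.equalsIgnoreCase(srcBaseName);                // keep the postfix intact
            }
            return backingBaseName.contains(trimSnapshotDeltaPostfix(srcBaseName));  // looser chain lookup
        }
    }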


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/ddcae8a9
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/ddcae8a9
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/ddcae8a9

Branch: refs/heads/reporter
Commit: ddcae8a9306f5f7dd416b859e59fd10094c91551
Parents: 521258b
Author: Likitha Shetty <li...@citrix.com>
Authored: Fri Nov 28 11:09:01 2014 +0530
Committer: Likitha Shetty <li...@citrix.com>
Committed: Wed Dec 24 10:19:22 2014 +0530

----------------------------------------------------------------------
 .../cloud/hypervisor/vmware/resource/VmwareResource.java |  2 +-
 .../com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java | 11 +++++++----
 2 files changed, 8 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/ddcae8a9/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
index 5a16f03..53cdb99 100644
--- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
+++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
@@ -3205,7 +3205,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
     }
 
     private int getVirtualDiskInfo(VirtualMachineMO vmMo, String srcDiskName) throws Exception {
-        Pair<VirtualDisk, String> deviceInfo = vmMo.getDiskDevice(srcDiskName, true);
+        Pair<VirtualDisk, String> deviceInfo = vmMo.getDiskDevice(srcDiskName, false);
         if (deviceInfo == null) {
             throw new Exception("No such disk device: " + srcDiskName);
         }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/ddcae8a9/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java
----------------------------------------------------------------------
diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java
index 471b4a8..be39bfb 100644
--- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java
+++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java
@@ -1931,10 +1931,13 @@ public class VirtualMachineMO extends BaseMO {
 
         DatastoreFile dsSrcFile = new DatastoreFile(vmdkDatastorePath);
         String srcBaseName = dsSrcFile.getFileBaseName();
+        String trimmedSrcBaseName = trimSnapshotDeltaPostfix(srcBaseName);
 
-        srcBaseName = trimSnapshotDeltaPostfix(srcBaseName);
-
-        s_logger.info("Look for disk device info from volume : " + vmdkDatastorePath + " with trimmed base name: " + srcBaseName);
+        if (matchExactly) {
+            s_logger.info("Look for disk device info from volume : " + vmdkDatastorePath + " with base name: " + srcBaseName);
+        } else {
+            s_logger.info("Look for disk device info from volume : " + vmdkDatastorePath + " with trimmed base name: " + trimmedSrcBaseName);
+        }
 
         if (devices != null && devices.size() > 0) {
             for (VirtualDevice device : devices) {
@@ -1957,7 +1960,7 @@ public class VirtualMachineMO extends BaseMO {
                                     return new Pair<VirtualDisk, String>((VirtualDisk)device, deviceNumbering);
                                 }
                             } else {
-                                if (backingBaseName.contains(srcBaseName)) {
+                                if (backingBaseName.contains(trimmedSrcBaseName)) {
                                     String deviceNumbering = getDeviceBusName(devices, device);
 
                                     s_logger.info("Disk backing : " + diskBackingInfo.getFileName() + " matches ==> " + deviceNumbering);


[17/50] [abbrv] git commit: updated refs/heads/reporter to b26f3fc

Posted by wi...@apache.org.
CLOUDSTACK-8095: Fixed ISO attach issue in test_escalations_instances.py

Signed-off-by: SrikanteswaraRao Talluri <ta...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/ff37fa5d
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/ff37fa5d
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/ff37fa5d

Branch: refs/heads/reporter
Commit: ff37fa5de0b241abf12a782effc50fbdf9f0927a
Parents: f52f968
Author: Ashutosh K <as...@clogeny.com>
Authored: Fri Dec 19 12:51:07 2014 +0530
Committer: SrikanteswaraRao Talluri <ta...@apache.org>
Committed: Mon Dec 22 13:03:20 2014 +0530

----------------------------------------------------------------------
 .../component/test_escalations_instances.py     | 182 ++++++++++---------
 1 file changed, 96 insertions(+), 86 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/ff37fa5d/test/integration/component/test_escalations_instances.py
----------------------------------------------------------------------
diff --git a/test/integration/component/test_escalations_instances.py b/test/integration/component/test_escalations_instances.py
index 8724ab7..1e5ec53 100644
--- a/test/integration/component/test_escalations_instances.py
+++ b/test/integration/component/test_escalations_instances.py
@@ -1577,8 +1577,7 @@ class TestListInstances(cloudstackTestCase):
         new_keypair = SSHKeyPair.register(
             self.userapiclient,
             name="keypair1",
-            publickey="ssh-rsa: e6:9a:1e:b5:98:75:88:5d:56:bc:92:7b:43:48:05:b2"
-            )
+            publickey="ssh-rsa: e6:9a:1e:b5:98:75:88:5d:56:bc:92:7b:43:48:05:b2")
         self.assertIsNotNone(
             new_keypair,
             "New Key pair generation failed"
@@ -2042,27 +2041,27 @@ class TestListInstances(cloudstackTestCase):
             )
             status = validateList(list_vms_after)
             self.assertEquals(
-            PASS,
-            status[0],
-            "Listing of VM failed"
+                PASS,
+                status[0],
+                "Listing of VM failed"
             )
             vm = list_vms_after[0]
             # Verifying that VM nics size is 1 now
             vm_nics_after = vm.nic
             self.assertIsNotNone(
-            vm_nics_after,
-            "Nic not found for the deployed VM"
+                vm_nics_after,
+                "Nic not found for the deployed VM"
             )
             self.assertEquals(
-            1,
-            len(vm_nics_after),
-            "VM NIC's count is not matching"
+                1,
+                len(vm_nics_after),
+                "VM NIC's count is not matching"
             )
             # Verifying the nic network is same as the default nic network
             self.assertEquals(
-            network2.id,
-            vm_nics_after[0].networkid,
-            "VM NIC is not same as expected"
+                network2.id,
+                vm_nics_after[0].networkid,
+                "VM NIC is not same as expected"
             )
         return
 
@@ -2125,23 +2124,23 @@ class TestInstances(cloudstackTestCase):
         self.apiClient = self.testClient.getApiClient()
         self.cleanup = []
         self.account = Account.create(
-                self.apiClient,
-                self.services["account"],
-                domainid=self.domain.id
-            )
-            # Getting authentication for user in newly created Account
+            self.apiClient,
+            self.services["account"],
+            domainid=self.domain.id
+        )
+        # Getting authentication for user in newly created Account
         self.user = self.account.user[0]
         self.userapiclient = self.testClient.getUserApiClient(
-                self.user.username,
-                self.domain.name)
+            self.user.username,
+            self.domain.name)
         # Updating resource Limits
         for i in range(0, 8):
             Resources.updateLimit(
-                    self.api_client,
-                    account=self.account.name,
-                    domainid=self.domain.id,
-                    max=-1,
-                    resourcetype=i
+                self.api_client,
+                account=self.account.name,
+                domainid=self.domain.id,
+                max=-1,
+                resourcetype=i
             )
         self.cleanup.append(self.account)
 
@@ -2239,6 +2238,7 @@ class TestInstances(cloudstackTestCase):
             vm_created,
             "VM creation failed"
         )
+        self.cleanup.append(vm_created)
         # Listing all the VMs for a user again for matching zone
         list_vms_after = VirtualMachine.list(
             self.userapiclient,
@@ -2268,60 +2268,70 @@ class TestInstances(cloudstackTestCase):
             isofilter="executable",
             zoneid=self.zone.id
         )
-        # Verifying if size of the list is >= 1
-        if isos_list is not None:
-            iso_toattach = isos_list[0]
-            # Attaching ISO listed to VM deployed
-            VirtualMachine.attach_iso(
-                vm_created,
-                self.userapiclient,
-                iso_toattach
-            )
-            list_vm = VirtualMachine.list(
-                self.userapiclient,
-                id=vm_created.id
-            )
-            status = validateList(list_vm)
-            self.assertEquals(
-                PASS,
-                status[0],
-                "VM listing by Id failed"
-            )
-            # Verifying that attached ISO details are present in VM
-            self.assertEquals(
-                iso_toattach.name,
-                list_vm[0].isoname,
-                "Attached ISO name is not matching"
-            )
-            self.assertEquals(
-                iso_toattach.displaytext,
-                list_vm[0].isodisplaytext,
-                "Attached ISO display is not matching"
-            )
-            # Detaching ISO from VM
-            VirtualMachine.detach_iso(
-                vm_created,
-                self.userapiclient
-            )
-            list_vm = VirtualMachine.list(
-                self.userapiclient,
-                id=vm_created.id
-            )
-            status = validateList(list_vm)
-            self.assertEquals(
-                PASS,
-                status[0],
-                "VM listing by Id failed"
-            )
-            # Verifying that ISO details are NOT present in VM
-            self.assertIsNone(
-                list_vm[0].isoname,
-                "ISO not detached from VM"
-            )
+
+        self.assertEqual(validateList(isos_list)[0],
+                         PASS,
+                         "isos list validation failed")
+
+        if self.hypervisor.lower() == "xenserver":
+            isoDesc = "xen"
         else:
-            self.fail(
-                "Executable ISO in Ready is not found in the given setup")
+            isoDesc = "vmware"
+
+        validIsosToAttach = [iso for iso in isos_list
+                             if isoDesc in iso.displaytext.lower()]
+        if not validIsosToAttach:
+            self.skipTest(
+                "Valid ISO not present in setup suitable to attach to VM")
 
+        iso_toattach = validIsosToAttach[0]
+        # Attaching ISO listed to VM deployed
+        VirtualMachine.attach_iso(
+            vm_created,
+            self.userapiclient,
+            iso_toattach
+        )
+        list_vm = VirtualMachine.list(
+            self.userapiclient,
+            id=vm_created.id
+        )
+        status = validateList(list_vm)
+        self.assertEquals(
+            PASS,
+            status[0],
+            "VM listing by Id failed"
+        )
+        # Verifying that attached ISO details are present in VM
+        self.assertEquals(
+            iso_toattach.name,
+            list_vm[0].isoname,
+            "Attached ISO name is not matching"
+        )
+        self.assertEquals(
+            iso_toattach.displaytext,
+            list_vm[0].isodisplaytext,
+            "Attached ISO display is not matching"
+        )
+        # Detaching ISO from VM
+        VirtualMachine.detach_iso(
+            vm_created,
+            self.userapiclient
+        )
+        list_vm = VirtualMachine.list(
+            self.userapiclient,
+            id=vm_created.id
+        )
+        status = validateList(list_vm)
+        self.assertEquals(
+            PASS,
+            status[0],
+            "VM listing by Id failed"
+        )
+        # Verifying that ISO details are NOT present in VM
+        self.assertIsNone(
+            list_vm[0].isoname,
+            "ISO not detached from VM"
+        )
         return
 
     @attr(tags=["advanced", "basic"], required_hardware="true")
@@ -2870,9 +2880,9 @@ class TestInstances(cloudstackTestCase):
         )
 
         self.assertEqual(
-                len(volumes),
-                len(list_volumes_page1) - len(list_data_disks),
-                "The volumes number should match with (volumes initially\
+            len(volumes),
+            len(list_volumes_page1) - len(list_data_disks),
+            "The volumes number should match with (volumes initially\
                         present minus volumes detached")
 
         return
@@ -2980,11 +2990,11 @@ class TestInstances(cloudstackTestCase):
             if service_offerings_list is not None:
                 for i in range(0, len(service_offerings_list)):
                     if not ((current_so.cpunumber >
-                            service_offerings_list[i].cpunumber or
+                             service_offerings_list[i].cpunumber or
                              current_so.cpuspeed >
-                            service_offerings_list[i].cpuspeed or
+                             service_offerings_list[i].cpuspeed or
                              current_so.memory >
-                            service_offerings_list[i].memory) or
+                             service_offerings_list[i].memory) or
                             (current_so.cpunumber ==
                                 service_offerings_list[i].cpunumber and
                              current_so.cpuspeed ==
@@ -2993,9 +3003,9 @@ class TestInstances(cloudstackTestCase):
                                 service_offerings_list[i].memory)):
                         if(current_so.storagetype ==
                                 service_offerings_list[i].storagetype):
-                                so_exists = True
-                                new_so = service_offerings_list[i]
-                                break
+                            so_exists = True
+                            new_so = service_offerings_list[i]
+                            break
             # If service offering does not exists, then creating one service
             # offering for scale up
             if not so_exists:


[15/50] [abbrv] git commit: updated refs/heads/reporter to b26f3fc

Posted by wi...@apache.org.
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/f52f968c/test/integration/component/test_vpc.py
----------------------------------------------------------------------
diff --git a/test/integration/component/test_vpc.py b/test/integration/component/test_vpc.py
index 13f0eff..58960a9 100644
--- a/test/integration/component/test_vpc.py
+++ b/test/integration/component/test_vpc.py
@@ -17,159 +17,178 @@
 
 """ Component tests for VPC functionality
 """
-#Import Local Modules
+# Import Local Modules
 from nose.plugins.attrib import attr
-from marvin.cloudstackTestCase import *
+from marvin.cloudstackTestCase import cloudstackTestCase
 from marvin.cloudstackException import CloudstackAPIException
-from marvin.cloudstackAPI import *
-from marvin.lib.utils import *
-from marvin.lib.base import *
-from marvin.lib.common import *
+from marvin.cloudstackAPI import updateZone
+from marvin.lib.utils import cleanup_resources
+from marvin.lib.base import (Account,
+                             VPC,
+                             VpcOffering,
+                             VirtualMachine,
+                             ServiceOffering,
+                             Network,
+                             NetworkOffering,
+                             PublicIPAddress,
+                             LoadBalancerRule,
+                             Router,
+                             NetworkACL,
+                             NATRule,
+                             Zone,
+                             StaticNATRule)
+from marvin.lib.common import (get_domain,
+                               get_zone,
+                               get_template,
+                               list_configurations)
+import time
 
 
 class Services:
+
     """Test VPC services
     """
 
     def __init__(self):
         self.services = {
-                         "account": {
-                                    "email": "test@test.com",
-                                    "firstname": "Test",
-                                    "lastname": "User",
-                                    "username": "test",
-                                    # Random characters are appended for unique
-                                    # username
-                                    "password": "password",
-                                    },
-                          "domain_admin": {
-                                    "email": "domain@admin.com",
-                                    "firstname": "Domain",
-                                    "lastname": "Admin",
-                                    "username": "DoA",
-                                    # Random characters are appended for unique
-                                    # username
-                                    "password": "password",
-                                    },
-                          "service_offering": {
-                                    "name": "Tiny Instance",
-                                    "displaytext": "Tiny Instance",
-                                    "cpunumber": 1,
-                                    "cpuspeed": 100,
-                                    "memory": 128,
-                                    },
-                         "network_offering": {
-                                    "name": 'VPC Network offering',
-                                    "displaytext": 'VPC Network off',
-                                    "guestiptype": 'Isolated',
-                                    "supportedservices": 'Vpn,Dhcp,Dns,SourceNat,PortForwarding,Lb,UserData,StaticNat,NetworkACL',
-                                    "traffictype": 'GUEST',
-                                    "availability": 'Optional',
-                                    "useVpc": 'on',
-                                    "serviceProviderList": {
-                                            "Vpn": 'VpcVirtualRouter',
-                                            "Dhcp": 'VpcVirtualRouter',
-                                            "Dns": 'VpcVirtualRouter',
-                                            "SourceNat": 'VpcVirtualRouter',
-                                            "PortForwarding": 'VpcVirtualRouter',
-                                            "Lb": 'VpcVirtualRouter',
-                                            "UserData": 'VpcVirtualRouter',
-                                            "StaticNat": 'VpcVirtualRouter',
-                                            "NetworkACL": 'VpcVirtualRouter'
-                                        },
-                                },
-                         "network_offering_no_lb": {
-                                    "name": 'VPC Network offering',
-                                    "displaytext": 'VPC Network off',
-                                    "guestiptype": 'Isolated',
-                                    "supportedservices": 'Vpn,Dhcp,Dns,SourceNat,PortForwarding,UserData,StaticNat,NetworkACL',
-                                    "traffictype": 'GUEST',
-                                    "availability": 'Optional',
-                                    "useVpc": 'on',
-                                    "serviceProviderList": {
-                                            "Vpn": 'VpcVirtualRouter',
-                                            "Dhcp": 'VpcVirtualRouter',
-                                            "Dns": 'VpcVirtualRouter',
-                                            "SourceNat": 'VpcVirtualRouter',
-                                            "PortForwarding": 'VpcVirtualRouter',
-                                            "UserData": 'VpcVirtualRouter',
-                                            "StaticNat": 'VpcVirtualRouter',
-                                            "NetworkACL": 'VpcVirtualRouter'
-                                        },
-                                },
-                         "vpc_offering": {
-                                    "name": 'VPC off',
-                                    "displaytext": 'VPC off',
-                                    "supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,Vpn,Lb,UserData,StaticNat,NetworkACL',
-                                },
-                         "vpc": {
-                                 "name": "TestVPC",
-                                 "displaytext": "TestVPC",
-                                 "cidr": '10.0.0.1/24'
-                                 },
-                         "vpc_no_name": {
-                                 "displaytext": "TestVPC",
-                                 "cidr": '10.0.0.1/24'
-                                 },
-                         "network": {
-                                  "name": "Test Network",
-                                  "displaytext": "Test Network",
-                                  "netmask": '255.255.255.0'
-                                },
-                         "lbrule": {
-                                    "name": "SSH",
-                                    "alg": "leastconn",
-                                    # Algorithm used for load balancing
-                                    "privateport": 22,
-                                    "publicport": 2222,
-                                    "openfirewall": False,
-                                    "startport": 22,
-                                    "endport": 2222,
-                                    "protocol": "TCP",
-                                    "cidrlist": '0.0.0.0/0',
-                                },
-                         "natrule": {
-                                    "privateport": 22,
-                                    "publicport": 22,
-                                    "startport": 22,
-                                    "endport": 22,
-                                    "protocol": "TCP",
-                                    "cidrlist": '0.0.0.0/0',
-                                },
-                         "fw_rule": {
-                                    "startport": 1,
-                                    "endport": 6000,
-                                    "cidr": '0.0.0.0/0',
-                                    # Any network (For creating FW rule)
-                                    "protocol": "TCP"
-                                },
-                         "icmp_rule": {
-                                    "icmptype": -1,
-                                    "icmpcode": -1,
-                                    "cidrlist": '0.0.0.0/0',
-                                    "protocol": "ICMP"
-                                },
-                         "virtual_machine": {
-                                    "displayname": "Test VM",
-                                    "username": "root",
-                                    "password": "password",
-                                    "ssh_port": 22,
-                                    "hypervisor": 'XenServer',
-                                    # Hypervisor type should be same as
-                                    # hypervisor type of cluster
-                                    "privateport": 22,
-                                    "publicport": 22,
-                                    "protocol": 'TCP',
-                                },
-                        "domain": {
-                                    "name": "TestDomain"
-                                },
-                         "ostype": 'CentOS 5.3 (64-bit)',
-                         # Cent OS 5.3 (64 bit)
-                         "sleep": 60,
-                         "timeout": 10,
-                         "mode": 'advanced'
-                    }
+            "account": {
+                "email": "test@test.com",
+                "firstname": "Test",
+                "lastname": "User",
+                "username": "test",
+                # Random characters are appended for unique
+                # username
+                "password": "password",
+            },
+            "domain_admin": {
+                "email": "domain@admin.com",
+                "firstname": "Domain",
+                "lastname": "Admin",
+                "username": "DoA",
+                # Random characters are appended for unique
+                # username
+                "password": "password",
+            },
+            "service_offering": {
+                "name": "Tiny Instance",
+                "displaytext": "Tiny Instance",
+                "cpunumber": 1,
+                "cpuspeed": 100,
+                "memory": 128,
+            },
+            "network_offering": {
+                "name": 'VPC Network offering',
+                "displaytext": 'VPC Network off',
+                "guestiptype": 'Isolated',
+                "supportedservices": 'Vpn,Dhcp,Dns,SourceNat,PortForwarding,Lb,UserData,StaticNat,NetworkACL',
+                "traffictype": 'GUEST',
+                "availability": 'Optional',
+                "useVpc": 'on',
+                "serviceProviderList": {
+                    "Vpn": 'VpcVirtualRouter',
+                    "Dhcp": 'VpcVirtualRouter',
+                    "Dns": 'VpcVirtualRouter',
+                    "SourceNat": 'VpcVirtualRouter',
+                    "PortForwarding": 'VpcVirtualRouter',
+                    "Lb": 'VpcVirtualRouter',
+                    "UserData": 'VpcVirtualRouter',
+                    "StaticNat": 'VpcVirtualRouter',
+                    "NetworkACL": 'VpcVirtualRouter'
+                },
+            },
+            "network_offering_no_lb": {
+                "name": 'VPC Network offering',
+                "displaytext": 'VPC Network off',
+                "guestiptype": 'Isolated',
+                "supportedservices": 'Vpn,Dhcp,Dns,SourceNat,PortForwarding,UserData,StaticNat,NetworkACL',
+                "traffictype": 'GUEST',
+                "availability": 'Optional',
+                "useVpc": 'on',
+                "serviceProviderList": {
+                    "Vpn": 'VpcVirtualRouter',
+                    "Dhcp": 'VpcVirtualRouter',
+                    "Dns": 'VpcVirtualRouter',
+                    "SourceNat": 'VpcVirtualRouter',
+                    "PortForwarding": 'VpcVirtualRouter',
+                    "UserData": 'VpcVirtualRouter',
+                    "StaticNat": 'VpcVirtualRouter',
+                    "NetworkACL": 'VpcVirtualRouter'
+                },
+            },
+            "vpc_offering": {
+                "name": 'VPC off',
+                "displaytext": 'VPC off',
+                "supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,Vpn,Lb,UserData,StaticNat,NetworkACL',
+            },
+            "vpc": {
+                "name": "TestVPC",
+                "displaytext": "TestVPC",
+                "cidr": '10.0.0.1/24'
+            },
+            "vpc_no_name": {
+                "displaytext": "TestVPC",
+                "cidr": '10.0.0.1/24'
+            },
+            "network": {
+                "name": "Test Network",
+                "displaytext": "Test Network",
+                "netmask": '255.255.255.0'
+            },
+            "lbrule": {
+                "name": "SSH",
+                "alg": "leastconn",
+                # Algorithm used for load balancing
+                "privateport": 22,
+                "publicport": 2222,
+                "openfirewall": False,
+                "startport": 22,
+                "endport": 2222,
+                "protocol": "TCP",
+                "cidrlist": '0.0.0.0/0',
+            },
+            "natrule": {
+                "privateport": 22,
+                "publicport": 22,
+                "startport": 22,
+                "endport": 22,
+                "protocol": "TCP",
+                "cidrlist": '0.0.0.0/0',
+            },
+            "fw_rule": {
+                "startport": 1,
+                "endport": 6000,
+                "cidr": '0.0.0.0/0',
+                # Any network (For creating FW rule)
+                "protocol": "TCP"
+            },
+            "icmp_rule": {
+                "icmptype": -1,
+                "icmpcode": -1,
+                "cidrlist": '0.0.0.0/0',
+                "protocol": "ICMP"
+            },
+            "virtual_machine": {
+                "displayname": "Test VM",
+                "username": "root",
+                "password": "password",
+                "ssh_port": 22,
+                "hypervisor": 'XenServer',
+                # Hypervisor type should be same as
+                # hypervisor type of cluster
+                "privateport": 22,
+                "publicport": 22,
+                "protocol": 'TCP',
+            },
+            "domain": {
+                "name": "TestDomain"
+            },
+            "ostype": 'CentOS 5.3 (64-bit)',
+            # Cent OS 5.3 (64 bit)
+            "sleep": 60,
+            "timeout": 10,
+            "mode": 'advanced'
+        }
+
 
 class TestVPC(cloudstackTestCase):
 
@@ -183,31 +202,31 @@ class TestVPC(cloudstackTestCase):
         cls.domain = get_domain(cls.api_client)
         cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
         cls.template = get_template(
-                            cls.api_client,
-                            cls.zone.id,
-                            cls.services["ostype"]
-                            )
+            cls.api_client,
+            cls.zone.id,
+            cls.services["ostype"]
+        )
         cls.services["virtual_machine"]["zoneid"] = cls.zone.id
         cls.services["virtual_machine"]["template"] = cls.template.id
 
         cls.service_offering = ServiceOffering.create(
-                                            cls.api_client,
-                                            cls.services["service_offering"]
-                                            )
+            cls.api_client,
+            cls.services["service_offering"]
+        )
         cls.vpc_off = VpcOffering.create(
-                                     cls.api_client,
-                                     cls.services["vpc_offering"]
-                                     )
+            cls.api_client,
+            cls.services["vpc_offering"]
+        )
         cls.vpc_off.update(cls.api_client, state='Enabled')
         cls._cleanup = [
-                        cls.service_offering,
-                        ]
+            cls.service_offering,
+        ]
         return
 
     @classmethod
     def tearDownClass(cls):
         try:
-            #Cleanup resources used
+            # Cleanup resources used
             cleanup_resources(cls.api_client, cls._cleanup)
         except Exception as e:
             raise Exception("Warning: Exception during cleanup : %s" % e)
@@ -217,11 +236,11 @@ class TestVPC(cloudstackTestCase):
         self.apiclient = self.testClient.getApiClient()
         self.dbclient = self.testClient.getDbConnection()
         self.account = Account.create(
-                                     self.apiclient,
-                                     self.services["account"],
-                                     admin=True,
-                                     domainid=self.domain.id
-                                     )
+            self.apiclient,
+            self.services["account"],
+            admin=True,
+            domainid=self.domain.id
+        )
         self.cleanup = []
         self.cleanup.insert(0, self.account)
         return
@@ -231,7 +250,6 @@ class TestVPC(cloudstackTestCase):
             cleanup_resources(self.apiclient, self.cleanup)
         except Exception as e:
             self.debug("Warning: Exception during cleanup : %s" % e)
-            #raise Exception("Warning: Exception during cleanup : %s" % e)
         return
 
     def validate_vpc_offering(self, vpc_offering):
@@ -239,22 +257,22 @@ class TestVPC(cloudstackTestCase):
 
         self.debug("Check if the VPC offering is created successfully?")
         vpc_offs = VpcOffering.list(
-                                    self.apiclient,
-                                    id=vpc_offering.id
-                                    )
+            self.apiclient,
+            id=vpc_offering.id
+        )
         self.assertEqual(
-                         isinstance(vpc_offs, list),
-                         True,
-                         "List VPC offerings should return a valid list"
-                         )
+            isinstance(vpc_offs, list),
+            True,
+            "List VPC offerings should return a valid list"
+        )
         self.assertEqual(
-                 vpc_offering.name,
-                 vpc_offs[0].name,
-                "Name of the VPC offering should match with listVPCOff data"
-                )
+            vpc_offering.name,
+            vpc_offs[0].name,
+            "Name of the VPC offering should match with listVPCOff data"
+        )
         self.debug(
-                "VPC offering is created successfully - %s" %
-                                                        vpc_offering.name)
+            "VPC offering is created successfully - %s" %
+            vpc_offering.name)
         return
 
     def validate_vpc_network(self, network, state=None):
@@ -262,29 +280,30 @@ class TestVPC(cloudstackTestCase):
 
         self.debug("Check if the VPC network is created successfully?")
         vpc_networks = VPC.list(
-                                    self.apiclient,
-                                    id=network.id
-                          )
+            self.apiclient,
+            id=network.id
+        )
         self.assertEqual(
-                         isinstance(vpc_networks, list),
-                         True,
-                         "List VPC network should return a valid list"
-                         )
+            isinstance(vpc_networks, list),
+            True,
+            "List VPC network should return a valid list"
+        )
         self.assertEqual(
-                 network.name,
-                 vpc_networks[0].name,
-                "Name of the VPC network should match with listVPC data"
-                )
+            network.name,
+            vpc_networks[0].name,
+            "Name of the VPC network should match with listVPC data"
+        )
         if state:
             self.assertEqual(
-                 vpc_networks[0].state,
-                 state,
+                vpc_networks[0].state,
+                state,
                 "VPC state should be '%s'" % state
-                )
+            )
         self.debug("VPC network validated - %s" % network.name)
         return
 
-    #list_vpc_apis should be the first case otherwise the vpc counts would be wrong
+    # list_vpc_apis should be the first case otherwise the vpc counts would be
+    # wrong
     @attr(tags=["advanced", "intervlan"], required_hardware="false")
     def test_01_list_vpc_apis(self):
         """ Test list VPC APIs
@@ -306,162 +325,161 @@ class TestVPC(cloudstackTestCase):
 
         self.services["vpc"]["cidr"] = "10.1.1.1/16"
         self.debug("creating a VPC network in the account: %s" %
-                                                    self.account.name)
+                   self.account.name)
         vpc_1 = VPC.create(
-                         self.apiclient,
-                         self.services["vpc"],
-                         vpcofferingid=self.vpc_off.id,
-                         zoneid=self.zone.id,
-                         account=self.account.name,
-                         domainid=self.account.domainid
-                         )
+            self.apiclient,
+            self.services["vpc"],
+            vpcofferingid=self.vpc_off.id,
+            zoneid=self.zone.id,
+            account=self.account.name,
+            domainid=self.account.domainid
+        )
         self.validate_vpc_network(vpc_1)
 
         self.services["vpc"]["cidr"] = "10.1.46.1/16"
         vpc_2 = VPC.create(
-                         self.apiclient,
-                         self.services["vpc"],
-                         vpcofferingid=self.vpc_off.id,
-                         zoneid=self.zone.id,
-                         account=self.account.name,
-                         domainid=self.account.domainid
-                         )
+            self.apiclient,
+            self.services["vpc"],
+            vpcofferingid=self.vpc_off.id,
+            zoneid=self.zone.id,
+            account=self.account.name,
+            domainid=self.account.domainid
+        )
         self.validate_vpc_network(vpc_2)
 
         self.debug("Check list VPC API by Name?")
         vpcs = VPC.list(
-                        self.apiclient,
-                        name=vpc_1.name,
-                        listall=True
-                        )
+            self.apiclient,
+            name=vpc_1.name,
+            listall=True
+        )
         self.assertEqual(
-                        isinstance(vpcs, list),
-                        True,
-                        "List VPC shall return a valid resposne"
-                        )
+            isinstance(vpcs, list),
+            True,
+            "List VPC shall return a valid resposne"
+        )
         vpc = vpcs[0]
         self.assertEqual(
-                         vpc.name,
-                         vpc_1.name,
-                         "VPC name should match with the existing one"
-                         )
+            vpc.name,
+            vpc_1.name,
+            "VPC name should match with the existing one"
+        )
 
         self.debug("Check list VPC API by displayText?")
         vpcs = VPC.list(
-                        self.apiclient,
-                        displaytext=vpc_1.displaytext,
-                        listall=True
-                        )
+            self.apiclient,
+            displaytext=vpc_1.displaytext,
+            listall=True
+        )
         self.assertEqual(
-                        isinstance(vpcs, list),
-                        True,
-                        "List VPC shall return a valid resposne"
-                        )
+            isinstance(vpcs, list),
+            True,
+            "List VPC shall return a valid resposne"
+        )
         vpc = vpcs[0]
         self.assertEqual(
-                         vpc.displaytext,
-                         vpc_1.displaytext,
-                         "VPC displaytext should match with the existing one"
-                         )
+            vpc.displaytext,
+            vpc_1.displaytext,
+            "VPC displaytext should match with the existing one"
+        )
 
         self.debug("Check list VPC API by cidr?")
         vpcs = VPC.list(
-                        self.apiclient,
-                        cidr=vpc_2.cidr,
-                        listall=True
-                        )
+            self.apiclient,
+            cidr=vpc_2.cidr,
+            listall=True
+        )
         self.assertEqual(
-                        isinstance(vpcs, list),
-                        True,
-                        "List VPC shall return a valid resposne"
-                        )
+            isinstance(vpcs, list),
+            True,
+            "List VPC shall return a valid resposne"
+        )
         vpc = vpcs[0]
         self.assertEqual(
-                         vpc.cidr,
-                         vpc_2.cidr,
-                         "VPC cidr should match with the existing one"
-                         )
+            vpc.cidr,
+            vpc_2.cidr,
+            "VPC cidr should match with the existing one"
+        )
         self.debug("Validating list VPC by Id")
         self.validate_vpc_network(vpc_1)
 
         self.debug("Validating list VPC by vpcofferingId")
         vpcs = VPC.list(
-                        self.apiclient,
-                        vpcofferingid=self.vpc_off.id,
-                        listall=True
-                        )
+            self.apiclient,
+            vpcofferingid=self.vpc_off.id,
+            listall=True
+        )
         self.assertEqual(
-                        isinstance(vpcs, list),
-                        True,
-                        "List VPC by vpcofferingId should return a valid response"
-                    )
+            isinstance(vpcs, list),
+            True,
+            "List VPC by vpcofferingId should return a valid response"
+        )
         self.debug("Length of list VPC response: %s" % len(vpcs))
         self.assertEqual(
-                        len(vpcs),
-                        2,
-                        "List VPC should return 2 enabled VPCs"
-                        )
+            len(vpcs),
+            2,
+            "List VPC should return 2 enabled VPCs"
+        )
         for vpc in vpcs:
             self.assertEqual(
-                            vpc.vpcofferingid,
-                            self.vpc_off.id,
-                            "VPC offering ID should match with that of resposne"
-                            )
+                vpc.vpcofferingid,
+                self.vpc_off.id,
+                "VPC offering ID should match with that of resposne"
+            )
 
         self.debug("Validating list VPC by supportedservices")
         vpcs = VPC.list(
-                        self.apiclient,
-                        supportedservices='Vpn,Dhcp,Dns,SourceNat,PortForwarding,Lb,UserData,StaticNat,NetworkACL',
-                        listall=True,
-                        account=self.account.name,
-                        domainid=self.account.domainid
-                        )
+            self.apiclient,
+            supportedservices='Vpn,Dhcp,Dns,SourceNat,PortForwarding,Lb,UserData,StaticNat,NetworkACL',
+            listall=True,
+            account=self.account.name,
+            domainid=self.account.domainid)
         self.assertEqual(
-                        isinstance(vpcs, list),
-                        True,
-                        "List VPC by vpcofferingId should return a valid response"
-                    )
+            isinstance(vpcs, list),
+            True,
+            "List VPC by vpcofferingId should return a valid response"
+        )
         for vpc in vpcs:
             self.assertIn(
-                            vpc.id,
-                            [vpc_1.id, vpc_2.id],
-                            "VPC offering ID should match with that of resposne"
-                            )
+                vpc.id,
+                [vpc_1.id, vpc_2.id],
+                "VPC offering ID should match with that of resposne"
+            )
         self.debug("Validating list VPC by restart required")
         vpcs = VPC.list(
-                        self.apiclient,
-                        restartrequired=True,
-                        listall=True,
-                        account=self.account.name,
-                        domainid=self.account.domainid
-                        )
+            self.apiclient,
+            restartrequired=True,
+            listall=True,
+            account=self.account.name,
+            domainid=self.account.domainid
+        )
         if vpcs is not None:
             for vpc in vpcs:
                 self.assertEqual(
-                            vpc.restartrequired,
-                            True,
-                            "RestartRequired should be set as True"
-                            )
+                    vpc.restartrequired,
+                    True,
+                    "RestartRequired should be set as True"
+                )
         self.debug("Validating list VPC by restart required")
         vpcs = VPC.list(
-                        self.apiclient,
-                        restartrequired=False,
-                        listall=True,
-                        account=self.account.name,
-                        domainid=self.account.domainid
-                        )
+            self.apiclient,
+            restartrequired=False,
+            listall=True,
+            account=self.account.name,
+            domainid=self.account.domainid
+        )
         self.assertEqual(
-                        isinstance(vpcs, list),
-                        True,
-                        "List VPC by vpcofferingId should return a valid response"
-                    )
+            isinstance(vpcs, list),
+            True,
+            "List VPC by vpcofferingId should return a valid response"
+        )
         if vpcs is not None:
             for vpc in vpcs:
                 self.assertEqual(
-                            vpc.restartrequired,
-                            False,
-                            "RestartRequired should be set as False"
-                            )
+                    vpc.restartrequired,
+                    False,
+                    "RestartRequired should be set as False"
+                )
         return
 
     @attr(tags=["advanced", "intervlan"], required_hardware="false")
@@ -475,15 +493,15 @@ class TestVPC(cloudstackTestCase):
 
         self.services["vpc"]["cidr"] = "10.1.1.1/16"
         self.debug("creating a VPC network in the account: %s" %
-                                                    self.account.name)
+                   self.account.name)
         vpc = VPC.create(
-                         self.apiclient,
-                         self.services["vpc"],
-                         vpcofferingid=self.vpc_off.id,
-                         zoneid=self.zone.id,
-                         account=self.account.name,
-                         domainid=self.account.domainid
-                         )
+            self.apiclient,
+            self.services["vpc"],
+            vpcofferingid=self.vpc_off.id,
+            zoneid=self.zone.id,
+            account=self.account.name,
+            domainid=self.account.domainid
+        )
         self.validate_vpc_network(vpc)
 
         self.debug("Restarting the VPC with no network")
@@ -507,22 +525,22 @@ class TestVPC(cloudstackTestCase):
 
         self.services["vpc"]["cidr"] = "10.1.1.1/16"
         self.debug("creating a VPC network in the account: %s" %
-                                                    self.account.name)
+                   self.account.name)
         vpc = VPC.create(
-                         self.apiclient,
-                         self.services["vpc"],
-                         vpcofferingid=self.vpc_off.id,
-                         zoneid=self.zone.id,
-                         account=self.account.name,
-                         domainid=self.account.domainid
-                         )
+            self.apiclient,
+            self.services["vpc"],
+            vpcofferingid=self.vpc_off.id,
+            zoneid=self.zone.id,
+            account=self.account.name,
+            domainid=self.account.domainid
+        )
         self.validate_vpc_network(vpc)
 
         self.network_offering = NetworkOffering.create(
-                                            self.apiclient,
-                                            self.services["network_offering"],
-                                            conservemode=False
-                                            )
+            self.apiclient,
+            self.services["network_offering"],
+            conservemode=False
+        )
         # Enable Network offering
         self.network_offering.update(self.apiclient, state='Enabled')
         self.cleanup.append(self.network_offering)
@@ -534,41 +552,41 @@ class TestVPC(cloudstackTestCase):
 
         # Creating network using the network offering created
         self.debug("Creating network with network offering: %s" %
-                                                    self.network_offering.id)
+                   self.network_offering.id)
         network_1 = Network.create(
-                                self.apiclient,
-                                self.services["network"],
-                                accountid=self.account.name,
-                                domainid=self.account.domainid,
-                                networkofferingid=self.network_offering.id,
-                                zoneid=self.zone.id,
-                                gateway=gateway,
-                                vpcid=vpc.id
-                                )
+            self.apiclient,
+            self.services["network"],
+            accountid=self.account.name,
+            domainid=self.account.domainid,
+            networkofferingid=self.network_offering.id,
+            zoneid=self.zone.id,
+            gateway=gateway,
+            vpcid=vpc.id
+        )
         self.debug("Created network with ID: %s" % network_1.id)
 
         self.network_offering_no_lb = NetworkOffering.create(
-                                    self.apiclient,
-                                    self.services["network_offering_no_lb"],
-                                    conservemode=False
-                                    )
+            self.apiclient,
+            self.services["network_offering_no_lb"],
+            conservemode=False
+        )
         # Enable Network offering
         self.network_offering_no_lb.update(self.apiclient, state='Enabled')
         self.cleanup.append(self.network_offering_no_lb)
 
         gateway = '10.1.2.1'    # New network -> different gateway
         self.debug("Creating network with network offering: %s" %
-                                            self.network_offering_no_lb.id)
+                   self.network_offering_no_lb.id)
         network_2 = Network.create(
-                            self.apiclient,
-                            self.services["network"],
-                            accountid=self.account.name,
-                            domainid=self.account.domainid,
-                            networkofferingid=self.network_offering_no_lb.id,
-                            zoneid=self.zone.id,
-                            gateway=gateway,
-                            vpcid=vpc.id
-                            )
+            self.apiclient,
+            self.services["network"],
+            accountid=self.account.name,
+            domainid=self.account.domainid,
+            networkofferingid=self.network_offering_no_lb.id,
+            zoneid=self.zone.id,
+            gateway=gateway,
+            vpcid=vpc.id
+        )
         self.debug("Created network with ID: %s" % network_2.id)
 
         self.debug("Restarting the VPC with no network")
@@ -591,15 +609,15 @@ class TestVPC(cloudstackTestCase):
 
         self.services["vpc"]["cidr"] = "10.1.1.1/16"
         self.debug("creating a VPC network in the account: %s" %
-                                                    self.account.name)
+                   self.account.name)
         vpc = VPC.create(
-                         self.apiclient,
-                         self.services["vpc"],
-                         vpcofferingid=self.vpc_off.id,
-                         zoneid=self.zone.id,
-                         account=self.account.name,
-                         domainid=self.account.domainid
-                         )
+            self.apiclient,
+            self.services["vpc"],
+            vpcofferingid=self.vpc_off.id,
+            zoneid=self.zone.id,
+            account=self.account.name,
+            domainid=self.account.domainid
+        )
         self.validate_vpc_network(vpc)
 
         self.debug("Restarting the VPC with no network")
@@ -610,14 +628,14 @@ class TestVPC(cloudstackTestCase):
 
         self.debug("Check if the VPC offering is deleted successfully?")
         vpcs = VPC.list(
-                                    self.apiclient,
-                                    id=vpc.id
-                                    )
+            self.apiclient,
+            id=vpc.id
+        )
         self.assertEqual(
-                         vpcs,
-                         None,
-                         "List VPC offerings should not return anything"
-                         )
+            vpcs,
+            None,
+            "List VPC offerings should not return anything"
+        )
         return
 
     @attr(tags=["advanced", "intervlan"], required_hardware="false")
@@ -634,22 +652,22 @@ class TestVPC(cloudstackTestCase):
 
         self.services["vpc"]["cidr"] = "10.1.1.1/16"
         self.debug("creating a VPC network in the account: %s" %
-                                                    self.account.name)
+                   self.account.name)
         vpc = VPC.create(
-                         self.apiclient,
-                         self.services["vpc"],
-                         vpcofferingid=self.vpc_off.id,
-                         zoneid=self.zone.id,
-                         account=self.account.name,
-                         domainid=self.account.domainid
-                         )
+            self.apiclient,
+            self.services["vpc"],
+            vpcofferingid=self.vpc_off.id,
+            zoneid=self.zone.id,
+            account=self.account.name,
+            domainid=self.account.domainid
+        )
         self.validate_vpc_network(vpc)
 
         self.network_offering = NetworkOffering.create(
-                                            self.apiclient,
-                                            self.services["network_offering"],
-                                            conservemode=False
-                                            )
+            self.apiclient,
+            self.services["network_offering"],
+            conservemode=False
+        )
         # Enable Network offering
         self.network_offering.update(self.apiclient, state='Enabled')
         self.cleanup.append(self.network_offering)
@@ -661,41 +679,41 @@ class TestVPC(cloudstackTestCase):
 
         # Creating network using the network offering created
         self.debug("Creating network with network offering: %s" %
-                                                    self.network_offering.id)
+                   self.network_offering.id)
         network_1 = Network.create(
-                                self.apiclient,
-                                self.services["network"],
-                                accountid=self.account.name,
-                                domainid=self.account.domainid,
-                                networkofferingid=self.network_offering.id,
-                                zoneid=self.zone.id,
-                                gateway=gateway,
-                                vpcid=vpc.id
-                                )
+            self.apiclient,
+            self.services["network"],
+            accountid=self.account.name,
+            domainid=self.account.domainid,
+            networkofferingid=self.network_offering.id,
+            zoneid=self.zone.id,
+            gateway=gateway,
+            vpcid=vpc.id
+        )
         self.debug("Created network with ID: %s" % network_1.id)
 
         self.network_offering_no_lb = NetworkOffering.create(
-                                            self.apiclient,
-                                            self.services["network_offering_no_lb"],
-                                            conservemode=False
-                                            )
+            self.apiclient,
+            self.services["network_offering_no_lb"],
+            conservemode=False
+        )
         # Enable Network offering
         self.network_offering_no_lb.update(self.apiclient, state='Enabled')
         self.cleanup.append(self.network_offering_no_lb)
 
         gateway = '10.1.2.1'    # New network -> different gateway
         self.debug("Creating network with network offering: %s" %
-                                                    self.network_offering_no_lb.id)
+                   self.network_offering_no_lb.id)
         network_2 = Network.create(
-                                self.apiclient,
-                                self.services["network"],
-                                accountid=self.account.name,
-                                domainid=self.account.domainid,
-                                networkofferingid=self.network_offering_no_lb.id,
-                                zoneid=self.zone.id,
-                                gateway=gateway,
-                                vpcid=vpc.id
-                                )
+            self.apiclient,
+            self.services["network"],
+            accountid=self.account.name,
+            domainid=self.account.domainid,
+            networkofferingid=self.network_offering_no_lb.id,
+            zoneid=self.zone.id,
+            gateway=gateway,
+            vpcid=vpc.id
+        )
         self.debug("Created network with ID: %s" % network_2.id)
 
         self.debug("Deleting the VPC with no network")
@@ -718,37 +736,38 @@ class TestVPC(cloudstackTestCase):
 
         self.debug("Check if the VPC offering is deleted successfully?")
         vpcs = VPC.list(
-                        self.apiclient,
-                        id=vpc.id
-                        )
+            self.apiclient,
+            id=vpc.id
+        )
         self.assertEqual(
-                         vpcs,
-                         None,
-                         "List VPC offerings should not return anything"
-                         )
-        self.debug("Waiting for network.gc.interval to cleanup network resources")
+            vpcs,
+            None,
+            "List VPC offerings should not return anything"
+        )
+        self.debug(
+            "Waiting for network.gc.interval to cleanup network resources")
         interval = list_configurations(
-                                    self.apiclient,
-                                    name='network.gc.interval'
-                                    )
+            self.apiclient,
+            name='network.gc.interval'
+        )
         wait = list_configurations(
-                                    self.apiclient,
-                                    name='network.gc.wait'
-                                   )
+            self.apiclient,
+            name='network.gc.wait'
+        )
         # Sleep to ensure that all resources are deleted
         time.sleep(int(interval[0].value) + int(wait[0].value))
         self.debug("Check if VR is deleted or not?")
         routers = Router.list(
-                            self.apiclient,
-                            account=self.account.name,
-                            domainid=self.account.domainid,
-                            listall=True
-                            )
+            self.apiclient,
+            account=self.account.name,
+            domainid=self.account.domainid,
+            listall=True
+        )
         self.assertEqual(
-                        routers,
-                        None,
-                        "List Routers for the account should not return any response"
-                        )
+            routers,
+            None,
+            "List Routers for the account should not return any response"
+        )
         return
 
     @attr(tags=["advanced", "intervlan"], required_hardware="false")
@@ -762,53 +781,53 @@ class TestVPC(cloudstackTestCase):
         # 3. list VPCS as domain admin User to view all the Vpcs owned by admin
 
         self.user = Account.create(
-                                     self.apiclient,
-                                     self.services["account"],
-                                )
+            self.apiclient,
+            self.services["account"],
+        )
         self.cleanup.append(self.user)
 
         self.services["vpc"]["cidr"] = "10.1.1.1/16"
         self.debug("creating a VPC network in the account: %s" %
-                                                    self.account.name)
+                   self.account.name)
         vpc_1 = VPC.create(
-                         self.apiclient,
-                         self.services["vpc"],
-                         vpcofferingid=self.vpc_off.id,
-                         zoneid=self.zone.id,
-                         account=self.account.name,
-                         domainid=self.account.domainid
-                         )
+            self.apiclient,
+            self.services["vpc"],
+            vpcofferingid=self.vpc_off.id,
+            zoneid=self.zone.id,
+            account=self.account.name,
+            domainid=self.account.domainid
+        )
         self.validate_vpc_network(vpc_1)
 
         self.services["vpc"]["cidr"] = "10.1.46.1/16"
         vpc_2 = VPC.create(
-                         self.apiclient,
-                         self.services["vpc"],
-                         vpcofferingid=self.vpc_off.id,
-                         zoneid=self.zone.id,
-                         account=self.user.name,
-                         domainid=self.user.domainid
-                         )
+            self.apiclient,
+            self.services["vpc"],
+            vpcofferingid=self.vpc_off.id,
+            zoneid=self.zone.id,
+            account=self.user.name,
+            domainid=self.user.domainid
+        )
         self.validate_vpc_network(vpc_2)
 
         self.debug("Validating list VPCs call by passing account and domain")
         vpcs = VPC.list(
-                        self.apiclient,
-                        account=self.user.name,
-                        domainid=self.user.domainid,
-                        listall=True
-                    )
+            self.apiclient,
+            account=self.user.name,
+            domainid=self.user.domainid,
+            listall=True
+        )
         self.assertEqual(
-                    isinstance(vpcs, list),
-                    True,
-                    "List VPC should return a valid response"
-                )
+            isinstance(vpcs, list),
+            True,
+            "List VPC should return a valid response"
+        )
         vpc = vpcs[0]
         self.assertEqual(
-                        vpc.id,
-                        vpc_2.id,
-                        "List VPC should return VPC belonging to that account"
-                        )
+            vpc.id,
+            vpc_2.id,
+            "List VPC should return VPC belonging to that account"
+        )
         return
 
     @attr(tags=["advanced", "intervlan", "multiple"], required_hardware="true")
@@ -837,9 +856,9 @@ class TestVPC(cloudstackTestCase):
 
         self.debug("Creating a VPC offering..")
         vpc_off = VpcOffering.create(
-                                     self.apiclient,
-                                     self.services["vpc_offering"]
-                                     )
+            self.apiclient,
+            self.services["vpc_offering"]
+        )
 
         self.cleanup.append(vpc_off)
         self.validate_vpc_offering(vpc_off)
@@ -848,249 +867,250 @@ class TestVPC(cloudstackTestCase):
         vpc_off.update(self.apiclient, state='Enabled')
 
         self.debug("creating a VPC network in the account: %s" %
-                                                    self.account.name)
+                   self.account.name)
         self.services["vpc"]["cidr"] = '10.1.1.1/16'
         vpc = VPC.create(
-                         self.apiclient,
-                         self.services["vpc"],
-                         vpcofferingid=vpc_off.id,
-                         zoneid=self.zone.id,
-                         account=self.account.name,
-                         domainid=self.account.domainid
-                         )
+            self.apiclient,
+            self.services["vpc"],
+            vpcofferingid=vpc_off.id,
+            zoneid=self.zone.id,
+            account=self.account.name,
+            domainid=self.account.domainid
+        )
         self.validate_vpc_network(vpc)
 
         self.network_offering = NetworkOffering.create(
-                                            self.apiclient,
-                                            self.services["network_offering"],
-                                            conservemode=False
-                                            )
+            self.apiclient,
+            self.services["network_offering"],
+            conservemode=False
+        )
         # Enable Network offering
         self.network_offering.update(self.apiclient, state='Enabled')
         self.cleanup.append(self.network_offering)
 
         self.network_offering_no_lb = NetworkOffering.create(
-                                    self.apiclient,
-                                    self.services["network_offering_no_lb"],
-                                    conservemode=False
-                                    )
+            self.apiclient,
+            self.services["network_offering_no_lb"],
+            conservemode=False
+        )
         # Enable Network offering
         self.network_offering_no_lb.update(self.apiclient, state='Enabled')
         self.cleanup.append(self.network_offering_no_lb)
 
         # Creating network using the network offering created
         self.debug("Creating network with network offering: %s" %
-                                                    self.network_offering_no_lb.id)
+                   self.network_offering_no_lb.id)
         network_1 = Network.create(
-                                self.apiclient,
-                                self.services["network"],
-                                accountid=self.account.name,
-                                domainid=self.account.domainid,
-                                networkofferingid=self.network_offering_no_lb.id,
-                                zoneid=self.zone.id,
-                                gateway='10.1.1.1',
-                                vpcid=vpc.id
-                                )
+            self.apiclient,
+            self.services["network"],
+            accountid=self.account.name,
+            domainid=self.account.domainid,
+            networkofferingid=self.network_offering_no_lb.id,
+            zoneid=self.zone.id,
+            gateway='10.1.1.1',
+            vpcid=vpc.id
+        )
         self.debug("Created network with ID: %s" % network_1.id)
 
         # Creating network using the network offering created
         self.debug("Creating network with network offering: %s" %
-                                                    self.network_offering.id)
+                   self.network_offering.id)
         network_2 = Network.create(
-                                self.apiclient,
-                                self.services["network"],
-                                accountid=self.account.name,
-                                domainid=self.account.domainid,
-                                networkofferingid=self.network_offering.id,
-                                zoneid=self.zone.id,
-                                gateway='10.1.2.1',
-                                vpcid=vpc.id
-                                )
+            self.apiclient,
+            self.services["network"],
+            accountid=self.account.name,
+            domainid=self.account.domainid,
+            networkofferingid=self.network_offering.id,
+            zoneid=self.zone.id,
+            gateway='10.1.2.1',
+            vpcid=vpc.id
+        )
         self.debug("Created network with ID: %s" % network_2.id)
 
         self.debug("deploying VMs in network: %s" % network_1.name)
         # Spawn an instance in that network
         vm_1 = VirtualMachine.create(
-                                  self.apiclient,
-                                  self.services["virtual_machine"],
-                                  accountid=self.account.name,
-                                  domainid=self.account.domainid,
-                                  serviceofferingid=self.service_offering.id,
-                                  networkids=[str(network_1.id)]
-                                  )
+            self.apiclient,
+            self.services["virtual_machine"],
+            accountid=self.account.name,
+            domainid=self.account.domainid,
+            serviceofferingid=self.service_offering.id,
+            networkids=[str(network_1.id)]
+        )
         self.debug("Deployed VM in network: %s" % network_1.id)
 
         vm_2 = VirtualMachine.create(
-                                  self.apiclient,
-                                  self.services["virtual_machine"],
-                                  accountid=self.account.name,
-                                  domainid=self.account.domainid,
-                                  serviceofferingid=self.service_offering.id,
-                                  networkids=[str(network_1.id)]
-                                  )
+            self.apiclient,
+            self.services["virtual_machine"],
+            accountid=self.account.name,
+            domainid=self.account.domainid,
+            serviceofferingid=self.service_offering.id,
+            networkids=[str(network_1.id)]
+        )
         self.debug("Deployed VM in network: %s" % network_1.id)
 
         self.debug("deploying VMs in network: %s" % network_2.name)
         # Spawn an instance in that network
         vm_3 = VirtualMachine.create(
-                                  self.apiclient,
-                                  self.services["virtual_machine"],
-                                  accountid=self.account.name,
-                                  domainid=self.account.domainid,
-                                  serviceofferingid=self.service_offering.id,
-                                  networkids=[str(network_2.id)]
-                                  )
+            self.apiclient,
+            self.services["virtual_machine"],
+            accountid=self.account.name,
+            domainid=self.account.domainid,
+            serviceofferingid=self.service_offering.id,
+            networkids=[str(network_2.id)]
+        )
         self.debug("Deployed VM in network: %s" % network_2.id)
 
         vm_4 = VirtualMachine.create(
-                                  self.apiclient,
-                                  self.services["virtual_machine"],
-                                  accountid=self.account.name,
-                                  domainid=self.account.domainid,
-                                  serviceofferingid=self.service_offering.id,
-                                  networkids=[str(network_2.id)]
-                                  )
+            self.apiclient,
+            self.services["virtual_machine"],
+            accountid=self.account.name,
+            domainid=self.account.domainid,
+            serviceofferingid=self.service_offering.id,
+            networkids=[str(network_2.id)]
+        )
         self.debug("Deployed VM in network: %s" % network_2.id)
 
         self.debug("Associating public IP for network: %s" % network_1.name)
         public_ip_1 = PublicIPAddress.create(
-                                self.apiclient,
-                                accountid=self.account.name,
-                                zoneid=self.zone.id,
-                                domainid=self.account.domainid,
-                                networkid=network_1.id,
-                                vpcid=vpc.id
-                                )
+            self.apiclient,
+            accountid=self.account.name,
+            zoneid=self.zone.id,
+            domainid=self.account.domainid,
+            networkid=network_1.id,
+            vpcid=vpc.id
+        )
         self.debug("Associated %s with network %s" % (
-                                        public_ip_1.ipaddress.ipaddress,
-                                        network_1.id
-                                        ))
-
-        nat_rule = NATRule.create(
-                                  self.apiclient,
-                                  vm_1,
-                                  self.services["natrule"],
-                                  ipaddressid=public_ip_1.ipaddress.id,
-                                  openfirewall=False,
-                                  networkid=network_1.id,
-                                  vpcid=vpc.id
-                                  )
+            public_ip_1.ipaddress.ipaddress,
+            network_1.id
+        ))
+
+        NATRule.create(
+            self.apiclient,
+            vm_1,
+            self.services["natrule"],
+            ipaddressid=public_ip_1.ipaddress.id,
+            openfirewall=False,
+            networkid=network_1.id,
+            vpcid=vpc.id
+        )
 
         self.debug("Adding NetwrokACl rules to make NAT rule accessible")
-        nwacl_nat = NetworkACL.create(
-                                         self.apiclient,
-                                         networkid=network_1.id,
-                                         services=self.services["natrule"],
-                                         traffictype='Ingress'
-                                         )
+        NetworkACL.create(
+            self.apiclient,
+            networkid=network_1.id,
+            services=self.services["natrule"],
+            traffictype='Ingress'
+        )
 
         self.debug("Associating public IP for network: %s" % network_1.name)
         public_ip_2 = PublicIPAddress.create(
-                                self.apiclient,
-                                accountid=self.account.name,
-                                zoneid=self.zone.id,
-                                domainid=self.account.domainid,
-                                networkid=network_1.id,
-                                vpcid=vpc.id
-                                )
+            self.apiclient,
+            accountid=self.account.name,
+            zoneid=self.zone.id,
+            domainid=self.account.domainid,
+            networkid=network_1.id,
+            vpcid=vpc.id
+        )
         self.debug("Associated %s with network %s" % (
-                                        public_ip_2.ipaddress.ipaddress,
-                                        network_1.id
-                                        ))
+            public_ip_2.ipaddress.ipaddress,
+            network_1.id
+        ))
         self.debug("Enabling static NAT for IP: %s" %
-                                            public_ip_2.ipaddress.ipaddress)
+                   public_ip_2.ipaddress.ipaddress)
         try:
             StaticNATRule.enable(
-                              self.apiclient,
-                              ipaddressid=public_ip_2.ipaddress.id,
-                              virtualmachineid=vm_2.id,
-                              networkid=network_1.id
-                              )
+                self.apiclient,
+                ipaddressid=public_ip_2.ipaddress.id,
+                virtualmachineid=vm_2.id,
+                networkid=network_1.id
+            )
             self.debug("Static NAT enabled for IP: %s" %
-                                            public_ip_2.ipaddress.ipaddress)
+                       public_ip_2.ipaddress.ipaddress)
         except Exception as e:
             self.fail("Failed to enable static NAT on IP: %s - %s" % (
-                                        public_ip_2.ipaddress.ipaddress, e))
+                public_ip_2.ipaddress.ipaddress, e))
 
         public_ips = PublicIPAddress.list(
-                                    self.apiclient,
-                                    networkid=network_1.id,
-                                    listall=True,
-                                    isstaticnat=True,
-                                    account=self.account.name,
-                                    domainid=self.account.domainid
-                                  )
+            self.apiclient,
+            networkid=network_1.id,
+            listall=True,
+            isstaticnat=True,
+            account=self.account.name,
+            domainid=self.account.domainid
+        )
         self.assertEqual(
-                         isinstance(public_ips, list),
-                         True,
-                         "List public Ip for network should list the Ip addr"
-                         )
+            isinstance(public_ips, list),
+            True,
+            "List public Ip for network should list the Ip addr"
+        )
         self.assertEqual(
-                         public_ips[0].ipaddress,
-                         public_ip_2.ipaddress.ipaddress,
-                         "List public Ip for network should list the Ip addr"
-                         )
+            public_ips[0].ipaddress,
+            public_ip_2.ipaddress.ipaddress,
+            "List public Ip for network should list the Ip addr"
+        )
 
         self.debug("Associating public IP for network: %s" % vpc.name)
         public_ip_3 = PublicIPAddress.create(
-                                self.apiclient,
-                                accountid=self.account.name,
-                                zoneid=self.zone.id,
-                                domainid=self.account.domainid,
-                                networkid=network_2.id,
-                                vpcid=vpc.id
-                                )
+            self.apiclient,
+            accountid=self.account.name,
+            zoneid=self.zone.id,
+            domainid=self.account.domainid,
+            networkid=network_2.id,
+            vpcid=vpc.id
+        )
         self.debug("Associated %s with network %s" % (
-                                        public_ip_3.ipaddress.ipaddress,
-                                        network_2.id
-                                        ))
+            public_ip_3.ipaddress.ipaddress,
+            network_2.id
+        ))
 
         self.debug("Creating LB rule for IP address: %s" %
-                                        public_ip_3.ipaddress.ipaddress)
+                   public_ip_3.ipaddress.ipaddress)
 
         lb_rule = LoadBalancerRule.create(
-                                    self.apiclient,
-                                    self.services["lbrule"],
-                                    ipaddressid=public_ip_3.ipaddress.id,
-                                    accountid=self.account.name,
-                                    networkid=network_2.id,
-                                    vpcid=vpc.id,
-                                    domainid=self.account.domainid
-                                )
+            self.apiclient,
+            self.services["lbrule"],
+            ipaddressid=public_ip_3.ipaddress.id,
+            accountid=self.account.name,
+            networkid=network_2.id,
+            vpcid=vpc.id,
+            domainid=self.account.domainid
+        )
 
         self.debug("Adding virtual machines %s and %s to LB rule" % (
-                                    vm_3.name, vm_4.name))
+            vm_3.name, vm_4.name))
         lb_rule.assign(self.apiclient, [vm_3, vm_4])
 
         self.debug("Adding NetwrokACl rules to make PF and LB accessible")
-        nwacl_lb = NetworkACL.create(
-                                self.apiclient,
-                                networkid=network_2.id,
-                                services=self.services["lbrule"],
-                                traffictype='Ingress'
-                                )
-
-        self.debug("Adding Egress rules to network %s and %s to allow access to internet")
-        nwacl_internet_1 = NetworkACL.create(
-                                self.apiclient,
-                                networkid=network_1.id,
-                                services=self.services["icmp_rule"],
-                                traffictype='Egress'
-                                )
-        nwacl_internet_2 = NetworkACL.create(
-                                self.apiclient,
-                                networkid=network_2.id,
-                                services=self.services["icmp_rule"],
-                                traffictype='Egress'
-                                )
+        NetworkACL.create(
+            self.apiclient,
+            networkid=network_2.id,
+            services=self.services["lbrule"],
+            traffictype='Ingress'
+        )
+
+        self.debug("Adding Egress rules to network %s and %s to allow\
+                    access to internet")
+        NetworkACL.create(
+            self.apiclient,
+            networkid=network_1.id,
+            services=self.services["icmp_rule"],
+            traffictype='Egress'
+        )
+        NetworkACL.create(
+            self.apiclient,
+            networkid=network_2.id,
+            services=self.services["icmp_rule"],
+            traffictype='Egress'
+        )
 
         self.debug("Checking if we can SSH into VM_1?")
         try:
             ssh_1 = vm_1.get_ssh_client(
-                                ipaddress=public_ip_1.ipaddress.ipaddress,
-                                reconnect=True,
-                                port=self.services["natrule"]["publicport"]
-                                )
+                ipaddress=public_ip_1.ipaddress.ipaddress,
+                reconnect=True,
+                port=self.services["natrule"]["publicport"]
+            )
             self.debug("SSH into VM is successfully")
 
             self.debug("Verifying if we can ping to outside world from VM?")
@@ -1103,61 +1123,61 @@ class TestVPC(cloudstackTestCase):
             # rtt min/avg/max/mdev = 25.970/25.970/25.970/0.000 ms
         except Exception as e:
             self.fail("Failed to SSH into VM - %s, %s" %
-                                        (public_ip_1.ipaddress.ipaddress, e))
+                      (public_ip_1.ipaddress.ipaddress, e))
 
         result = str(res)
         self.debug("Result: %s" % result)
         self.assertEqual(
-                         result.count("1 received"),
-                         1,
-                         "Ping to outside world from VM should be successful"
-                         )
+            result.count("1 received"),
+            1,
+            "Ping to outside world from VM should be successful"
+        )
 
         self.debug("Checking if we can SSH into VM_2?")
         try:
             ssh_2 = vm_2.get_ssh_client(
-                                ipaddress=public_ip_2.ipaddress.ipaddress,
-                                reconnect=True,
-                                port=self.services["natrule"]["publicport"]
-                                )
+                ipaddress=public_ip_2.ipaddress.ipaddress,
+                reconnect=True,
+                port=self.services["natrule"]["publicport"]
+            )
             self.debug("SSH into VM is successfully")
 
             self.debug("Verifying if we can ping to outside world from VM?")
             res = ssh_2.execute("ping -c 1 www.google.com")
         except Exception as e:
             self.fail("Failed to SSH into VM - %s, %s" %
-                                        (public_ip_2.ipaddress.ipaddress, e))
+                      (public_ip_2.ipaddress.ipaddress, e))
 
         result = str(res)
         self.debug("Result: %s" % result)
         self.assertEqual(
-                         result.count("1 received"),
-                         1,
-                         "Ping to outside world from VM should be successful"
-                         )
+            result.count("1 received"),
+            1,
+            "Ping to outside world from VM should be successful"
+        )
 
         self.debug("Checking if we can SSH into VM using LB rule?")
         try:
             ssh_3 = vm_3.get_ssh_client(
-                            ipaddress=public_ip_3.ipaddress.ipaddress,
-                            reconnect=True,
-                            port=self.services["lbrule"]["publicport"]
-                            )
+                ipaddress=public_ip_3.ipaddress.ipaddress,
+                reconnect=True,
+                port=self.services["lbrule"]["publicport"]
+            )
             self.debug("SSH into VM is successfully")
 
             self.debug("Verifying if we can ping to outside world from VM?")
             res = ssh_3.execute("ping -c 1 www.google.com")
         except Exception as e:
             self.fail("Failed to SSH into VM - %s, %s" %
-                                        (public_ip_3.ipaddress.ipaddress, e))
+                      (public_ip_3.ipaddress.ipaddress, e))
 
         result = str(res)
         self.debug("Result: %s" % result)
         self.assertEqual(
-                         result.count("1 received"),
-                         1,
-                         "Ping to outside world from VM should be successful"
-                         )
+            result.count("1 received"),
+            1,
+            "Ping to outside world from VM should be successful"
+        )
         return
 
     @attr(tags=["advanced", "intervlan"], required_hardware="true")
@@ -1182,9 +1202,9 @@ class TestVPC(cloudstackTestCase):
         self.cleanup = []
         self.debug("Creating a VPC offering..")
         vpc_off = VpcOffering.create(
-                                     self.apiclient,
-                                     self.services["vpc_offering"]
-                                     )
+            self.apiclient,
+            self.services["vpc_offering"]
+        )
 
         self.cleanup.append(vpc_off)
         self.validate_vpc_offering(vpc_off)
@@ -1193,248 +1213,250 @@ class TestVPC(cloudstackTestCase):
         vpc_off.update(self.apiclient, state='Enabled')
 
         self.debug("creating a VPC network in the account: %s" %
-                                                    self.account.name)
+                   self.account.name)
         self.services["vpc"]["cidr"] = '10.1.1.1/16'
         vpc = VPC.create(
-                         self.apiclient,
-                         self.services["vpc"],
-                         vpcofferingid=vpc_off.id,
-                         zoneid=self.zone.id,
-                         account=self.account.name,
-                         domainid=self.account.domainid
-                         )
+            self.apiclient,
+            self.services["vpc"],
+            vpcofferingid=vpc_off.id,
+            zoneid=self.zone.id,
+            account=self.account.name,
+            domainid=self.account.domainid
+        )
         self.validate_vpc_network(vpc)
 
         self.network_offering = NetworkOffering.create(
-                                            self.apiclient,
-                                            self.services["network_offering"],
-                                            conservemode=False
-                                            )
+            self.apiclient,
+            self.services["network_offering"],
+            conservemode=False
+        )
         # Enable Network offering
         self.network_offering.update(self.apiclient, state='Enabled')
         self.cleanup.append(self.network_offering)
 
         self.network_offering_no_lb = NetworkOffering.create(
-                                    self.apiclient,
-                                    self.services["network_offering_no_lb"],
-                                    conservemode=False
-                                    )
+            self.apiclient,
+            self.services["network_offering_no_lb"],
+            conservemode=False
+        )
         # Enable Network offering
         self.network_offering_no_lb.update(self.apiclient, state='Enabled')
         self.cleanup.append(self.network_offering_no_lb)
 
         # Creating network using the network offering created
         self.debug("Creating network with network offering: %s" %
-                                                    self.network_offering.id)
+                   self.network_offering.id)
         network_1 = Network.create(
-                                self.apiclient,
-                                self.services["network"],
-                                accountid=self.account.name,
-                                domainid=self.account.domainid,
-                                networkofferingid=self.network_offering_no_lb.id,
-                                zoneid=self.zone.id,
-                                gateway='10.1.1.1',
-                                vpcid=vpc.id
-                                )
+            self.apiclient,
+            self.services["network"],
+            accountid=self.account.name,
+            domainid=self.account.domainid,
+            networkofferingid=self.network_offering_no_lb.id,
+            zoneid=self.zone.id,
+            gateway='10.1.1.1',
+            vpcid=vpc.id
+        )
         self.debug("Created network with ID: %s" % network_1.id)
 
         # Creating network using the network offering created
         self.debug("Creating network with network offering: %s" %
-                                            self.network_offering_no_lb.id)
+                   self.network_offering_no_lb.id)
         network_2 = Network.create(
-                            self.apiclient,
-                            self.services["network"],
-                            accountid=self.account.name,
-                            domainid=self.account.domainid,
-                            networkofferingid=self.network_offering.id,
-                            zoneid=self.zone.id,
-                            gateway='10.1.2.1',
-                            vpcid=vpc.id
-                            )
+            self.apiclient,
+            self.services["network"],
+            accountid=self.account.name,
+            domainid=self.account.domainid,
+            networkofferingid=self.network_offering.id,
+            zoneid=self.zone.id,
+            gateway='10.1.2.1',
+            vpcid=vpc.id
+        )
         self.debug("Created network with ID: %s" % network_2.id)
 
         self.debug("deploying VMs in network: %s" % network_1.name)
         # Spawn an instance in that network
         vm_1 = VirtualMachine.create(
-                                  self.apiclient,
-                                  self.services["virtual_machine"],
-                                  accountid=self.account.name,
-                                  domainid=self.account.domainid,
-                                  serviceofferingid=self.service_offering.id,
-                                  networkids=[str(network_1.id)]
-                                  )
+            self.apiclient,
+            self.services["virtual_machine"],
+            accountid=self.account.name,
+            domainid=self.account.domainid,
+            serviceofferingid=self.service_offering.id,
+            networkids=[str(network_1.id)]
+        )
         self.debug("Deployed VM in network: %s" % network_1.id)
 
         vm_2 = VirtualMachine.create(
-                                  self.apiclient,
-                                  self.services["virtual_machine"],
-                                  accountid=self.account.name,
-                                  domainid=self.account.domainid,
-                                  serviceofferingid=self.service_offering.id,
-                                  networkids=[str(network_1.id)]
-                                  )
+            self.apiclient,
+            self.services["virtual_machine"],
+            accountid=self.account.name,
+            domainid=self.account.domainid,
+            serviceofferingid=self.service_offering.id,
+            networkids=[str(network_1.id)]
+        )
         self.debug("Deployed VM in network: %s" % network_1.id)
 
         self.debug("deploying VMs in network: %s" % network_2.name)
         # Spawn an instance in that network
         vm_3 = VirtualMachine.create(
-                                  self.apiclient,
-                                  self.services["virtual_machine"],
-                                  accountid=self.account.name,
-                                  domainid=self.account.domainid,
-                                  serviceofferingid=self.service_offering.id,
-                                  networkids=[str(network_2.id)]
-                                  )
+            self.apiclient,
+            self.services["virtual_machine"],
+            accountid=self.account.name,
+            domainid=self.account.domainid,
+            serviceofferingid=self.service_offering.id,
+            networkids=[str(network_2.id)]
+        )
         self.debug("Deployed VM in network: %s" % network_2.id)
 
         vm_4 = VirtualMachine.create(
-                                  self.apiclient,
-                                  self.services["virtual_machine"],
-                                  accountid=self.account.name,
-                                  domainid=self.account.domainid,
-                                  serviceofferingid=self.service_offering.id,
-                                  networkids=[str(network_2.id)]
-                                  )
+            self.apiclient,
+            self.services["virtual_machine"],
+            accountid=self.account.name,
+            domainid=self.account.domainid,
+            serviceofferingid=self.service_offering.id,
+            networkids=[str(network_2.id)]
+        )
         self.debug("Deployed VM in network: %s" % network_2.id)
 
         self.debug("Associating public IP for network: %s" % network_1.name)
         public_ip_1 = PublicIPAddress.create(
-                                self.apiclient,
-                                accountid=self.account.name,
-                                zoneid=self.zone.id,
-                                domainid=self.account.domainid,
-                                networkid=network_1.id,
-                                vpcid=vpc.id
-                                )
+            self.apiclient,
+            accountid=self.account.name,
+            zoneid=self.zone.id,
+            domainid=self.account.domainid,
+            networkid=network_1.id,
+            vpcid=vpc.id
+        )
         self.debug("Associated %s with network %s" % (
-                                        public_ip_1.ipaddress.ipaddress,
-                                        network_1.id
-                                        ))
-
-        nat_rule = NATRule.create(
-                                  self.apiclient,
-                                  vm_1,
-                                  self.services["natrule"],
-                                  ipaddressid=public_ip_1.ipaddress.id,
-                                  openfirewall=False,
-                                  networkid=network_1.id,
-                                  vpcid=vpc.id
-                                  )
+            public_ip_1.ipaddress.ipaddress,
+            network_1.id
+        ))
+
+        NATRule.create(
+            self.apiclient,
+            vm_1,
+            self.services["natrule"],
+            ipaddressid=public_ip_1.ipaddress.id,
+            openfirewall=False,
+            networkid=network_1.id,
+            vpcid=vpc.id
+        )
 
         self.debug("Adding NetwrokACl rules to make NAT rule accessible")
-        nwacl_nat = NetworkACL.create(
-                                         self.apiclient,
-                                         networkid=network_1.id,
-                                         services=self.services["natrule"],
-                                         traffictype='Ingress'
-                                         )
+        NetworkACL.create(
+            self.apiclient,
+            networkid=network_1.id,
+            services=self.services["natrule"],
+            traffictype='Ingress'
+        )
 
         self.debug("Associating public IP for network: %s" % network_1.name)
         public_ip_2 = PublicIPAddress.create(
-                                self.apiclient,
-                                accountid=self.account.name,
-                                zoneid=self.zone.id,
-                                domainid=self.account.domainid,
-                                networkid=network_1.id,
-                                vpcid=vpc.id
-                                )
+            self.apiclient,
+            accountid=self.account.name,
+            zoneid=self.zone.id,
+            domainid=self.account.domainid,
+            networkid=network_1.id,
+            vpcid=vpc.id
+        )
         self.debug("Associated %s with network %s" % (
-                                        public_ip_2.ipaddress.ipaddress,
-                                        network_1.id
-                                        ))
+            public_ip_2.ipaddress.ipaddress,
+            network_1.id
+        ))
         self.debug("Enabling static NAT for IP: %s" %
-                                            public_ip_2.ipaddress.ipaddress)
+                   public_ip_2.ipaddress.ipaddress)
         try:
             StaticNATRule.enable(
-                              self.apiclient,
-                              ipaddressid=public_ip_2.ipaddress.id,
-                              virtualmachineid=vm_2.id,
-                              networkid=network_1.id
-                              )
+                self.apiclient,
+                ipaddressid=public_ip_2.ipaddress.id,
+                virtualmachineid=vm_2.id,
+                networkid=network_1.id
+            )
             self.debug("Static NAT enabled for IP: %s" %
-                                            public_ip_2.ipaddress.ipaddress)
+                       public_ip_2.ipaddress.ipaddress)
         except Exception as e:
             self.fail("Failed to enable static NAT on IP: %s - %s" % (
-                                        public_ip_2.ipaddress.ipaddress, e))
+                public_ip_2.ipaddress.ipaddress, e))
 
         public_ips = PublicIPAddress.list(
-                                    self.apiclient,
-                                    networkid=network_1.id,
-                                    listall=True,
-                                    isstaticnat=True,
-                                    account=self.account.name,
-                                    domainid=self.account.domainid
-                                  )
+            self.apiclient,
+            networkid=network_1.id,
+            listall=True,
+            isstaticnat=True,
+            account=self.account.name,
+            domainid=self.account.domainid
+        )
         self.assertEqual(
-                         isinstance(public_ips, list),
-                         True,
-                         "List public Ip for network should list the Ip addr"
-                         )
+            isinstance(public_ips, list),
+            True,
+            "List public Ip for network should list the Ip addr"
+        )
         self.assertEqual(
-                         public_ips[0].ipaddress,
-                         public_ip_2.ipaddress.ipaddress,
-                         "List public Ip for network should list the Ip addr"
-                         )
+            public_ips[0].ipaddress,
+            public_ip_2.ipaddress.ipaddress,
+            "List public Ip for network should list the Ip addr"
+        )
 
         self.debug("Associating public IP for network: %s" % vpc.name)
         public_ip_3 = PublicIPAddress.create(
-                                self.apiclient,
-                                accountid=self.account.name,
-                                zoneid=self.zone.id,
-                                domainid=self.account.domainid,
-                                networkid=network_2.id,
-                                vpcid=vpc.id
-                                )
+            self.apiclient,
+            accountid=self.account.name,
+            zoneid=self.zone.id,
+            domainid=self.account.domainid,
+            networkid=network_2.id,
+            vpcid=vpc.id
+        )
         self.debug("Associated %s with network %s" % (
-                                        public_ip_3.ipaddress.ipaddress,
-                                        network_2.id
-                                        ))
+            public_ip_3.ipaddress.ipaddress,
+            network_2.id
+        ))
 
         self.debug("Creating LB rule for IP address: %s" %
-                                        public_ip_3.ipaddress.ipaddress)
+                   public_ip_3.ipaddress.ipaddress)
 
         lb_rule = LoadBalancerRule.create(
-                                    self.apiclient,
-                                    self.services["lbrule"],
-                                    ipaddressid=public_ip_3.ipaddress.id,
-                                    accountid=self.account.name,
-                                    networkid=network_2.id,
-                                    vpcid=vpc.id,
-                                    domainid=self.account.domainid
-                                )
+            self.apiclient,
+            self.services["lbrule"],
+            ipaddressid=public_ip_3.ipaddress.id,
+            accountid=self.account.name,
+            networkid=network_2.id,
+            vpcid=vpc.id,
+            domainid=self.account.domainid
+        )
 
         self.debug("Adding virtual machines %s and %s to LB rule" % (
-                                    vm_3.name, vm_4.name))
+            vm_3.name, vm_4.name))
         lb_rule.assign(self.apiclient, [vm_3, vm_4])
 
         self.debug("Adding NetwrokACl rules to make PF and LB accessible")
-        nwacl_lb = NetworkACL.create(
-                                self.apiclient,
-                                networkid=network_2.id,
-                                services=self.services["lbrule"],
-                                traffictype='Ingress'
-                                )
-
-        self.debug("Adding Egress rules to network %s and %s to allow access to internet")
-        nwacl_internet_1 = NetworkACL.create(
-                                self.apiclient,
-                                networkid=network_1.id,
-                                services=self.services["icmp_rule"],
-                                traffictype='Egress'
-                                )
-        nwacl_internet_2 = NetworkACL.create(
-                                self.apiclient,
-                                networkid=network_2.id,
-                                services=self.services["icmp_rule"],
-                                traffictype='Egress'
-                                )
+        NetworkACL.create(
+            self.apiclient,
+            networkid=network_2.id,
+            services=self.services["lbrule"],
+            traffictype='Ingress'
+        )
+
+        self.debug(
+            "Adding Egress rules to network %s and %s to allow\
+            access to internet")
+        NetworkACL.create(
+            self.apiclient,
+            networkid=network_1.id,
+            services=self.services["icmp_rule"],
+ 

<TRUNCATED>

[26/50] [abbrv] git commit: updated refs/heads/reporter to b26f3fc

Posted by wi...@apache.org.
CLOUDSTACK-8096: Fixed issues in test_ssvm.py while checking the result of diagnostic scripts

Signed-off-by: SrikanteswaraRao Talluri <ta...@apache.org>
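
For readers unfamiliar with how these checks work: the diagnostic checks this patch adjusts follow a common marvin pattern, namely running a command inside the system VM through get_process_status() (imported from marvin.lib.utils in the hunk below) and asserting on the output it returns. The snippet below is only a rough sketch of that pattern, not code from this commit; the argument order of get_process_status(), the host and ssvm attribute names, and the use of get_host_credentials() for the host login are assumptions.

    # Rough sketch only (not the committed code): checking a diagnostic
    # script's result on an SSVM from inside a cloudstackTestCase method.
    host_user, host_passwd = get_host_credentials(          # assumed helper
        self.config, host.ipaddress)                        # and attributes
    result = get_process_status(
        host.ipaddress,            # hypervisor host running the SSVM
        22,
        host_user,
        host_passwd,
        ssvm.linklocalip,          # link-local IP of the system VM
        "service dnsmasq status"   # example diagnostic command
    )
    # The helper generally returns a list of output lines, so flatten it
    # before asserting instead of indexing into it directly.
    res = str(result)
    self.assertEqual(
        res.count("running"),
        1,
        "dnsmasq should be running on the SSVM"
    )

Because the helper usually hands back a list of lines rather than a single string, flattening it with str() before counting substrings is what keeps checks like these from failing on list indexing, which is the kind of robustness issue this fix is concerned with.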


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/77bd069c
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/77bd069c
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/77bd069c

Branch: refs/heads/reporter
Commit: 77bd069cc7b38bf93cc4e314d5d1ddcada56f11d
Parents: 5f9e4fd
Author: Gaurav Aradhye <ga...@clogeny.com>
Authored: Mon Dec 22 14:49:25 2014 +0530
Committer: SrikanteswaraRao Talluri <ta...@apache.org>
Committed: Tue Dec 23 14:27:03 2014 +0530

----------------------------------------------------------------------
 test/integration/smoke/test_ssvm.py     | 1100 ++++++++++++++------------
 tools/marvin/marvin/config/test_data.py |    3 +-
 2 files changed, 611 insertions(+), 492 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/77bd069c/test/integration/smoke/test_ssvm.py
----------------------------------------------------------------------
diff --git a/test/integration/smoke/test_ssvm.py b/test/integration/smoke/test_ssvm.py
index ed9eab0..0df0302 100644
--- a/test/integration/smoke/test_ssvm.py
+++ b/test/integration/smoke/test_ssvm.py
@@ -5,9 +5,9 @@
 # to you under the Apache License, Version 2.0 (the
 # "License"); you may not use this file except in compliance
 # with the License.  You may obtain a copy of the License at
-# 
+#
 #   http://www.apache.org/licenses/LICENSE-2.0
-# 
+#
 # Unless required by applicable law or agreed to in writing,
 # software distributed under the License is distributed on an
 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@@ -16,21 +16,29 @@
 # under the License.
 """ BVT tests for SSVM
 """
-#Import Local Modules
-import marvin
-from marvin.cloudstackTestCase import *
-from marvin.cloudstackAPI import *
-from marvin.sshClient import SshClient
-from marvin.lib.utils import *
-from marvin.lib.base import *
-from marvin.lib.common import *
+# Import Local Modules
+from marvin.cloudstackTestCase import cloudstackTestCase
+from marvin.cloudstackAPI import (stopSystemVm,
+                                  rebootSystemVm,
+                                  destroySystemVm)
+from marvin.lib.utils import (cleanup_resources,
+                              get_process_status,
+                              get_host_credentials)
+from marvin.lib.base import (PhysicalNetwork,
+                             NetScaler)
+from marvin.lib.common import (get_zone,
+                               list_hosts,
+                               list_ssvms,
+                               list_zones,
+                               list_vlan_ipranges)
 from nose.plugins.attrib import attr
 import telnetlib
 
-#Import System modules
+# Import System modules
 import time
 _multiprocess_shared_ = True
 
+
 class TestSSVMs(cloudstackTestCase):
 
     def setUp(self):
@@ -43,7 +51,7 @@ class TestSSVMs(cloudstackTestCase):
 
     def tearDown(self):
         try:
-            #Clean up, terminate the created templates
+            # Clean up, terminate the created templates
             cleanup_resources(self.apiclient, self.cleanup)
 
         except Exception as e:
@@ -55,9 +63,9 @@ class TestSSVMs(cloudstackTestCase):
 
         while True:
             list_host_response = list_hosts(
-                                                 self.apiclient,
-                                                 name=vmname
-                                                )
+                self.apiclient,
+                name=vmname
+            )
 
             if list_host_response and list_host_response[0].state == 'Up':
                 break
@@ -68,8 +76,14 @@ class TestSSVMs(cloudstackTestCase):
             time.sleep(self.services["sleep"])
             timeout = timeout - 1
 
-
-    @attr(tags = ["advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false")
+    @attr(
+        tags=[
+            "advanced",
+            "advancedns",
+            "smoke",
+            "basic",
+            "sg"],
+        required_hardware="false")
     def test_01_list_sec_storage_vm(self):
         """Test List secondary storage VMs
         """
@@ -85,117 +99,129 @@ class TestSSVMs(cloudstackTestCase):
         # 5. DNS entries must match those given for the zone
 
         list_ssvm_response = list_ssvms(
-                                        self.apiclient,
-                                        systemvmtype='secondarystoragevm',
-                                        state='Running',
-                                        )
-        self.assertEqual(
-                            isinstance(list_ssvm_response, list),
-                            True,
-                            "Check list response returns a valid list"
-                        )
-        #Verify SSVM response
+            self.apiclient,
+            systemvmtype='secondarystoragevm',
+            state='Running',
+        )
+        self.assertEqual(
+            isinstance(list_ssvm_response, list),
+            True,
+            "Check list response returns a valid list"
+        )
+        # Verify SSVM response
         self.assertNotEqual(
-                            len(list_ssvm_response),
-                            0,
-                            "Check list System VMs response"
-                        )
+            len(list_ssvm_response),
+            0,
+            "Check list System VMs response"
+        )
 
         list_zones_response = list_zones(self.apiclient)
-        
+
         self.assertEqual(
-                            isinstance(list_zones_response, list),
-                            True,
-                            "Check list response returns a valid list"
-                        )
+            isinstance(list_zones_response, list),
+            True,
+            "Check list response returns a valid list"
+        )
 
         self.debug("Number of zones: %s" % len(list_zones_response))
         self.debug("Number of SSVMs: %s" % len(list_ssvm_response))
         # Number of Sec storage VMs = No of Zones
         self.assertEqual(
-                            len(list_ssvm_response),
-                            len(list_zones_response),
-                            "Check number of SSVMs with number of zones"
-                        )
-        #For each secondary storage VM check private IP,
-        #public IP, link local IP and DNS
+            len(list_ssvm_response),
+            len(list_zones_response),
+            "Check number of SSVMs with number of zones"
+        )
+        # For each secondary storage VM check private IP,
+        # public IP, link local IP and DNS
         for ssvm in list_ssvm_response:
 
             self.debug("SSVM state: %s" % ssvm.state)
             self.assertEqual(
-                            ssvm.state,
-                            'Running',
-                            "Check whether state of SSVM is running"
-                        )
+                ssvm.state,
+                'Running',
+                "Check whether state of SSVM is running"
+            )
 
             self.assertEqual(
-                            hasattr(ssvm, 'privateip'),
-                            True,
-                            "Check whether SSVM has private IP field"
-                            )
+                hasattr(ssvm, 'privateip'),
+                True,
+                "Check whether SSVM has private IP field"
+            )
 
             self.assertEqual(
-                            hasattr(ssvm, 'linklocalip'),
-                            True,
-                            "Check whether SSVM has link local IP field"
-                            )
+                hasattr(ssvm, 'linklocalip'),
+                True,
+                "Check whether SSVM has link local IP field"
+            )
 
             self.assertEqual(
-                            hasattr(ssvm, 'publicip'),
-                            True,
-                            "Check whether SSVM has public IP field"
-                            )
+                hasattr(ssvm, 'publicip'),
+                True,
+                "Check whether SSVM has public IP field"
+            )
 
-            #Fetch corresponding ip ranges information from listVlanIpRanges
+            # Fetch corresponding ip ranges information from listVlanIpRanges
             ipranges_response = list_vlan_ipranges(
-                                                   self.apiclient,
-                                                   zoneid=ssvm.zoneid
-                                                   )
+                self.apiclient,
+                zoneid=ssvm.zoneid
+            )
             self.assertEqual(
-                            isinstance(ipranges_response, list),
-                            True,
-                            "Check list response returns a valid list"
-                        )
+                isinstance(ipranges_response, list),
+                True,
+                "Check list response returns a valid list"
+            )
             iprange = ipranges_response[0]
-            
-            #Fetch corresponding Physical Network of SSVM's Zone
+
+            # Fetch corresponding Physical Network of SSVM's Zone
             listphyntwk = PhysicalNetwork.list(
-                            self.apiclient,
-                            zoneid=ssvm.zoneid
-                            )
-            
+                self.apiclient,
+                zoneid=ssvm.zoneid
+            )
+
             # Execute the following assertion in all zones except EIP-ELB Zones
-            if not (self.zone.networktype.lower() == 'basic' and isinstance(NetScaler.list(self.apiclient,physicalnetworkid=listphyntwk[0].id), list) is True):
+            if not (
+                self.zone.networktype.lower() == 'basic' and isinstance(
+                    NetScaler.list(
+                        self.apiclient,
+                        physicalnetworkid=listphyntwk[0].id),
+                    list) is True):
                 self.assertEqual(
-                            ssvm.gateway,
-                            iprange.gateway,
-                            "Check gateway with that of corresponding ip range"
-                            )
+                    ssvm.gateway,
+                    iprange.gateway,
+                    "Check gateway with that of corresponding ip range"
+                )
 
-            #Fetch corresponding zone information from listZones
+            # Fetch corresponding zone information from listZones
             zone_response = list_zones(
-                                       self.apiclient,
-                                       id=ssvm.zoneid
-                                       )
+                self.apiclient,
+                id=ssvm.zoneid
+            )
             self.assertEqual(
-                            isinstance(zone_response, list),
-                            True,
-                            "Check list response returns a valid list"
-                        )
+                isinstance(zone_response, list),
+                True,
+                "Check list response returns a valid list"
+            )
             self.assertEqual(
-                            ssvm.dns1,
-                            zone_response[0].dns1,
-                            "Check DNS1 with that of corresponding zone"
-                            )
+                ssvm.dns1,
+                zone_response[0].dns1,
+                "Check DNS1 with that of corresponding zone"
+            )
 
             self.assertEqual(
-                            ssvm.dns2,
-                            zone_response[0].dns2,
-                            "Check DNS2 with that of corresponding zone"
-                            )
+                ssvm.dns2,
+                zone_response[0].dns2,
+                "Check DNS2 with that of corresponding zone"
+            )
         return
 
-    @attr(tags = ["advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false")
+    @attr(
+        tags=[
+            "advanced",
+            "advancedns",
+            "smoke",
+            "basic",
+            "sg"],
+        required_hardware="false")
     def test_02_list_cpvm_vm(self):
         """Test List console proxy VMs
         """
@@ -211,111 +237,123 @@ class TestSSVMs(cloudstackTestCase):
         # 5. DNS entries must match those given for the zone
 
         list_cpvm_response = list_ssvms(
-                                        self.apiclient,
-                                        systemvmtype='consoleproxy',
-                                        state='Running',
-                                        )
-        self.assertEqual(
-                            isinstance(list_cpvm_response, list),
-                            True,
-                            "Check list response returns a valid list"
-                        )
-        #Verify CPVM response
+            self.apiclient,
+            systemvmtype='consoleproxy',
+            state='Running',
+        )
+        self.assertEqual(
+            isinstance(list_cpvm_response, list),
+            True,
+            "Check list response returns a valid list"
+        )
+        # Verify CPVM response
         self.assertNotEqual(
-                            len(list_cpvm_response),
-                            0,
-                            "Check list System VMs response"
-                        )
+            len(list_cpvm_response),
+            0,
+            "Check list System VMs response"
+        )
         list_zones_response = list_zones(self.apiclient)
         # Number of Console Proxy VMs = No of Zones
-        
+
         self.assertEqual(
-                            isinstance(list_zones_response, list),
-                            True,
-                            "Check list response returns a valid list"
-                        )
-        
+            isinstance(list_zones_response, list),
+            True,
+            "Check list response returns a valid list"
+        )
+
         self.debug("Number of zones: %s" % len(list_zones_response))
         self.debug("Number of CPVMs: %s" % len(list_cpvm_response))
 
         self.assertEqual(
-                            len(list_cpvm_response),
-                            len(list_zones_response),
-                            "Check number of CPVMs with number of zones"
-                        )
-        #For each CPVM check private IP, public IP, link local IP and DNS
+            len(list_cpvm_response),
+            len(list_zones_response),
+            "Check number of CPVMs with number of zones"
+        )
+        # For each CPVM check private IP, public IP, link local IP and DNS
         for cpvm in list_cpvm_response:
 
             self.debug("CPVM state: %s" % cpvm.state)
             self.assertEqual(
-                            cpvm.state,
-                            'Running',
-                            "Check whether state of CPVM is running"
-                        )
+                cpvm.state,
+                'Running',
+                "Check whether state of CPVM is running"
+            )
 
             self.assertEqual(
-                            hasattr(cpvm, 'privateip'),
-                            True,
-                            "Check whether CPVM has private IP field"
-                            )
+                hasattr(cpvm, 'privateip'),
+                True,
+                "Check whether CPVM has private IP field"
+            )
 
             self.assertEqual(
-                            hasattr(cpvm, 'linklocalip'),
-                            True,
-                            "Check whether CPVM has link local IP field"
-                            )
+                hasattr(cpvm, 'linklocalip'),
+                True,
+                "Check whether CPVM has link local IP field"
+            )
 
             self.assertEqual(
-                            hasattr(cpvm, 'publicip'),
-                            True,
-                            "Check whether CPVM has public IP field"
-                            )
-            #Fetch corresponding ip ranges information from listVlanIpRanges
+                hasattr(cpvm, 'publicip'),
+                True,
+                "Check whether CPVM has public IP field"
+            )
+            # Fetch corresponding ip ranges information from listVlanIpRanges
             ipranges_response = list_vlan_ipranges(
-                                                   self.apiclient,
-                                                   zoneid=cpvm.zoneid
-                                                   )
+                self.apiclient,
+                zoneid=cpvm.zoneid
+            )
             self.assertEqual(
-                            isinstance(ipranges_response, list),
-                            True,
-                            "Check list response returns a valid list"
-                        )
+                isinstance(ipranges_response, list),
+                True,
+                "Check list response returns a valid list"
+            )
             iprange = ipranges_response[0]
 
-            #Fetch corresponding Physical Network of SSVM's Zone
+            # Fetch corresponding Physical Network of SSVM's Zone
             listphyntwk = PhysicalNetwork.list(
-                            self.apiclient,
-                            zoneid=cpvm.zoneid
-                            )
-            
+                self.apiclient,
+                zoneid=cpvm.zoneid
+            )
+
             # Execute the following assertion in all zones except EIP-ELB Zones
-            if not (self.zone.networktype.lower() == 'basic' and isinstance(NetScaler.list(self.apiclient,physicalnetworkid=listphyntwk[0].id), list) is True):
+            if not (
+                self.zone.networktype.lower() == 'basic' and isinstance(
+                    NetScaler.list(
+                        self.apiclient,
+                        physicalnetworkid=listphyntwk[0].id),
+                    list) is True):
                 self.assertEqual(
-                            cpvm.gateway,
-                            iprange.gateway,
-                            "Check gateway with that of corresponding ip range"
-                            )
+                    cpvm.gateway,
+                    iprange.gateway,
+                    "Check gateway with that of corresponding ip range"
+                )
 
-            #Fetch corresponding zone information from listZones
+            # Fetch corresponding zone information from listZones
             zone_response = list_zones(
-                                       self.apiclient,
-                                       id=cpvm.zoneid
-                                       )
+                self.apiclient,
+                id=cpvm.zoneid
+            )
 
             self.assertEqual(
-                            cpvm.dns1,
-                            zone_response[0].dns1,
-                            "Check DNS1 with that of corresponding zone"
-                            )
+                cpvm.dns1,
+                zone_response[0].dns1,
+                "Check DNS1 with that of corresponding zone"
+            )
 
             self.assertEqual(
-                            cpvm.dns2,
-                            zone_response[0].dns2,
-                            "Check DNS2 with that of corresponding zone"
-                            )
+                cpvm.dns2,
+                zone_response[0].dns2,
+                "Check DNS2 with that of corresponding zone"
+            )
         return
 
-    @attr(tags = ["advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="true")
+    @attr(
+        tags=[
+            "advanced",
+            "advancedns",
+            "smoke",
+            "basic",
+            "sg"],
+        required_hardware="true")
     def test_03_ssvm_internals(self):
         """Test SSVM Internals"""
 
@@ -329,106 +367,119 @@ class TestSSVMs(cloudstackTestCase):
         #    then the test is a failure
 
         list_ssvm_response = list_ssvms(
-                                        self.apiclient,
-                                        systemvmtype='secondarystoragevm',
-                                        state='Running',
-                                        zoneid=self.zone.id
-                                        )
-        self.assertEqual(
-                            isinstance(list_ssvm_response, list),
-                            True,
-                            "Check list response returns a valid list"
-                        )
+            self.apiclient,
+            systemvmtype='secondarystoragevm',
+            state='Running',
+            zoneid=self.zone.id
+        )
+        self.assertEqual(
+            isinstance(list_ssvm_response, list),
+            True,
+            "Check list response returns a valid list"
+        )
         ssvm = list_ssvm_response[0]
 
         hosts = list_hosts(
-                           self.apiclient,
-                           id=ssvm.hostid
-                           )
-        self.assertEqual(
-                            isinstance(hosts, list),
-                            True,
-                            "Check list response returns a valid list"
-                        )
+            self.apiclient,
+            id=ssvm.hostid
+        )
+        self.assertEqual(
+            isinstance(hosts, list),
+            True,
+            "Check list response returns a valid list"
+        )
         host = hosts[0]
 
         self.debug("Running SSVM check script")
 
         if self.hypervisor.lower() in ('vmware', 'hyperv'):
-            #SSH into SSVMs is done via management server for Vmware and Hyper-V
+            # SSH into SSVMs is done via management server for Vmware and
+            # Hyper-V
             result = get_process_status(
-                                self.apiclient.connection.mgtSvr,
-                                22,
-                                self.apiclient.connection.user,
-                                self.apiclient.connection.passwd,
-                                ssvm.privateip,
-                                "/usr/local/cloud/systemvm/ssvm-check.sh |grep -e ERROR -e WARNING -e FAIL",
-                                hypervisor=self.hypervisor
-                                )
+                self.apiclient.connection.mgtSvr,
+                22,
+                self.apiclient.connection.user,
+                self.apiclient.connection.passwd,
+                ssvm.privateip,
+                "/usr/local/cloud/systemvm/ssvm-check.sh |grep -e ERROR -e WARNING -e FAIL",
+                hypervisor=self.hypervisor)
         else:
             try:
-                host.user, host.passwd = get_host_credentials(self.config, host.ipaddress)
+                host.user, host.passwd = get_host_credentials(
+                    self.config, host.ipaddress)
                 result = get_process_status(
-                                    host.ipaddress,
-                                    22,
-                                    host.user,
-                                    host.passwd,
-                                    ssvm.linklocalip,
-                                    "/usr/local/cloud/systemvm/ssvm-check.sh |grep -e ERROR -e WARNING -e FAIL"
-                                )
+                    host.ipaddress,
+                    22,
+                    host.user,
+                    host.passwd,
+                    ssvm.linklocalip,
+                    "/usr/local/cloud/systemvm/ssvm-check.sh |grep -e ERROR -e WARNING -e FAIL")
             except KeyError:
-                self.skipTest("Marvin configuration has no host credentials to check router services")
+                self.skipTest(
+                    "Marvin configuration has no host\
+                            credentials to check router services")
         res = str(result)
         self.debug("SSVM script output: %s" % res)
 
         self.assertEqual(
-                            res.count("ERROR"),
-                            1,
-                            "Check for Errors in tests"
-                        )
+            res.count("ERROR"),
+            1,
+            "Check for Errors in tests"
+        )
 
         self.assertEqual(
-                            res.count("WARNING"),
-                            1,
-                            "Check for warnings in tests"
-                        )
+            res.count("WARNING"),
+            1,
+            "Check for warnings in tests"
+        )
 
-        #Check status of cloud service
+        # Check status of cloud service
         if self.hypervisor.lower() in ('vmware', 'hyperv'):
-            #SSH into SSVMs is done via management server for Vmware and Hyper-V
+            # SSH into SSVMs is done via management server for Vmware and
+            # Hyper-V
             result = get_process_status(
-                                self.apiclient.connection.mgtSvr,
-                                22,
-                                self.apiclient.connection.user,
-                                self.apiclient.connection.passwd,
-                                ssvm.privateip,
-                                "service cloud status",
-                                hypervisor=self.hypervisor
-                                )
+                self.apiclient.connection.mgtSvr,
+                22,
+                self.apiclient.connection.user,
+                self.apiclient.connection.passwd,
+                ssvm.privateip,
+                "service cloud status",
+                hypervisor=self.hypervisor
+            )
         else:
             try:
-                host.user, host.passwd = get_host_credentials(self.config, host.ipaddress)
+                host.user, host.passwd = get_host_credentials(
+                    self.config, host.ipaddress)
                 result = get_process_status(
-                                    host.ipaddress,
-                                    22,
-                                    host.user,
-                                    host.passwd,
-                                    ssvm.linklocalip,
-                                    "service cloud status"
-                                    )
+                    host.ipaddress,
+                    22,
+                    host.user,
+                    host.passwd,
+                    ssvm.linklocalip,
+                    "service cloud status"
+                )
             except KeyError:
-                self.skipTest("Marvin configuration has no host credentials to check router services")
+                self.skipTest(
+                    "Marvin configuration has no host\
+                            credentials to check router services")
         res = str(result)
         self.debug("Cloud Process status: %s" % res)
         # cloud.com service (type=secstorage) is running: process id: 2346
         self.assertEqual(
-                            res.count("is running"),
-                            1,
-                            "Check cloud service is running or not"
-                        )
+            res.count("is running"),
+            1,
+            "Check cloud service is running or not"
+        )
         return
 
-    @attr(tags = ["advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="true")
+    @attr(
+        tags=[
+            "advanced",
+            "advancedns",
+            "smoke",
+            "basic",
+            "sg"],
+        required_hardware="true")
     def test_04_cpvm_internals(self):
         """Test CPVM Internals"""
 
@@ -440,78 +491,89 @@ class TestSSVMs(cloudstackTestCase):
         #    running
 
         list_cpvm_response = list_ssvms(
-                                        self.apiclient,
-                                        systemvmtype='consoleproxy',
-                                        state='Running',
-                                        zoneid=self.zone.id
-                                        )
-        self.assertEqual(
-                            isinstance(list_cpvm_response, list),
-                            True,
-                            "Check list response returns a valid list"
-                        )
+            self.apiclient,
+            systemvmtype='consoleproxy',
+            state='Running',
+            zoneid=self.zone.id
+        )
+        self.assertEqual(
+            isinstance(list_cpvm_response, list),
+            True,
+            "Check list response returns a valid list"
+        )
         cpvm = list_cpvm_response[0]
 
         hosts = list_hosts(
-                           self.apiclient,
-                           id=cpvm.hostid
-                           )
-        self.assertEqual(
-                            isinstance(hosts, list),
-                            True,
-                            "Check list response returns a valid list"
-                        )
+            self.apiclient,
+            id=cpvm.hostid
+        )
+        self.assertEqual(
+            isinstance(hosts, list),
+            True,
+            "Check list response returns a valid list"
+        )
         host = hosts[0]
 
         try:
-            telnet = telnetlib.Telnet(
-                                      str(self.apiclient.connection.mgtSvr),
-                                      '8250'
-                                      )
-            self.debug("Telnet management server (IP: %s)" % 
-                                            self.apiclient.connection.mgtSvr)
+            telnetlib.Telnet(
+                str(self.apiclient.connection.mgtSvr),
+                '8250'
+            )
+            self.debug("Telnet management server (IP: %s)" %
+                       self.apiclient.connection.mgtSvr)
         except Exception as e:
             self.fail(
-                    "Telnet Access failed for %s: %s" % \
-                    (self.apiclient.connection.mgtSvr, e)
-                    )
+                "Telnet Access failed for %s: %s" %
+                (self.apiclient.connection.mgtSvr, e)
+            )
 
         self.debug("Checking cloud process status")
 
         if self.hypervisor.lower() in ('vmware', 'hyperv'):
-            #SSH into SSVMs is done via management server for Vmware and Hyper-V
+            # SSH into SSVMs is done via management server for Vmware and
+            # Hyper-V
             result = get_process_status(
-                                self.apiclient.connection.mgtSvr,
-                                22,
-                                self.apiclient.connection.user,
-                                self.apiclient.connection.passwd,
-                                cpvm.privateip,
-                                "service cloud status",
-                                hypervisor=self.hypervisor
-                                )
+                self.apiclient.connection.mgtSvr,
+                22,
+                self.apiclient.connection.user,
+                self.apiclient.connection.passwd,
+                cpvm.privateip,
+                "service cloud status",
+                hypervisor=self.hypervisor
+            )
         else:
             try:
-                host.user, host.passwd = get_host_credentials(self.config, host.ipaddress)
+                host.user, host.passwd = get_host_credentials(
+                    self.config, host.ipaddress)
                 result = get_process_status(
-                                    host.ipaddress,
-                                    22,
-                                    host.user,
-                                    host.passwd,
-                                    cpvm.linklocalip,
-                                    "service cloud status"
-                                    )
+                    host.ipaddress,
+                    22,
+                    host.user,
+                    host.passwd,
+                    cpvm.linklocalip,
+                    "service cloud status"
+                )
             except KeyError:
-                self.skipTest("Marvin configuration has no host credentials to check router services")
+                self.skipTest(
+                    "Marvin configuration has no host\
+                            credentials to check router services")
         res = str(result)
         self.debug("Cloud Process status: %s" % res)
         self.assertEqual(
-                            res.count("is running"),
-                            1,
-                            "Check cloud service is running or not"
-                        )
+            res.count("is running"),
+            1,
+            "Check cloud service is running or not"
+        )
         return
 
-    @attr(tags = ["advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="true")
+    @attr(
+        tags=[
+            "advanced",
+            "advancedns",
+            "smoke",
+            "basic",
+            "sg"],
+        required_hardware="true")
     def test_05_stop_ssvm(self):
         """Test stop SSVM
         """
@@ -524,70 +586,81 @@ class TestSSVMs(cloudstackTestCase):
         # 3. If either of the two above steps fail the test is a failure
 
         list_ssvm_response = list_ssvms(
-                                        self.apiclient,
-                                        systemvmtype='secondarystoragevm',
-                                        state='Running',
-                                        zoneid=self.zone.id
-                                        )
-        self.assertEqual(
-                            isinstance(list_ssvm_response, list),
-                            True,
-                            "Check list response returns a valid list"
-                        )
+            self.apiclient,
+            systemvmtype='secondarystoragevm',
+            state='Running',
+            zoneid=self.zone.id
+        )
+        self.assertEqual(
+            isinstance(list_ssvm_response, list),
+            True,
+            "Check list response returns a valid list"
+        )
         ssvm = list_ssvm_response[0]
 
         hosts = list_hosts(
-                           self.apiclient,
-                           id=ssvm.hostid
-                           )
-        self.assertEqual(
-                            isinstance(hosts, list),
-                            True,
-                            "Check list response returns a valid list"
-                        )
-        host = hosts[0]
+            self.apiclient,
+            id=ssvm.hostid
+        )
+        self.assertEqual(
+            isinstance(hosts, list),
+            True,
+            "Check list response returns a valid list"
+        )
 
         self.debug("Stopping SSVM: %s" % ssvm.id)
         cmd = stopSystemVm.stopSystemVmCmd()
         cmd.id = ssvm.id
         self.apiclient.stopSystemVm(cmd)
-        
+
         timeout = self.services["timeout"]
         while True:
             list_ssvm_response = list_ssvms(
-                                        self.apiclient,
-                                        id=ssvm.id
-                                        )
+                self.apiclient,
+                id=ssvm.id
+            )
             if isinstance(list_ssvm_response, list):
                 if list_ssvm_response[0].state == 'Running':
                     break
             if timeout == 0:
                 raise Exception("List SSVM call failed!")
-            
+
             time.sleep(self.services["sleep"])
             timeout = timeout - 1
-        
+
         self.assertEqual(
-                            isinstance(list_ssvm_response, list),
-                            True,
-                            "Check list response returns a valid list"
-                        )
+            isinstance(list_ssvm_response, list),
+            True,
+            "Check list response returns a valid list"
+        )
         ssvm_response = list_ssvm_response[0]
         self.debug("SSVM state after debug: %s" % ssvm_response.state)
         self.assertEqual(
-                        ssvm_response.state,
-                        'Running',
-                        "Check whether SSVM is running or not"
-                        )
+            ssvm_response.state,
+            'Running',
+            "Check whether SSVM is running or not"
+        )
         # Wait for the agent to be up
         self.waitForSystemVMAgent(ssvm_response.name)
 
         # Call above tests to ensure SSVM is properly running
         self.test_01_list_sec_storage_vm()
+
+        # Wait for some time before running diagnostic scripts on SSVM
+        # as it may take some time to start all service properly
+        time.sleep(int(self.services["configurableData"]["systemVmDelay"]))
+
         self.test_03_ssvm_internals()
         return
 
-    @attr(tags = ["advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="true")
+    @attr(
+        tags=[
+            "advanced",
+            "advancedns",
+            "smoke",
+            "basic",
+            "sg"],
+        required_hardware="true")
     def test_06_stop_cpvm(self):
         """Test stop CPVM
         """
@@ -600,28 +673,27 @@ class TestSSVMs(cloudstackTestCase):
         # 3. If either of the two above steps fail the test is a failure
 
         list_cpvm_response = list_ssvms(
-                                        self.apiclient,
-                                        systemvmtype='consoleproxy',
-                                        state='Running',
-                                        zoneid=self.zone.id
-                                        )
-        self.assertEqual(
-                            isinstance(list_cpvm_response, list),
-                            True,
-                            "Check list response returns a valid list"
-                        )
+            self.apiclient,
+            systemvmtype='consoleproxy',
+            state='Running',
+            zoneid=self.zone.id
+        )
+        self.assertEqual(
+            isinstance(list_cpvm_response, list),
+            True,
+            "Check list response returns a valid list"
+        )
         cpvm = list_cpvm_response[0]
 
         hosts = list_hosts(
-                           self.apiclient,
-                           id=cpvm.hostid
-                           )
-        self.assertEqual(
-                            isinstance(hosts, list),
-                            True,
-                            "Check list response returns a valid list"
-                        )
-        host = hosts[0]
+            self.apiclient,
+            id=cpvm.hostid
+        )
+        self.assertEqual(
+            isinstance(hosts, list),
+            True,
+            "Check list response returns a valid list"
+        )
 
         self.debug("Stopping CPVM: %s" % cpvm.id)
         cmd = stopSystemVm.stopSystemVmCmd()
@@ -631,15 +703,15 @@ class TestSSVMs(cloudstackTestCase):
         timeout = self.services["timeout"]
         while True:
             list_cpvm_response = list_ssvms(
-                                        self.apiclient,
-                                        id=cpvm.id
-                                        )
+                self.apiclient,
+                id=cpvm.id
+            )
             if isinstance(list_cpvm_response, list):
                 if list_cpvm_response[0].state == 'Running':
                     break
             if timeout == 0:
                 raise Exception("List CPVM call failed!")
-            
+
             time.sleep(self.services["sleep"])
             timeout = timeout - 1
 
@@ -648,20 +720,32 @@ class TestSSVMs(cloudstackTestCase):
         self.debug("CPVM state after debug: %s" % cpvm_response.state)
 
         self.assertEqual(
-                        cpvm_response.state,
-                        'Running',
-                        "Check whether CPVM is running or not"
-                        )
+            cpvm_response.state,
+            'Running',
+            "Check whether CPVM is running or not"
+        )
 
         # Wait for the agent to be up
         self.waitForSystemVMAgent(cpvm_response.name)
 
         # Call above tests to ensure CPVM is properly running
         self.test_02_list_cpvm_vm()
+
+        # Wait for some time before running diagnostic scripts on SSVM
+        # as it may take some time to start all service properly
+        time.sleep(int(self.services["configurableData"]["systemVmDelay"]))
+
         self.test_04_cpvm_internals()
         return
 
-    @attr(tags = ["advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="true")
+    @attr(
+        tags=[
+            "advanced",
+            "advancedns",
+            "smoke",
+            "basic",
+            "sg"],
+        required_hardware="true")
     def test_07_reboot_ssvm(self):
         """Test reboot SSVM
         """
@@ -672,32 +756,31 @@ class TestSSVMs(cloudstackTestCase):
         # 3. The cloud process should still be running within the SSVM
 
         list_ssvm_response = list_ssvms(
-                                        self.apiclient,
-                                        systemvmtype='secondarystoragevm',
-                                        state='Running',
-                                        zoneid=self.zone.id
-                                        )
-    
-        self.assertEqual(
-                            isinstance(list_ssvm_response, list),
-                            True,
-                            "Check list response returns a valid list"
-                        )
-        
+            self.apiclient,
+            systemvmtype='secondarystoragevm',
+            state='Running',
+            zoneid=self.zone.id
+        )
+
+        self.assertEqual(
+            isinstance(list_ssvm_response, list),
+            True,
+            "Check list response returns a valid list"
+        )
+
         ssvm_response = list_ssvm_response[0]
 
         hosts = list_hosts(
-                           self.apiclient,
-                           id=ssvm_response.hostid
-                           )
-        self.assertEqual(
-                            isinstance(hosts, list),
-                            True,
-                            "Check list response returns a valid list"
-                        )
-        host = hosts[0]
+            self.apiclient,
+            id=ssvm_response.hostid
+        )
+        self.assertEqual(
+            isinstance(hosts, list),
+            True,
+            "Check list response returns a valid list"
+        )
 
-        #Store the public & private IP values before reboot
+        # Store the public & private IP values before reboot
         old_public_ip = ssvm_response.publicip
         old_private_ip = ssvm_response.privateip
 
@@ -709,46 +792,57 @@ class TestSSVMs(cloudstackTestCase):
         timeout = self.services["timeout"]
         while True:
             list_ssvm_response = list_ssvms(
-                                        self.apiclient,
-                                        id=ssvm_response.id
-                                        )
+                self.apiclient,
+                id=ssvm_response.id
+            )
             if isinstance(list_ssvm_response, list):
                 if list_ssvm_response[0].state == 'Running':
                     break
             if timeout == 0:
                 raise Exception("List SSVM call failed!")
-            
+
             time.sleep(self.services["sleep"])
             timeout = timeout - 1
 
         ssvm_response = list_ssvm_response[0]
         self.debug("SSVM State: %s" % ssvm_response.state)
         self.assertEqual(
-                        'Running',
-                        str(ssvm_response.state),
-                        "Check whether CPVM is running or not"
-                        )
+            'Running',
+            str(ssvm_response.state),
+            "Check whether CPVM is running or not"
+        )
 
         self.assertEqual(
-                    ssvm_response.publicip,
-                    old_public_ip,
-                    "Check Public IP after reboot with that of before reboot"
-                    )
+            ssvm_response.publicip,
+            old_public_ip,
+            "Check Public IP after reboot with that of before reboot"
+        )
 
         self.assertEqual(
-                    ssvm_response.privateip,
-                    old_private_ip,
-                    "Check Private IP after reboot with that of before reboot"
-                    )
+            ssvm_response.privateip,
+            old_private_ip,
+            "Check Private IP after reboot with that of before reboot"
+        )
 
         # Wait for the agent to be up
         self.waitForSystemVMAgent(ssvm_response.name)
 
-        #Call to verify cloud process is running
+        # Wait for some time before running diagnostic scripts on SSVM
+        # as it may take some time to start all service properly
+        time.sleep(int(self.services["configurableData"]["systemVmDelay"]))
+
+        # Call to verify cloud process is running
         self.test_03_ssvm_internals()
         return
 
-    @attr(tags = ["advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="true")
+    @attr(
+        tags=[
+            "advanced",
+            "advancedns",
+            "smoke",
+            "basic",
+            "sg"],
+        required_hardware="true")
     def test_08_reboot_cpvm(self):
         """Test reboot CPVM
         """
@@ -758,32 +852,30 @@ class TestSSVMs(cloudstackTestCase):
         #    the same before and after reboot
         # 3. the cloud process should still be running within the CPVM
 
-
         list_cpvm_response = list_ssvms(
-                                        self.apiclient,
-                                        systemvmtype='consoleproxy',
-                                        state='Running',
-                                        zoneid=self.zone.id
-                                        )
-        self.assertEqual(
-                            isinstance(list_cpvm_response, list),
-                            True,
-                            "Check list response returns a valid list"
-                        )
+            self.apiclient,
+            systemvmtype='consoleproxy',
+            state='Running',
+            zoneid=self.zone.id
+        )
+        self.assertEqual(
+            isinstance(list_cpvm_response, list),
+            True,
+            "Check list response returns a valid list"
+        )
         cpvm_response = list_cpvm_response[0]
 
         hosts = list_hosts(
-                           self.apiclient,
-                           id=cpvm_response.hostid
-                           )
-        self.assertEqual(
-                            isinstance(hosts, list),
-                            True,
-                            "Check list response returns a valid list"
-                        )
-        host = hosts[0]
+            self.apiclient,
+            id=cpvm_response.hostid
+        )
+        self.assertEqual(
+            isinstance(hosts, list),
+            True,
+            "Check list response returns a valid list"
+        )
 
-        #Store the public & private IP values before reboot
+        # Store the public & private IP values before reboot
         old_public_ip = cpvm_response.publicip
         old_private_ip = cpvm_response.privateip
 
@@ -796,15 +888,15 @@ class TestSSVMs(cloudstackTestCase):
         timeout = self.services["timeout"]
         while True:
             list_cpvm_response = list_ssvms(
-                                        self.apiclient,
-                                        id=cpvm_response.id
-                                        )
+                self.apiclient,
+                id=cpvm_response.id
+            )
             if isinstance(list_cpvm_response, list):
                 if list_cpvm_response[0].state == 'Running':
                     break
             if timeout == 0:
                 raise Exception("List CPVM call failed!")
-            
+
             time.sleep(self.services["sleep"])
             timeout = timeout - 1
 
@@ -812,30 +904,41 @@ class TestSSVMs(cloudstackTestCase):
 
         self.debug("CPVM state: %s" % cpvm_response.state)
         self.assertEqual(
-                        'Running',
-                        str(cpvm_response.state),
-                        "Check whether CPVM is running or not"
-                        )
+            'Running',
+            str(cpvm_response.state),
+            "Check whether CPVM is running or not"
+        )
 
         self.assertEqual(
-                    cpvm_response.publicip,
-                    old_public_ip,
-                    "Check Public IP after reboot with that of before reboot"
-                    )
+            cpvm_response.publicip,
+            old_public_ip,
+            "Check Public IP after reboot with that of before reboot"
+        )
 
         self.assertEqual(
-                    cpvm_response.privateip,
-                    old_private_ip,
-                    "Check Private IP after reboot with that of before reboot"
-                    )
+            cpvm_response.privateip,
+            old_private_ip,
+            "Check Private IP after reboot with that of before reboot"
+        )
         # Wait for the agent to be up
         self.waitForSystemVMAgent(cpvm_response.name)
 
-        #Call to verify cloud process is running
+        # Wait for some time before running diagnostic scripts on SSVM
+        # as it may take some time to start all service properly
+        time.sleep(int(self.services["configurableData"]["systemVmDelay"]))
+
+        # Call to verify cloud process is running
         self.test_04_cpvm_internals()
         return
 
-    @attr(tags = ["advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="true")
+    @attr(
+        tags=[
+            "advanced",
+            "advancedns",
+            "smoke",
+            "basic",
+            "sg"],
+        required_hardware="true")
     def test_09_destroy_ssvm(self):
         """Test destroy SSVM
         """
@@ -848,16 +951,16 @@ class TestSSVMs(cloudstackTestCase):
         # 4. cloud process within SSVM must be up and running
 
         list_ssvm_response = list_ssvms(
-                                        self.apiclient,
-                                        systemvmtype='secondarystoragevm',
-                                        state='Running',
-                                        zoneid=self.zone.id
-                                        )
-        self.assertEqual(
-                            isinstance(list_ssvm_response, list),
-                            True,
-                            "Check list response returns a valid list"
-                        )
+            self.apiclient,
+            systemvmtype='secondarystoragevm',
+            state='Running',
+            zoneid=self.zone.id
+        )
+        self.assertEqual(
+            isinstance(list_ssvm_response, list),
+            True,
+            "Check list response returns a valid list"
+        )
         ssvm_response = list_ssvm_response[0]
 
         old_name = ssvm_response.name
@@ -870,16 +973,16 @@ class TestSSVMs(cloudstackTestCase):
         timeout = self.services["timeout"]
         while True:
             list_ssvm_response = list_ssvms(
-                                        self.apiclient,
-                                        zoneid=self.zone.id,
-                                        systemvmtype='secondarystoragevm'
-                                        )
+                self.apiclient,
+                zoneid=self.zone.id,
+                systemvmtype='secondarystoragevm'
+            )
             if isinstance(list_ssvm_response, list):
                 if list_ssvm_response[0].state == 'Running':
                     break
             if timeout == 0:
                 raise Exception("List SSVM call failed!")
-            
+
             time.sleep(self.services["sleep"])
             timeout = timeout - 1
 
@@ -888,36 +991,47 @@ class TestSSVMs(cloudstackTestCase):
         # Verify Name, Public IP, Private IP and Link local IP
         # for newly created SSVM
         self.assertNotEqual(
-                        ssvm_response.name,
-                        old_name,
-                        "Check SSVM new name with name of destroyed SSVM"
-                        )
+            ssvm_response.name,
+            old_name,
+            "Check SSVM new name with name of destroyed SSVM"
+        )
         self.assertEqual(
-                        hasattr(ssvm_response, 'privateip'),
-                        True,
-                        "Check whether SSVM has private IP field"
-                        )
+            hasattr(ssvm_response, 'privateip'),
+            True,
+            "Check whether SSVM has private IP field"
+        )
 
         self.assertEqual(
-                        hasattr(ssvm_response, 'linklocalip'),
-                        True,
-                        "Check whether SSVM has link local IP field"
-                        )
+            hasattr(ssvm_response, 'linklocalip'),
+            True,
+            "Check whether SSVM has link local IP field"
+        )
 
         self.assertEqual(
-                        hasattr(ssvm_response, 'publicip'),
-                        True,
-                        "Check whether SSVM has public IP field"
-                        )
-        
+            hasattr(ssvm_response, 'publicip'),
+            True,
+            "Check whether SSVM has public IP field"
+        )
+
         # Wait for the agent to be up
         self.waitForSystemVMAgent(ssvm_response.name)
 
-        #Call to verify cloud process is running
+        # Wait for some time before running diagnostic scripts on SSVM
+        # as it may take some time to start all service properly
+        time.sleep(int(self.services["configurableData"]["systemVmDelay"]))
+
+        # Call to verify cloud process is running
         self.test_03_ssvm_internals()
         return
 
-    @attr(tags = ["advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="true")
+    @attr(
+        tags=[
+            "advanced",
+            "advancedns",
+            "smoke",
+            "basic",
+            "sg"],
+        required_hardware="true")
     def test_10_destroy_cpvm(self):
         """Test destroy CPVM
         """
@@ -930,15 +1044,15 @@ class TestSSVMs(cloudstackTestCase):
         # 4. cloud process within CPVM must be up and running
 
         list_cpvm_response = list_ssvms(
-                                        self.apiclient,
-                                        systemvmtype='consoleproxy',
-                                        zoneid=self.zone.id
-                                        )
-        self.assertEqual(
-                            isinstance(list_cpvm_response, list),
-                            True,
-                            "Check list response returns a valid list"
-                        )
+            self.apiclient,
+            systemvmtype='consoleproxy',
+            zoneid=self.zone.id
+        )
+        self.assertEqual(
+            isinstance(list_cpvm_response, list),
+            True,
+            "Check list response returns a valid list"
+        )
         cpvm_response = list_cpvm_response[0]
 
         old_name = cpvm_response.name
@@ -951,16 +1065,16 @@ class TestSSVMs(cloudstackTestCase):
         timeout = self.services["timeout"]
         while True:
             list_cpvm_response = list_ssvms(
-                                        self.apiclient,
-                                        systemvmtype='consoleproxy',
-                                        zoneid=self.zone.id
-                                        )
+                self.apiclient,
+                systemvmtype='consoleproxy',
+                zoneid=self.zone.id
+            )
             if isinstance(list_cpvm_response, list):
                 if list_cpvm_response[0].state == 'Running':
                     break
             if timeout == 0:
                 raise Exception("List CPVM call failed!")
-            
+
             time.sleep(self.services["sleep"])
             timeout = timeout - 1
 
@@ -969,31 +1083,35 @@ class TestSSVMs(cloudstackTestCase):
         # Verify Name, Public IP, Private IP and Link local IP
         # for newly created CPVM
         self.assertNotEqual(
-                        cpvm_response.name,
-                        old_name,
-                        "Check SSVM new name with name of destroyed CPVM"
-                        )
+            cpvm_response.name,
+            old_name,
+            "Check SSVM new name with name of destroyed CPVM"
+        )
         self.assertEqual(
-                        hasattr(cpvm_response, 'privateip'),
-                        True,
-                        "Check whether CPVM has private IP field"
-                        )
+            hasattr(cpvm_response, 'privateip'),
+            True,
+            "Check whether CPVM has private IP field"
+        )
 
         self.assertEqual(
-                        hasattr(cpvm_response, 'linklocalip'),
-                        True,
-                        "Check whether CPVM has link local IP field"
-                        )
+            hasattr(cpvm_response, 'linklocalip'),
+            True,
+            "Check whether CPVM has link local IP field"
+        )
 
         self.assertEqual(
-                        hasattr(cpvm_response, 'publicip'),
-                        True,
-                        "Check whether CPVM has public IP field"
-                        )
-                
+            hasattr(cpvm_response, 'publicip'),
+            True,
+            "Check whether CPVM has public IP field"
+        )
+
         # Wait for the agent to be up
         self.waitForSystemVMAgent(cpvm_response.name)
 
-        #Call to verify cloud process is running
+        # Wait for some time before running diagnostic scripts on SSVM
+        # as it may take some time to start all service properly
+        time.sleep(int(self.services["configurableData"]["systemVmDelay"]))
+
+        # Call to verify cloud process is running
         self.test_04_cpvm_internals()
         return

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/77bd069c/tools/marvin/marvin/config/test_data.py
----------------------------------------------------------------------
diff --git a/tools/marvin/marvin/config/test_data.py b/tools/marvin/marvin/config/test_data.py
index 9add0fa..f123dd0 100644
--- a/tools/marvin/marvin/config/test_data.py
+++ b/tools/marvin/marvin/config/test_data.py
@@ -1468,6 +1468,7 @@ test_data = {
                  "publicport": 22,
                  "username": "root",
                  "password": "password",
-        }
+        },
+        "systemVmDelay": 120
     }
 }


[40/50] [abbrv] git commit: updated refs/heads/reporter to b26f3fc

Posted by wi...@apache.org.
CLOUDSTACK-8122. Handle NPE thrown during migration failures.
When migration fails, throw the exception instead of returning null.
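
As a rough illustration of the pattern this commit adopts (a minimal sketch, not the actual VolumeOrchestrator code; the class and method names below are made up), the idea is to stop swallowing the failure and returning null, and instead wrap the checked exception in a runtime exception so the caller fails fast rather than hitting an NPE later:

import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

class MigrationResultSketch {

    static class CloudRuntimeException extends RuntimeException {
        CloudRuntimeException(String msg) { super(msg); }
    }

    // Wait for an async migration result; propagate failures instead of returning null.
    static <T> T waitForResult(Future<T> future) {
        try {
            return future.get();
        } catch (InterruptedException | ExecutionException e) {
            // Before the fix: log and return null, which later surfaced as an NPE.
            // After the fix: surface the failure to the caller immediately.
            throw new CloudRuntimeException(e.getMessage());
        }
    }
}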


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/a5a65c7b
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/a5a65c7b
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/a5a65c7b

Branch: refs/heads/reporter
Commit: a5a65c7b551ee5cc32588997937267b716eff681
Parents: ac491c9
Author: Likitha Shetty <li...@citrix.com>
Authored: Fri Dec 5 16:00:21 2014 +0530
Committer: Likitha Shetty <li...@citrix.com>
Committed: Wed Dec 24 13:33:11 2014 +0530

----------------------------------------------------------------------
 .../orchestration/VolumeOrchestrator.java       |  4 +--
 .../com/cloud/storage/VolumeApiServiceImpl.java | 26 ++++++++++++--------
 2 files changed, 18 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/a5a65c7b/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
----------------------------------------------------------------------
diff --git a/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java b/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
index e04bd6d..1b87ccf 100644
--- a/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
+++ b/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
@@ -937,10 +937,10 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
             return result.getVolume();
         } catch (InterruptedException e) {
             s_logger.debug("migrate volume failed", e);
-            return null;
+            throw new CloudRuntimeException(e.getMessage());
         } catch (ExecutionException e) {
             s_logger.debug("migrate volume failed", e);
-            return null;
+            throw new CloudRuntimeException(e.getMessage());
         }
     }
 

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/a5a65c7b/server/src/com/cloud/storage/VolumeApiServiceImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/com/cloud/storage/VolumeApiServiceImpl.java
index c1652ed..7fa600a 100644
--- a/server/src/com/cloud/storage/VolumeApiServiceImpl.java
+++ b/server/src/com/cloud/storage/VolumeApiServiceImpl.java
@@ -1795,6 +1795,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
                 if (jobResult != null) {
                     if (jobResult instanceof ConcurrentOperationException)
                         throw (ConcurrentOperationException)jobResult;
+                    else if (jobResult instanceof RuntimeException)
+                        throw (RuntimeException)jobResult;
                     else if (jobResult instanceof Throwable)
                         throw new RuntimeException("Unexpected exception", (Throwable)jobResult);
                 }
@@ -1817,35 +1819,39 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
         assert (destPool != null);
 
         Volume newVol = null;
-        if (liveMigrateVolume) {
-            newVol = liveMigrateVolume(vol, destPool);
-        } else {
-            try {
+        try {
+            if (liveMigrateVolume) {
+                newVol = liveMigrateVolume(vol, destPool);
+            } else {
                 newVol = _volumeMgr.migrateVolume(vol, destPool);
-            } catch (StorageUnavailableException e) {
-                s_logger.debug("Failed to migrate volume", e);
             }
+        } catch (StorageUnavailableException e) {
+            s_logger.debug("Failed to migrate volume", e);
+            throw new CloudRuntimeException(e.getMessage());
+        }  catch (Exception e) {
+            s_logger.debug("Failed to migrate volume", e);
+            throw new CloudRuntimeException(e.getMessage());
         }
         return newVol;
     }
 
     @DB
-    protected Volume liveMigrateVolume(Volume volume, StoragePool destPool) {
+    protected Volume liveMigrateVolume(Volume volume, StoragePool destPool) throws StorageUnavailableException {
         VolumeInfo vol = volFactory.getVolume(volume.getId());
         AsyncCallFuture<VolumeApiResult> future = volService.migrateVolume(vol, (DataStore)destPool);
         try {
             VolumeApiResult result = future.get();
             if (result.isFailed()) {
                 s_logger.debug("migrate volume failed:" + result.getResult());
-                return null;
+                throw new StorageUnavailableException("Migrate volume failed: " + result.getResult(), destPool.getId());
             }
             return result.getVolume();
         } catch (InterruptedException e) {
             s_logger.debug("migrate volume failed", e);
-            return null;
+            throw new CloudRuntimeException(e.getMessage());
         } catch (ExecutionException e) {
             s_logger.debug("migrate volume failed", e);
-            return null;
+            throw new CloudRuntimeException(e.getMessage());
         }
     }
 


[29/50] [abbrv] git commit: updated refs/heads/reporter to b26f3fc

Posted by wi...@apache.org.
Revert "CLOUDSTACK-7762 -[Automation] - Fix test failure for test_02_revert_vm_snapshots in smoke/test_vm_snapshots.py"

This reverts commit f510ef995baaa9addefc22ff0330cd51dee1dd95.


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/0d75682a
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/0d75682a
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/0d75682a

Branch: refs/heads/reporter
Commit: 0d75682a341162b45afd1bc7784a4aaffd1a5aa6
Parents: cb211f1
Author: SrikanteswaraRao Talluri <ta...@apache.org>
Authored: Tue Dec 23 14:29:47 2014 +0530
Committer: SrikanteswaraRao Talluri <ta...@apache.org>
Committed: Tue Dec 23 14:46:10 2014 +0530

----------------------------------------------------------------------
 test/integration/smoke/test_vm_snapshots.py | 3 ---
 1 file changed, 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/0d75682a/test/integration/smoke/test_vm_snapshots.py
----------------------------------------------------------------------
diff --git a/test/integration/smoke/test_vm_snapshots.py b/test/integration/smoke/test_vm_snapshots.py
index ef1353a..131da99 100644
--- a/test/integration/smoke/test_vm_snapshots.py
+++ b/test/integration/smoke/test_vm_snapshots.py
@@ -202,9 +202,6 @@ class TestVmSnapshot(cloudstackTestCase):
             "Check the snapshot of vm is ready!"
         )
 
-	# Stop Virtual machine befor reverting VM to a snapshot taken without memory  	
-	self.virtual_machine.stop(self.apiclient)
-
         VmSnapshot.revertToSnapshot(self.apiclient, list_snapshot_response[0].id)
 
         list_vm_response = list_virtual_machines(


[14/50] [abbrv] git commit: updated refs/heads/reporter to b26f3fc

Posted by wi...@apache.org.
CLOUDSTACK-8103: Vmsync marks VM as stopped even after failing to stop it in HV
During vmsync, if the StopCommand (issued as part of a PowerOff/PowerMissing report) fails to stop the VM (because the VM is
still running on the hypervisor), don't transition the VM state to "Stopped" in the CS db. Also added a check to throw a
ConcurrentOperationException if the VM state is not "Running" after the start operation.


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/331e257c
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/331e257c
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/331e257c

Branch: refs/heads/reporter
Commit: 331e257ca0ecb76e48f56422797e547d16d6944b
Parents: e559b15
Author: Koushik Das <ko...@apache.org>
Authored: Mon Dec 22 10:52:13 2014 +0530
Committer: Koushik Das <ko...@apache.org>
Committed: Mon Dec 22 10:52:13 2014 +0530

----------------------------------------------------------------------
 .../src/com/cloud/vm/VirtualMachineManagerImpl.java           | 5 ++++-
 server/src/com/cloud/vm/UserVmManagerImpl.java                | 7 +++++++
 2 files changed, 11 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/331e257c/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java
----------------------------------------------------------------------
diff --git a/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java
index 6d513d5..caf374e 100644
--- a/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java
+++ b/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java
@@ -3696,7 +3696,10 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
 
             VirtualMachineGuru vmGuru = getVmGuru(vm);
             VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm);
-            sendStop(vmGuru, profile, true, true);
+            if (!sendStop(vmGuru, profile, true, true)) {
+                // In case StopCommand fails, don't proceed further
+                return;
+            }
 
             try {
                 stateTransitTo(vm, VirtualMachine.Event.FollowAgentPowerOffReport, null);

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/331e257c/server/src/com/cloud/vm/UserVmManagerImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/vm/UserVmManagerImpl.java b/server/src/com/cloud/vm/UserVmManagerImpl.java
index 674a4c1..03c9a23 100644
--- a/server/src/com/cloud/vm/UserVmManagerImpl.java
+++ b/server/src/com/cloud/vm/UserVmManagerImpl.java
@@ -3221,6 +3221,13 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
         try {
             vmParamPair = startVirtualMachine(vmId, hostId, additonalParams, deploymentPlannerToUse);
             vm = vmParamPair.first();
+
+            // At this point VM should be in "Running" state
+            UserVmVO tmpVm = _vmDao.findById(vm.getId());
+            if (!tmpVm.getState().equals(State.Running)) {
+                // Some other thread changed state of VM, possibly vmsync
+                throw new ConcurrentOperationException("VM " + tmpVm + " unexpectedly went to " + tmpVm.getState() + " state");
+            }
         } finally {
             updateVmStateForFailedVmCreation(vm.getId(), hostId);
         }


[21/50] [abbrv] git commit: updated refs/heads/reporter to b26f3fc

Posted by wi...@apache.org.
CLOUDSTACK-8110. VM name in vCenter should be VM's DisplayName if global config 'vm.instancename.flag' is set.
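
A small sketch of the naming rule after this change (the class and method names here are illustrative, not the real VmwareResource API): when vm.instancename.flag is enabled and a display/host name is available, that name is used for the VM in vCenter; otherwise the internal i-x-y name is kept.

class VcenterVmNameSketch {

    // Returns the name to use for the VM in vCenter.
    static String nameOnVcenter(String internalName, String hostName, boolean instanceNameFlag) {
        if (instanceNameFlag && hostName != null) {
            return hostName;
        }
        return internalName;
    }
}

For example, nameOnVcenter("i-2-10-VM", "web01", true) yields "web01", while with the flag disabled the internal name "i-2-10-VM" is kept.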


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/51ee9007
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/51ee9007
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/51ee9007

Branch: refs/heads/reporter
Commit: 51ee90074d5b53be261dcc746e4bd8a1058dfb6a
Parents: 507d9d3
Author: Likitha Shetty <li...@citrix.com>
Authored: Fri Oct 31 16:26:29 2014 +0530
Committer: Sanjay Tripathi <sa...@citrix.com>
Committed: Tue Dec 23 11:42:26 2014 +0530

----------------------------------------------------------------------
 .../com/cloud/hypervisor/vmware/resource/VmwareResource.java   | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/51ee9007/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
index d175d85..2e7553a 100644
--- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
+++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
@@ -1834,10 +1834,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
     private Pair<String, String> composeVmNames(VirtualMachineTO vmSpec) {
         String vmInternalCSName = vmSpec.getName();
         String vmNameOnVcenter = vmSpec.getName();
-        if (vmSpec.getType() == VirtualMachine.Type.User && _instanceNameFlag && vmSpec.getHostName() != null) {
-            String[] tokens = vmInternalCSName.split("-");
-            assert (tokens.length >= 3); // vmInternalCSName has format i-x-y-<instance.name>
-            vmNameOnVcenter = String.format("%s-%s-%s-%s", tokens[0], tokens[1], tokens[2], vmSpec.getHostName());
+        if (_instanceNameFlag && vmSpec.getHostName() != null) {
+            vmNameOnVcenter = vmSpec.getHostName();
         }
         return new Pair<String, String>(vmInternalCSName, vmNameOnVcenter);
     }


[28/50] [abbrv] git commit: updated refs/heads/reporter to b26f3fc

Posted by wi...@apache.org.
CLOUDSTACK-8098: Fixed VM snapshot issue in smoke/test_vm_snapshots.py

Signed-off-by: SrikanteswaraRao Talluri <ta...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/fc2c1a09
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/fc2c1a09
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/fc2c1a09

Branch: refs/heads/reporter
Commit: fc2c1a09a7064aa9121ead7c9f88fb268698c22e
Parents: 0d75682
Author: Gaurav Aradhye <ga...@clogeny.com>
Authored: Fri Dec 19 17:38:06 2014 +0530
Committer: SrikanteswaraRao Talluri <ta...@apache.org>
Committed: Tue Dec 23 14:46:10 2014 +0530

----------------------------------------------------------------------
 test/integration/smoke/test_vm_snapshots.py | 100 ++++++++++++-----------
 1 file changed, 51 insertions(+), 49 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/fc2c1a09/test/integration/smoke/test_vm_snapshots.py
----------------------------------------------------------------------
diff --git a/test/integration/smoke/test_vm_snapshots.py b/test/integration/smoke/test_vm_snapshots.py
index 131da99..94c0f33 100644
--- a/test/integration/smoke/test_vm_snapshots.py
+++ b/test/integration/smoke/test_vm_snapshots.py
@@ -19,7 +19,6 @@
 from marvin.codes import FAILED, KVM
 from nose.plugins.attrib import attr
 from marvin.cloudstackTestCase import cloudstackTestCase, unittest
-from marvin.cloudstackAPI import startVirtualMachine
 from marvin.lib.utils import random_gen, cleanup_resources
 from marvin.lib.base import (Account,
                              ServiceOffering,
@@ -27,10 +26,10 @@ from marvin.lib.base import (Account,
                              VmSnapshot)
 from marvin.lib.common import (get_zone,
                                get_domain,
-                               get_template,
-                               list_virtual_machines)
+                               get_template)
 import time
 
+
 class TestVmSnapshot(cloudstackTestCase):
 
     @classmethod
@@ -39,7 +38,8 @@ class TestVmSnapshot(cloudstackTestCase):
 
         hypervisor = testClient.getHypervisorInfo()
         if hypervisor.lower() in (KVM.lower(), "hyperv", "lxc"):
-            raise unittest.SkipTest("VM snapshot feature is not supported on KVM, Hyper-V or LXC")
+            raise unittest.SkipTest(
+                "VM snapshot feature is not supported on KVM, Hyper-V or LXC")
 
         cls.apiclient = testClient.getApiClient()
         cls.services = testClient.getParsedTestDataConfig()
@@ -48,12 +48,13 @@ class TestVmSnapshot(cloudstackTestCase):
         cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
 
         template = get_template(
-                    cls.apiclient,
-                    cls.zone.id,
-                    cls.services["ostype"]
-                    )
+            cls.apiclient,
+            cls.zone.id,
+            cls.services["ostype"]
+        )
         if template == FAILED:
-            assert False, "get_template() failed to return template with description %s" % cls.services["ostype"]
+            assert False, "get_template() failed to return template\
+                    with description %s" % cls.services["ostype"]
 
         cls.services["domainid"] = cls.domain.id
         cls.services["server"]["zoneid"] = cls.zone.id
@@ -62,31 +63,31 @@ class TestVmSnapshot(cloudstackTestCase):
 
         # Create VMs, NAT Rules etc
         cls.account = Account.create(
-                    cls.apiclient,
-                    cls.services["account"],
-                    domainid=cls.domain.id
-                    )
+            cls.apiclient,
+            cls.services["account"],
+            domainid=cls.domain.id
+        )
 
         cls.service_offering = ServiceOffering.create(
-                            cls.apiclient,
-                            cls.services["service_offerings"]
-                            )
+            cls.apiclient,
+            cls.services["service_offerings"]
+        )
         cls.virtual_machine = VirtualMachine.create(
-                    cls.apiclient,
-                    cls.services["server"],
-                    templateid=template.id,
-                    accountid=cls.account.name,
-                    domainid=cls.account.domainid,
-                    serviceofferingid=cls.service_offering.id,
-                    mode=cls.zone.networktype
-                    )
+            cls.apiclient,
+            cls.services["server"],
+            templateid=template.id,
+            accountid=cls.account.name,
+            domainid=cls.account.domainid,
+            serviceofferingid=cls.service_offering.id,
+            mode=cls.zone.networktype
+        )
         cls.random_data_0 = random_gen(size=100)
         cls.test_dir = "/tmp"
         cls.random_data = "random.data"
         cls._cleanup = [
-                cls.service_offering,
-                cls.account,
-                ]
+            cls.service_offering,
+            cls.account,
+        ]
         return
 
     @classmethod
@@ -122,9 +123,10 @@ class TestVmSnapshot(cloudstackTestCase):
             ssh_client = self.virtual_machine.get_ssh_client()
 
             cmds = [
-                "echo %s > %s/%s" % (self.random_data_0, self.test_dir, self.random_data),
-                "cat %s/%s" % (self.test_dir, self.random_data)
-            ]
+                "echo %s > %s/%s" %
+                (self.random_data_0, self.test_dir, self.random_data),
+                "cat %s/%s" %
+                (self.test_dir, self.random_data)]
 
             for c in cmds:
                 self.debug(c)
@@ -183,7 +185,10 @@ class TestVmSnapshot(cloudstackTestCase):
 
         time.sleep(self.services["sleep"])
 
-        list_snapshot_response = VmSnapshot.list(self.apiclient, vmid=self.virtual_machine.id, listall=True)
+        list_snapshot_response = VmSnapshot.list(
+            self.apiclient,
+            vmid=self.virtual_machine.id,
+            listall=True)
 
         self.assertEqual(
             isinstance(list_snapshot_response, list),
@@ -202,24 +207,13 @@ class TestVmSnapshot(cloudstackTestCase):
             "Check the snapshot of vm is ready!"
         )
 
-        VmSnapshot.revertToSnapshot(self.apiclient, list_snapshot_response[0].id)
+        self.virtual_machine.stop(self.apiclient)
 
-        list_vm_response = list_virtual_machines(
+        VmSnapshot.revertToSnapshot(
             self.apiclient,
-            id=self.virtual_machine.id
-        )
+            list_snapshot_response[0].id)
 
-        self.assertEqual(
-            list_vm_response[0].state,
-            "Stopped",
-            "Check the state of vm is Stopped!"
-        )
-
-        cmd = startVirtualMachine.startVirtualMachineCmd()
-        cmd.id = list_vm_response[0].id
-        self.apiclient.startVirtualMachine(cmd)
-
-        time.sleep(self.services["sleep"])
+        self.virtual_machine.start(self.apiclient)
 
         try:
             ssh_client = self.virtual_machine.get_ssh_client(reconnect=True)
@@ -248,7 +242,10 @@ class TestVmSnapshot(cloudstackTestCase):
         """Test to delete vm snapshots
         """
 
-        list_snapshot_response = VmSnapshot.list(self.apiclient, vmid=self.virtual_machine.id, listall=True)
+        list_snapshot_response = VmSnapshot.list(
+            self.apiclient,
+            vmid=self.virtual_machine.id,
+            listall=True)
 
         self.assertEqual(
             isinstance(list_snapshot_response, list),
@@ -260,11 +257,16 @@ class TestVmSnapshot(cloudstackTestCase):
             None,
             "Check if snapshot exists in ListSnapshot"
         )
-        VmSnapshot.deleteVMSnapshot(self.apiclient, list_snapshot_response[0].id)
+        VmSnapshot.deleteVMSnapshot(
+            self.apiclient,
+            list_snapshot_response[0].id)
 
         time.sleep(self.services["sleep"] * 3)
 
-        list_snapshot_response = VmSnapshot.list(self.apiclient, vmid=self.virtual_machine.id, listall=True)
+        list_snapshot_response = VmSnapshot.list(
+            self.apiclient,
+            vmid=self.virtual_machine.id,
+            listall=True)
 
         self.assertEqual(
             list_snapshot_response,


[46/50] [abbrv] git commit: updated refs/heads/reporter to b26f3fc

Posted by wi...@apache.org.
CLOUDSTACK-8129. Cold migration of a VM across VMware DCs leaves the VM behind on the source host.
If a VM has been cold migrated across different VMware DCs, unregister the VM from the source host.
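
The cross-DC check relies on deriving the VMware datacenter name from the cluster URL, which (per the comment in the ClusterDetailsDaoImpl change below) has the form 'http://vcenter/dc/cluster'. A small sketch of that parsing, with an illustrative class name:

class VmwareDcNameSketch {

    // Splitting "http://vcenter/dc/cluster" on "/" yields
    // ["http:", "", "vcenter", "dc", "cluster"], so the DC name is token 3.
    static String dcNameFromClusterUrl(String url) {
        if (url == null) {
            return null;
        }
        String[] tokens = url.split("/");
        return tokens.length > 3 ? tokens[3] : null;
    }

    public static void main(String[] args) {
        String srcDc = dcNameFromClusterUrl("http://vcenter01/DC1/Cluster1");
        String destDc = dcNameFromClusterUrl("http://vcenter01/DC2/ClusterA");
        // Differing DC names after a successful cold migration are what trigger
        // unregistering the VM (with file cleanup) on the source host.
        System.out.println(!srcDc.equals(destDc)); // prints: true
    }
}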


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/15b34863
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/15b34863
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/15b34863

Branch: refs/heads/reporter
Commit: 15b348632df2049347f58c87830be2c02eee3b61
Parents: 5227ae2
Author: Likitha Shetty <li...@citrix.com>
Authored: Thu Dec 18 19:38:14 2014 +0530
Committer: Likitha Shetty <li...@citrix.com>
Committed: Fri Dec 26 11:07:30 2014 +0530

----------------------------------------------------------------------
 .../cloud/agent/api/UnregisterVMCommand.java    |  9 +++++++
 .../com/cloud/vm/VirtualMachineManagerImpl.java | 27 ++++++++++++++++++++
 .../src/com/cloud/dc/ClusterDetailsDao.java     |  2 ++
 .../src/com/cloud/dc/ClusterDetailsDaoImpl.java | 10 ++++++++
 .../vmware/resource/VmwareResource.java         | 18 +++++++++----
 .../com/cloud/storage/VolumeApiServiceImpl.java | 19 ++++++++++++++
 6 files changed, 80 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/15b34863/core/src/com/cloud/agent/api/UnregisterVMCommand.java
----------------------------------------------------------------------
diff --git a/core/src/com/cloud/agent/api/UnregisterVMCommand.java b/core/src/com/cloud/agent/api/UnregisterVMCommand.java
index a0085e0..16eb4ba 100644
--- a/core/src/com/cloud/agent/api/UnregisterVMCommand.java
+++ b/core/src/com/cloud/agent/api/UnregisterVMCommand.java
@@ -21,6 +21,7 @@ package com.cloud.agent.api;
 
 public class UnregisterVMCommand extends Command {
     String vmName;
+    boolean cleanupVmFiles = false;
 
     public UnregisterVMCommand(String vmName) {
         this.vmName = vmName;
@@ -34,4 +35,12 @@ public class UnregisterVMCommand extends Command {
     public String getVmName() {
         return vmName;
     }
+
+    public void setCleanupVmFiles(boolean cleanupVmFiles) {
+        this.cleanupVmFiles = cleanupVmFiles;
+    }
+
+    public boolean getCleanupVmFiles() {
+        return this.cleanupVmFiles;
+    }
 }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/15b34863/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java
----------------------------------------------------------------------
diff --git a/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java
index be5ea63..e9cd79c 100644
--- a/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java
+++ b/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java
@@ -96,6 +96,7 @@ import com.cloud.agent.api.StopAnswer;
 import com.cloud.agent.api.StopCommand;
 import com.cloud.agent.api.UnPlugNicAnswer;
 import com.cloud.agent.api.UnPlugNicCommand;
+import com.cloud.agent.api.UnregisterVMCommand;
 import com.cloud.agent.api.to.DiskTO;
 import com.cloud.agent.api.to.GPUDeviceTO;
 import com.cloud.agent.api.to.NicTO;
@@ -1727,6 +1728,9 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
 
     private void orchestrateStorageMigration(String vmUuid, StoragePool destPool) {
         VMInstanceVO vm = _vmDao.findByUuid(vmUuid);
+        Long srchostId = vm.getHostId() != null ? vm.getHostId() : vm.getLastHostId();
+        HostVO srcHost = _hostDao.findById(srchostId);
+        Long srcClusterId = srcHost.getClusterId();
 
         try {
             stateTransitTo(vm, VirtualMachine.Event.StorageMigrationRequested, null);
@@ -1752,6 +1756,29 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                 //when start the vm next time, don;'t look at last_host_id, only choose the host based on volume/storage pool
                 vm.setLastHostId(null);
                 vm.setPodIdToDeployIn(destPool.getPodId());
+
+                // If VM was cold migrated between clusters belonging to two different VMware DCs,
+                // unregister the VM from the source host and cleanup the associated VM files.
+                if (vm.getHypervisorType().equals(HypervisorType.VMware)) {
+                    Long destClusterId = destPool.getClusterId();
+                    if (srcClusterId != null && destClusterId != null && srcClusterId != destClusterId) {
+                        String srcDcName = _clusterDetailsDao.getVmwareDcName(srcClusterId);
+                        String destDcName = _clusterDetailsDao.getVmwareDcName(destClusterId);
+                        if (srcDcName != null && destDcName != null && !srcDcName.equals(destDcName)) {
+                            s_logger.debug("Since VM's storage was successfully migrated across VMware Datacenters, unregistering VM: " + vm.getInstanceName() +
+                                    " from source host: " + srcHost.getId());
+                            UnregisterVMCommand uvc = new UnregisterVMCommand(vm.getInstanceName());
+                            uvc.setCleanupVmFiles(true);
+                            try {
+                                _agentMgr.send(srcHost.getId(), uvc);
+                            } catch (Exception e) {
+                                throw new CloudRuntimeException("Failed to unregister VM: " + vm.getInstanceName() + " from source host: " + srcHost.getId() +
+                                        " after successfully migrating VM's storage across VMware Datacenters");
+                            }
+                        }
+                    }
+                }
+
             } else {
                 s_logger.debug("Storage migration failed");
             }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/15b34863/engine/schema/src/com/cloud/dc/ClusterDetailsDao.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/dc/ClusterDetailsDao.java b/engine/schema/src/com/cloud/dc/ClusterDetailsDao.java
index 49250d9..06c9c52 100644
--- a/engine/schema/src/com/cloud/dc/ClusterDetailsDao.java
+++ b/engine/schema/src/com/cloud/dc/ClusterDetailsDao.java
@@ -30,4 +30,6 @@ public interface ClusterDetailsDao extends GenericDao<ClusterDetailsVO, Long> {
     ClusterDetailsVO findDetail(long clusterId, String name);
 
     void deleteDetails(long clusterId);
+
+    String getVmwareDcName(Long clusterId);
 }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/15b34863/engine/schema/src/com/cloud/dc/ClusterDetailsDaoImpl.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/dc/ClusterDetailsDaoImpl.java b/engine/schema/src/com/cloud/dc/ClusterDetailsDaoImpl.java
index 0d6b833..c9397c2 100644
--- a/engine/schema/src/com/cloud/dc/ClusterDetailsDaoImpl.java
+++ b/engine/schema/src/com/cloud/dc/ClusterDetailsDaoImpl.java
@@ -139,4 +139,14 @@ public class ClusterDetailsDaoImpl extends GenericDaoBase<ClusterDetailsVO, Long
         ClusterDetailsVO vo = findDetail(id, key.key());
         return vo == null ? null : vo.getValue();
     }
+
+    @Override
+    public String getVmwareDcName(Long clusterId) {
+        String dcName = null;
+        String url = findDetail(clusterId, "url").getValue();
+        String[] tokens = url.split("/"); // Cluster URL format is 'http://vcenter/dc/cluster'
+        if (tokens != null && tokens.length > 3)
+            dcName = tokens[3];
+        return dcName;
+    }
 }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/15b34863/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
index 0dfde45..b1a4380 100644
--- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
+++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
@@ -1761,7 +1761,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
 
             // Since VM was successfully powered-on, if there was an existing VM in a different cluster that was unregistered, delete all the files associated with it.
             if (existingVmName != null && existingVmFileLayout != null) {
-                deleteUnregisteredVmFiles(existingVmFileLayout, dcMo);
+                deleteUnregisteredVmFiles(existingVmFileLayout, dcMo, true);
             }
 
             return startAnswer;
@@ -2239,7 +2239,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
         }
     }
 
-    private void deleteUnregisteredVmFiles(VirtualMachineFileLayoutEx vmFileLayout, DatacenterMO dcMo) throws Exception {
+    private void deleteUnregisteredVmFiles(VirtualMachineFileLayoutEx vmFileLayout, DatacenterMO dcMo, boolean deleteDisks) throws Exception {
         s_logger.debug("Deleting files associated with an existing VM that was unregistered");
         DatastoreFile vmFolder = null;
         try {
@@ -2258,7 +2258,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
             // Delete files that are present in the VM folder - this will take care of the VM disks as well.
             DatastoreMO vmFolderDsMo = new DatastoreMO(dcMo.getContext(), dcMo.findDatastore(vmFolder.getDatastoreName()));
             String[] files = vmFolderDsMo.listDirContent(vmFolder.getPath());
-            if (files.length != 0) {
+            if (deleteDisks) {
                 for (String file : files) {
                     String vmDiskFileFullPath = String.format("%s/%s", vmFolder.getPath(), file);
                     s_logger.debug("Deleting file: " + vmDiskFileFullPath);
@@ -2266,8 +2266,10 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
                 }
             }
             // Delete VM folder
-            s_logger.debug("Deleting folder: " + vmFolder.getPath());
-            vmFolderDsMo.deleteFolder(vmFolder.getPath(), dcMo.getMor());
+            if (deleteDisks || files.length == 0) {
+                s_logger.debug("Deleting folder: " + vmFolder.getPath());
+                vmFolderDsMo.deleteFolder(vmFolder.getPath(), dcMo.getMor());
+            }
         } catch (Exception e) {
             String message = "Failed to delete files associated with an existing VM that was unregistered due to " + VmwareHelper.getExceptionMessage(e);
             s_logger.warn(message, e);
@@ -2765,6 +2767,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
 
                 try {
                     vmMo.setCustomFieldValue(CustomFieldConstants.CLOUD_NIC_MASK, "0");
+                    vmMo.setCustomFieldValue(CustomFieldConstants.CLOUD_VM_INTERNAL_NAME, cmd.getVmName());
 
                     if (getVmPowerState(vmMo) != PowerState.PowerOff) {
                         if (vmMo.safePowerOff(_shutdownWaitMs)) {
@@ -3895,10 +3898,15 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
         VmwareContext context = getServiceContext();
         VmwareHypervisorHost hyperHost = getHyperHost(context);
         try {
+            DatacenterMO dataCenterMo = new DatacenterMO(getServiceContext(), hyperHost.getHyperHostDatacenter());
             VirtualMachineMO vmMo = hyperHost.findVmOnHyperHost(cmd.getVmName());
             if (vmMo != null) {
                 try {
+                    VirtualMachineFileLayoutEx vmFileLayout = vmMo.getFileLayout();
                     context.getService().unregisterVM(vmMo.getMor());
+                    if (cmd.getCleanupVmFiles()) {
+                        deleteUnregisteredVmFiles(vmFileLayout, dataCenterMo, false);
+                    }
                     return new Answer(cmd, true, "unregister succeeded");
                 } catch (Exception e) {
                     s_logger.warn("We are not able to unregister VM " + VmwareHelper.getExceptionMessage(e));

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/15b34863/server/src/com/cloud/storage/VolumeApiServiceImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/com/cloud/storage/VolumeApiServiceImpl.java
index 7fa600a..ca83890 100644
--- a/server/src/com/cloud/storage/VolumeApiServiceImpl.java
+++ b/server/src/com/cloud/storage/VolumeApiServiceImpl.java
@@ -79,6 +79,7 @@ import com.cloud.api.ApiDBUtils;
 import com.cloud.configuration.Config;
 import com.cloud.configuration.ConfigurationManager;
 import com.cloud.configuration.Resource.ResourceType;
+import com.cloud.dc.ClusterDetailsDao;
 import com.cloud.dc.ClusterVO;
 import com.cloud.dc.DataCenter;
 import com.cloud.dc.DataCenterVO;
@@ -221,6 +222,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
     AsyncJobManager _jobMgr;
     @Inject
     VmWorkJobDao _workJobDao;
+    @Inject
+    ClusterDetailsDao _clusterDetailsDao;
 
     private List<StoragePoolAllocator> _storagePoolAllocators;
 
@@ -1761,6 +1764,22 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
                         throw new InvalidParameterValueException("Cannot migrate a volume of a virtual machine to a storage pool in a different cluster");
                     }
                 }
+                // In case of VMware, if ROOT volume is being cold-migrated, then ensure destination storage pool is in the same Datacenter as the VM.
+                if (vm != null && vm.getHypervisorType().equals(HypervisorType.VMware)) {
+                    if (!liveMigrateVolume && vol.volumeType.equals(Volume.Type.ROOT)) {
+                        Long hostId = vm.getHostId() != null ? vm.getHostId() : vm.getLastHostId();
+                        HostVO host = _hostDao.findById(hostId);
+                        if (host != null)
+                            srcClusterId = host.getClusterId();
+                        if (srcClusterId != null && destPool.getClusterId() != null && !srcClusterId.equals(destPool.getClusterId())) {
+                            String srcDcName = _clusterDetailsDao.getVmwareDcName(srcClusterId);
+                            String destDcName = _clusterDetailsDao.getVmwareDcName(destPool.getClusterId());
+                            if (srcDcName != null && destDcName != null && !srcDcName.equals(destDcName)) {
+                                throw new InvalidParameterValueException("Cannot migrate ROOT volume of a stopped VM to a storage pool in a different VMware datacenter");
+                            }
+                        }
+                    }
+                }
             }
         } else {
             throw new InvalidParameterValueException("Migration of volume from local storage pool is not supported");


[19/50] [abbrv] git commit: updated refs/heads/reporter to b26f3fc

Posted by wi...@apache.org.
CLOUDSTACK-8108. vCenter admin name is logged in clear text.
Revert to TRACE logging when retrieving and recycling the VMware context from the context pool.
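
The underlying issue is that the pool-key string can embed the vCenter login (including the admin user name), so it should only be emitted at TRACE level rather than INFO. A generic sketch of the pattern, using log4j 1.x as the surrounding code does (the class and method names are illustrative):

import org.apache.log4j.Logger;

class ContextPoolLoggingSketch {

    private static final Logger s_logger = Logger.getLogger(ContextPoolLoggingSketch.class);

    void onRecycle(String poolKey, int idlePoolSize) {
        // Guarded at TRACE so potentially sensitive identifiers do not show up
        // under the default INFO logging configuration.
        if (s_logger.isTraceEnabled()) {
            s_logger.trace("Recycle VmwareContext into idle pool: " + poolKey
                    + ", current idle pool size: " + idlePoolSize);
        }
    }
}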


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/0f224c85
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/0f224c85
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/0f224c85

Branch: refs/heads/reporter
Commit: 0f224c858fd3612c66632ba3476bfc3f1f71e1df
Parents: a75a431
Author: Likitha Shetty <li...@citrix.com>
Authored: Wed Oct 22 14:40:39 2014 +0530
Committer: Sanjay Tripathi <sa...@citrix.com>
Committed: Tue Dec 23 10:35:06 2014 +0530

----------------------------------------------------------------------
 .../cloud/hypervisor/vmware/util/VmwareContextPool.java | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/0f224c85/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareContextPool.java
----------------------------------------------------------------------
diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareContextPool.java b/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareContextPool.java
index e28c974..c97c01f 100644
--- a/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareContextPool.java
+++ b/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareContextPool.java
@@ -83,8 +83,8 @@ public class VmwareContextPool {
                 VmwareContext context = l.remove(0);
                 context.setPoolInfo(this, poolKey);
 
-                if (s_logger.isInfoEnabled())
-                    s_logger.info("Return a VmwareContext from the idle pool: " + poolKey + ". current pool size: " + l.size() + ", outstanding count: " +
+                if (s_logger.isTraceEnabled())
+                    s_logger.trace("Return a VmwareContext from the idle pool: " + poolKey + ". current pool size: " + l.size() + ", outstanding count: " +
                         VmwareContext.getOutstandingContextCount());
                 return context;
             }
@@ -108,12 +108,12 @@ public class VmwareContextPool {
                 context.clearStockObjects();
                 l.add(context);
 
-                if (s_logger.isInfoEnabled())
-                    s_logger.info("Recycle VmwareContext into idle pool: " + context.getPoolKey() + ", current idle pool size: " + l.size() + ", outstanding count: " +
+                if (s_logger.isTraceEnabled())
+                    s_logger.trace("Recycle VmwareContext into idle pool: " + context.getPoolKey() + ", current idle pool size: " + l.size() + ", outstanding count: " +
                         VmwareContext.getOutstandingContextCount());
             } else {
-                if (s_logger.isInfoEnabled())
-                    s_logger.info("VmwareContextPool queue exceeds limits, queue size: " + l.size());
+                if (s_logger.isTraceEnabled())
+                    s_logger.trace("VmwareContextPool queue exceeds limits, queue size: " + l.size());
                 context.close();
             }
         }


[04/50] [abbrv] git commit: updated refs/heads/reporter to b26f3fc

Posted by wi...@apache.org.
CLOUDSTACK-8087: Fixed test_vpc_on_host_maintenance.py

Signed-off-by: SrikanteswaraRao Talluri <ta...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/162f61b7
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/162f61b7
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/162f61b7

Branch: refs/heads/reporter
Commit: 162f61b73fa0e8faa981bc090df27fea8afc4c50
Parents: 95b5584
Author: Ashutosh K <as...@clogeny.com>
Authored: Thu Dec 18 11:58:56 2014 +0530
Committer: SrikanteswaraRao Talluri <ta...@apache.org>
Committed: Fri Dec 19 10:15:25 2014 +0530

----------------------------------------------------------------------
 .../maint/test_vpc_on_host_maintenance.py       | 161 +++++++++----------
 1 file changed, 73 insertions(+), 88 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/162f61b7/test/integration/component/maint/test_vpc_on_host_maintenance.py
----------------------------------------------------------------------
diff --git a/test/integration/component/maint/test_vpc_on_host_maintenance.py b/test/integration/component/maint/test_vpc_on_host_maintenance.py
index eb3360a..8ee50bf 100644
--- a/test/integration/component/maint/test_vpc_on_host_maintenance.py
+++ b/test/integration/component/maint/test_vpc_on_host_maintenance.py
@@ -16,62 +16,64 @@
 # under the License.
 
 from nose.plugins.attrib import attr
-from marvin.cloudstackTestCase import *
-from marvin.cloudstackAPI import *
-from marvin.lib.utils import *
-from marvin.lib.base import *
-from marvin.lib.common import *
+from marvin.cloudstackTestCase import cloudstackTestCase, unittest
+from marvin.lib.utils import cleanup_resources
+from marvin.lib.base import (Account,
+                             Host,
+                             VPC,
+                             VpcOffering)
+from marvin.lib.common import (get_domain,
+                               get_zone,
+                               get_template,
+                               list_configurations)
+import time
 
 
 class Services:
+
     """Test VPC services
     """
 
     def __init__(self):
         self.services = {
-                         "account": {
-                                    "email": "test@test.com",
-                                    "firstname": "Test",
-                                    "lastname": "User",
-                                    "username": "test",
-                                    # Random characters are appended for unique
-                                    # username
-                                    "password": "password",
-                                    },
-                          "service_offering": {
-                                    "name": "Tiny Instance",
-                                    "displaytext": "Tiny Instance",
-                                    "cpunumber": 1,
-                                    "cpuspeed": 100,
-                                    "memory": 128,
-                                    },
-                         "vpc_offering": {
-                                    "name": 'VPC off',
-                                    "displaytext": 'VPC off',
-                                    "supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,Vpn,Lb,UserData,StaticNat,NetworkACL',
-                                },
-                         "vpc": {
-                                 "name": "TestVPC",
-                                 "displaytext": "TestVPC",
-                                 "cidr": '10.0.0.1/24'
-                                 },
-                         "virtual_machine": {
-                                    "displayname": "Test VM",
-                                    "username": "root",
-                                    "password": "password",
-                                    "ssh_port": 22,
-                                    "hypervisor": 'XenServer',
-                                    # Hypervisor type should be same as
-                                    # hypervisor type of cluster
-                                    "privateport": 22,
-                                    "publicport": 22,
-                                    "protocol": 'TCP',
-                                },
-                         "ostype": 'CentOS 5.3 (64-bit)',
-                         # Cent OS 5.3 (64 bit)
-                         "sleep": 60,
-                         "timeout": 10
-                    }
+            "account": {
+                "email": "test@test.com",
+                "firstname": "Test",
+                "lastname": "User",
+                "username": "test",
+                # Random characters are appended for unique
+                # username
+                "password": "password",
+            },
+            "vpc_offering": {
+                "name": 'VPC off',
+                "displaytext": 'VPC off',
+                "supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,Vpn,Lb,\
+UserData,StaticNat,NetworkACL',
+            },
+            "vpc": {
+                "name": "TestVPC",
+                "displaytext": "TestVPC",
+                "cidr": '10.0.0.1/24'
+            },
+            "virtual_machine": {
+                "displayname": "Test VM",
+                "username": "root",
+                "password": "password",
+                "ssh_port": 22,
+                "hypervisor": 'XenServer',
+                # Hypervisor type should be same as
+                # hypervisor type of cluster
+                "privateport": 22,
+                "publicport": 22,
+                "protocol": 'TCP',
+            },
+            "ostype": 'CentOS 5.3 (64-bit)',
+            # Cent OS 5.3 (64 bit)
+            "sleep": 60,
+            "timeout": 10
+        }
+
 
 class TestVPCHostMaintenance(cloudstackTestCase):
 
@@ -93,24 +95,20 @@ class TestVPCHostMaintenance(cloudstackTestCase):
         cls.services["virtual_machine"]["template"] = cls.template.id
         cls.services["mode"] = cls.zone.networktype
 
-        cls.service_offering = ServiceOffering.create(
-            cls.api_client,
-            cls.services["service_offering"]
-        )
         cls.vpc_off = VpcOffering.create(
             cls.api_client,
             cls.services["vpc_offering"]
         )
         cls.vpc_off.update(cls.api_client, state='Enabled')
-        hosts = Host.list(
+        cls.hosts = Host.list(
             cls.api_client,
             zoneid=cls.zone.id,
             listall=True,
             type='Routing'
         )
 
-        if isinstance(hosts, list):
-            for host in hosts:
+        if isinstance(cls.hosts, list):
+            for host in cls.hosts:
                 Host.enableMaintenance(
                     cls.api_client,
                     id=host.id
@@ -124,7 +122,8 @@ class TestVPCHostMaintenance(cloudstackTestCase):
                         id=host.id,
                         listall=True
                     )
-                    if hosts_states[0].resourcestate == 'PrepareForMaintenance':
+                    if hosts_states[
+                            0].resourcestate == 'PrepareForMaintenance':
                         # Wait for sometimetill host goes in maintenance state
                         time.sleep(cls.services["sleep"])
                     elif hosts_states[0].resourcestate == 'Maintenance':
@@ -132,11 +131,11 @@ class TestVPCHostMaintenance(cloudstackTestCase):
                         break
                     elif timeout == 0:
                         raise unittest.SkipTest(
-                            "Failed to enable maintenance mode on %s" % host.name)
+                            "Failed to enable maintenance mode on %s" %
+                            host.name)
                     timeout = timeout - 1
 
         cls._cleanup = [
-            cls.service_offering,
             cls.vpc_off
         ]
         return
@@ -144,20 +143,13 @@ class TestVPCHostMaintenance(cloudstackTestCase):
     @classmethod
     def tearDownClass(cls):
         try:
-            #Cleanup resources used
+            # Cleanup resources used
             cleanup_resources(cls.api_client, cls._cleanup)
-            hosts = Host.list(
-                cls.api_client,
-                zoneid=cls.zone.id,
-                listall=True,
-                type='Routing'
-            )
-            if isinstance(hosts, list):
-                for host in hosts:
-                    Host.cancelMaintenance(
-                        cls.api_client,
-                        id=host.id
-                    )
+            for host in cls.hosts:
+                Host.cancelMaintenance(
+                    cls.api_client,
+                    id=host.id
+                )
                 hosts_states = Host.list(
                     cls.api_client,
                     id=host.id,
@@ -165,7 +157,8 @@ class TestVPCHostMaintenance(cloudstackTestCase):
                 )
                 if hosts_states[0].resourcestate != 'Enabled':
                     raise Exception(
-                        "Failed to cancel maintenance mode on %s" % (host.name))
+                        "Failed to cancel maintenance mode on %s" %
+                        (host.name))
         except Exception as e:
             raise Exception("Warning: Exception during cleanup : %s" % e)
         return
@@ -184,18 +177,8 @@ class TestVPCHostMaintenance(cloudstackTestCase):
 
     def tearDown(self):
         try:
-            #Clean up, terminate the created network offerings
+            # Clean up, terminate the created network offerings
             cleanup_resources(self.apiclient, self.cleanup)
-            interval = list_configurations(
-                self.apiclient,
-                name='network.gc.interval'
-            )
-            wait = list_configurations(
-                self.apiclient,
-                name='network.gc.wait'
-            )
-            # Sleep to ensure that all resources are deleted
-            time.sleep(int(interval[0].value) + int(wait[0].value))
         except Exception as e:
             raise Exception("Warning: Exception during cleanup : %s" % e)
         return
@@ -243,7 +226,7 @@ class TestVPCHostMaintenance(cloudstackTestCase):
         )
         if state:
             self.assertEqual(
-                vpc_networks[0].state,
+                vpc_networks[0].state.lower(),
                 state,
                 "VPC state should be '%s'" % state
             )
@@ -269,9 +252,10 @@ class TestVPCHostMaintenance(cloudstackTestCase):
             vpcofferingid=self.vpc_off.id,
             zoneid=self.zone.id,
             account=self.account.name,
-            domainid=self.account.domainid
+            domainid=self.account.domainid,
+            start=False
         )
-        self.validate_vpc_network(vpc, state='Disabled')
+        self.validate_vpc_network(vpc, state='inactive')
         return
 
     @attr(tags=["advanced", "intervlan"])
@@ -295,9 +279,10 @@ class TestVPCHostMaintenance(cloudstackTestCase):
             vpcofferingid=self.vpc_off.id,
             zoneid=self.zone.id,
             account=self.account.name,
-            domainid=self.account.domainid
+            domainid=self.account.domainid,
+            start=False
         )
-        self.validate_vpc_network(vpc, state='Disabled')
+        self.validate_vpc_network(vpc, state='inactive')
         interval = list_configurations(
             self.apiclient,
             name='network.gc.interval'


[36/50] [abbrv] git commit: updated refs/heads/reporter to b26f3fc

Posted by wi...@apache.org.
CLOUDSTACK-8117: Increase the allowed margin (+/-) used when comparing the memory of a VM on Hyper-V with the memory specified in the service offering
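
The margin here is the +/- tolerance used when asserting that the memory reported for the VM matches the service offering. A tiny illustrative check (not the Marvin helper itself; the names and numbers are made up) of what "within an allowed margin" means:

class MemoryMarginSketch {

    // True if the reported value is within +/- margin of the expected value.
    static boolean withinMargin(long expectedMb, long reportedMb, long marginMb) {
        return Math.abs(expectedMb - reportedMb) <= marginMb;
    }

    public static void main(String[] args) {
        // Hyper-V may report slightly less memory than the offering specifies,
        // so a wider margin avoids spurious test failures.
        System.out.println(withinMargin(128, 122, 8)); // prints: true
    }
}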

Signed-off-by: SrikanteswaraRao Talluri <ta...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/67eff27f
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/67eff27f
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/67eff27f

Branch: refs/heads/reporter
Commit: 67eff27f548ec8a708247938c93200f27e22073b
Parents: 0ed4014
Author: Gaurav Aradhye <ga...@clogeny.com>
Authored: Tue Dec 23 19:02:56 2014 +0530
Committer: SrikanteswaraRao Talluri <ta...@apache.org>
Committed: Wed Dec 24 11:10:28 2014 +0530

----------------------------------------------------------------------
 .../integration/smoke/test_service_offerings.py | 157 ++++++++++++-------
 1 file changed, 100 insertions(+), 57 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/67eff27f/test/integration/smoke/test_service_offerings.py
----------------------------------------------------------------------
diff --git a/test/integration/smoke/test_service_offerings.py b/test/integration/smoke/test_service_offerings.py
index e390f75..1b1b113 100644
--- a/test/integration/smoke/test_service_offerings.py
+++ b/test/integration/smoke/test_service_offerings.py
@@ -5,9 +5,9 @@
 # to you under the Apache License, Version 2.0 (the
 # "License"); you may not use this file except in compliance
 # with the License.  You may obtain a copy of the License at
-# 
+#
 #   http://www.apache.org/licenses/LICENSE-2.0
-# 
+#
 # Unless required by applicable law or agreed to in writing,
 # software distributed under the License is distributed on an
 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@@ -16,21 +16,22 @@
 # under the License.
 """ BVT tests for Service offerings"""
 
-#Import Local Modules
+# Import Local Modules
 from marvin.codes import FAILED
 from marvin.cloudstackTestCase import cloudstackTestCase
-from marvin.cloudstackAPI import changeServiceForVirtualMachine,updateServiceOffering
+from marvin.cloudstackAPI import (changeServiceForVirtualMachine,
+                                  updateServiceOffering)
 from marvin.lib.utils import (isAlmostEqual,
-                                          cleanup_resources,
-                                          random_gen)
+                              cleanup_resources,
+                              random_gen)
 from marvin.lib.base import (ServiceOffering,
-                                         Account,
-                                         VirtualMachine)
+                             Account,
+                             VirtualMachine)
 from marvin.lib.common import (list_service_offering,
-                                           list_virtual_machines,
-                                           get_domain,
-                                           get_zone,
-                                           get_template)
+                               list_virtual_machines,
+                               get_domain,
+                               get_zone,
+                               get_template)
 from nose.plugins.attrib import attr
 
 
@@ -38,6 +39,7 @@ _multiprocess_shared_ = True
 
 
 class TestCreateServiceOffering(cloudstackTestCase):
+
     def setUp(self):
         self.apiclient = self.testClient.getApiClient()
         self.dbclient = self.testClient.getDbConnection()
@@ -46,7 +48,7 @@ class TestCreateServiceOffering(cloudstackTestCase):
 
     def tearDown(self):
         try:
-            #Clean up, terminate the created templates
+            # Clean up, terminate the created templates
             cleanup_resources(self.apiclient, self.cleanup)
 
         except Exception as e:
@@ -54,12 +56,21 @@ class TestCreateServiceOffering(cloudstackTestCase):
 
         return
 
-    @attr(tags=["advanced", "advancedns", "smoke", "basic", "eip", "sg"], required_hardware="false")
+    @attr(
+        tags=[
+            "advanced",
+            "advancedns",
+            "smoke",
+            "basic",
+            "eip",
+            "sg"],
+        required_hardware="false")
     def test_01_create_service_offering(self):
         """Test to create service offering"""
 
         # Validate the following:
-        # 1. createServiceOfferings should return a valid information for newly created offering
+        # 1. createServiceOfferings should return a valid information
+        #    for newly created offering
         # 2. The Cloud Database contains the valid information
 
         service_offering = ServiceOffering.create(
@@ -68,7 +79,9 @@ class TestCreateServiceOffering(cloudstackTestCase):
         )
         self.cleanup.append(service_offering)
 
-        self.debug("Created service offering with ID: %s" % service_offering.id)
+        self.debug(
+            "Created service offering with ID: %s" %
+            service_offering.id)
 
         list_service_response = list_service_offering(
             self.apiclient,
@@ -115,6 +128,7 @@ class TestCreateServiceOffering(cloudstackTestCase):
 
 
 class TestServiceOfferings(cloudstackTestCase):
+
     def setUp(self):
         self.apiclient = self.testClient.getApiClient()
         self.dbclient = self.testClient.getDbConnection()
@@ -122,7 +136,7 @@ class TestServiceOfferings(cloudstackTestCase):
 
     def tearDown(self):
         try:
-            #Clean up, terminate the created templates
+            # Clean up, terminate the created templates
             cleanup_resources(self.apiclient, self.cleanup)
 
         except Exception as e:
@@ -135,6 +149,7 @@ class TestServiceOfferings(cloudstackTestCase):
         testClient = super(TestServiceOfferings, cls).getClsTestClient()
         cls.apiclient = testClient.getApiClient()
         cls.services = testClient.getParsedTestDataConfig()
+        cls.hypervisor = testClient.getHypervisorInfo()
 
         domain = get_domain(cls.apiclient)
         cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
@@ -149,12 +164,13 @@ class TestServiceOfferings(cloudstackTestCase):
             cls.services["service_offerings"]
         )
         template = get_template(
-                            cls.apiclient,
-                            cls.zone.id,
-                            cls.services["ostype"]
-                            )
+            cls.apiclient,
+            cls.zone.id,
+            cls.services["ostype"]
+        )
         if template == FAILED:
-            assert False, "get_template() failed to return template with description %s" % cls.services["ostype"]
+            assert False, "get_template() failed to return\
+                    template with description %s" % cls.services["ostype"]
 
         # Set Zones and disk offerings
         cls.services["small"]["zoneid"] = cls.zone.id
@@ -165,47 +181,57 @@ class TestServiceOfferings(cloudstackTestCase):
 
         # Create VMs, NAT Rules etc
         cls.account = Account.create(
-                            cls.apiclient,
-                            cls.services["account"],
-                            domainid=domain.id
-                            )
+            cls.apiclient,
+            cls.services["account"],
+            domainid=domain.id
+        )
 
         cls.small_offering = ServiceOffering.create(
-                                    cls.apiclient,
-                                    cls.services["service_offerings"]["small"]
-                                    )
+            cls.apiclient,
+            cls.services["service_offerings"]["small"]
+        )
 
         cls.medium_offering = ServiceOffering.create(
-                                    cls.apiclient,
-                                    cls.services["service_offerings"]["medium"]
-                                    )
+            cls.apiclient,
+            cls.services["service_offerings"]["medium"]
+        )
         cls.medium_virtual_machine = VirtualMachine.create(
-                                       cls.apiclient,
-                                       cls.services["medium"],
-                                       accountid=cls.account.name,
-                                       domainid=cls.account.domainid,
-                                       serviceofferingid=cls.medium_offering.id,
-                                       mode=cls.services["mode"]
-                                    )
+            cls.apiclient,
+            cls.services["medium"],
+            accountid=cls.account.name,
+            domainid=cls.account.domainid,
+            serviceofferingid=cls.medium_offering.id,
+            mode=cls.services["mode"]
+        )
         cls._cleanup = [
-                        cls.small_offering,
-                        cls.medium_offering,
-                        cls.account
-                        ]
+            cls.small_offering,
+            cls.medium_offering,
+            cls.account
+        ]
         return
 
     @classmethod
     def tearDownClass(cls):
         try:
-            cls.apiclient = super(TestServiceOfferings, cls).getClsTestClient().getApiClient()
-            #Clean up, terminate the created templates
+            cls.apiclient = super(
+                TestServiceOfferings,
+                cls).getClsTestClient().getApiClient()
+            # Clean up, terminate the created templates
             cleanup_resources(cls.apiclient, cls._cleanup)
 
         except Exception as e:
             raise Exception("Warning: Exception during cleanup : %s" % e)
         return
 
-    @attr(tags=["advanced", "advancedns", "smoke", "basic", "eip", "sg"], required_hardware="false")
+    @attr(
+        tags=[
+            "advanced",
+            "advancedns",
+            "smoke",
+            "basic",
+            "eip",
+            "sg"],
+        required_hardware="false")
     def test_02_edit_service_offering(self):
         """Test to update existing service offering"""
 
@@ -213,7 +239,7 @@ class TestServiceOfferings(cloudstackTestCase):
         # 1. updateServiceOffering should return
         #    a valid information for newly created offering
 
-        #Generate new name & displaytext from random data
+        # Generate new name & displaytext from random data
         random_displaytext = random_gen()
         random_name = random_gen()
 
@@ -221,7 +247,7 @@ class TestServiceOfferings(cloudstackTestCase):
                    self.service_offering_1.id)
 
         cmd = updateServiceOffering.updateServiceOfferingCmd()
-        #Add parameters for API call
+        # Add parameters for API call
         cmd.id = self.service_offering_1.id
         cmd.displaytext = random_displaytext
         cmd.name = random_name
@@ -256,7 +282,15 @@ class TestServiceOfferings(cloudstackTestCase):
 
         return
 
-    @attr(tags=["advanced", "advancedns", "smoke", "basic", "eip", "sg"], required_hardware="false")
+    @attr(
+        tags=[
+            "advanced",
+            "advancedns",
+            "smoke",
+            "basic",
+            "eip",
+            "sg"],
+        required_hardware="false")
     def test_03_delete_service_offering(self):
         """Test to delete service offering"""
 
@@ -316,22 +350,23 @@ class TestServiceOfferings(cloudstackTestCase):
                 self.debug("VM state: %s" % vm.state)
             else:
                 raise Exception(
-                    "Failed to start VM (ID: %s) after changing service offering" % vm.id)
+                    "Failed to start VM (ID: %s) after changing\
+                            service offering" % vm.id)
 
         try:
             ssh = self.medium_virtual_machine.get_ssh_client()
         except Exception as e:
             self.fail(
-                "SSH Access failed for %s: %s" %\
+                "SSH Access failed for %s: %s" %
                 (self.medium_virtual_machine.ipaddress, e)
             )
 
         cpuinfo = ssh.execute("cat /proc/cpuinfo")
         cpu_cnt = len([i for i in cpuinfo if "processor" in i])
-        #'cpu MHz\t\t: 2660.499'
+        # 'cpu MHz\t\t: 2660.499'
         cpu_speed = [i for i in cpuinfo if "cpu MHz" in i][0].split()[3]
         meminfo = ssh.execute("cat /proc/meminfo")
-        #MemTotal:        1017464 kB
+        # MemTotal:        1017464 kB
         total_mem = [i for i in meminfo if "MemTotal" in i][0].split()[1]
 
         self.debug(
@@ -339,7 +374,7 @@ class TestServiceOfferings(cloudstackTestCase):
                 cpu_cnt,
                 cpu_speed,
                 total_mem
-                ))
+            ))
         self.assertAlmostEqual(
             int(cpu_cnt),
             self.small_offering.cpunumber,
@@ -350,11 +385,19 @@ class TestServiceOfferings(cloudstackTestCase):
             self.small_offering.cpuspeed,
             "Check CPU Speed for small offering"
         )
+
+        range = 20
+        if self.hypervisor.lower() == "hyperv":
+            range = 200
+        # TODO: Find the memory allocated to VM on hyperv hypervisor using
+        # powershell commands and use that value to equate instead of
+        # manipulating range, currently we get the memory count much less
+        # because of the UI component
         self.assertTrue(
             isAlmostEqual(int(int(total_mem) / 1024),
-                int(self.small_offering.memory),
-                range=20
-            ),
+                          int(self.small_offering.memory),
+                          range=range
+                          ),
             "Check Memory(kb) for small offering"
         )
         return


[37/50] [abbrv] git commit: updated refs/heads/reporter to b26f3fc

Posted by wi...@apache.org.
CLOUDSTACK-8119. Propagate error messages to the UI for attach/detach volume failure operations.
For the AttachVolume/DetachVolume API commands, improve the user error message in case of a RuntimeException by re-throwing that exception instead of wrapping it as 'Unexpected exception'.
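
In VolumeApiServiceImpl the change is essentially one extra unwrapping branch: if the async job failed with a RuntimeException, re-throw that exception so its message reaches the caller, rather than masking it behind a generic wrapper. A minimal sketch of that branch, with illustrative names:

    public final class JobResultUnwrapper {
        // Re-throws the job's own RuntimeException so its message is what the API caller sees;
        // any other Throwable is still wrapped as a generic runtime exception.
        static void rethrowIfFailed(Object jobResult) {
            if (jobResult instanceof RuntimeException) {
                throw (RuntimeException) jobResult;
            } else if (jobResult instanceof Throwable) {
                throw new RuntimeException("Unexpected exception", (Throwable) jobResult);
            }
        }

        public static void main(String[] args) {
            try {
                rethrowIfFailed(new IllegalStateException("Failed to detach volume: disk uses an IDE controller"));
            } catch (RuntimeException e) {
                System.out.println(e.getMessage()); // prints the specific message, not "Unexpected exception"
            }
        }
    }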


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/4d7ede53
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/4d7ede53
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/4d7ede53

Branch: refs/heads/reporter
Commit: 4d7ede535df568c6aab4a228ac794ec11d433e1e
Parents: 67eff27
Author: Likitha Shetty <li...@citrix.com>
Authored: Tue Dec 2 16:50:20 2014 +0530
Committer: Likitha Shetty <li...@citrix.com>
Committed: Wed Dec 24 11:29:03 2014 +0530

----------------------------------------------------------------------
 .../com/cloud/storage/resource/VmwareStorageProcessor.java   | 6 +++++-
 server/src/com/cloud/storage/VolumeApiServiceImpl.java       | 4 ++++
 .../src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java | 8 ++++++++
 3 files changed, 17 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/4d7ede53/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java
index cb7b23a..ba2255b 100644
--- a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java
+++ b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java
@@ -1367,7 +1367,11 @@ public class VmwareStorageProcessor implements StorageProcessor {
                 hostService.invalidateServiceContext(null);
             }
 
-            String msg = "AttachVolumeCommand failed due to " + VmwareHelper.getExceptionMessage(e);
+            String msg = "";
+            if (isAttach)
+                msg += "Failed to attach volume: " + e.getMessage();
+            else
+                msg += "Failed to detach volume: " + e.getMessage();
             s_logger.error(msg, e);
             return new AttachAnswer(msg);
         }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/4d7ede53/server/src/com/cloud/storage/VolumeApiServiceImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/com/cloud/storage/VolumeApiServiceImpl.java
index 5af0d2d..c1652ed 100644
--- a/server/src/com/cloud/storage/VolumeApiServiceImpl.java
+++ b/server/src/com/cloud/storage/VolumeApiServiceImpl.java
@@ -1378,6 +1378,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
                     throw (ConcurrentOperationException)jobResult;
                 else if (jobResult instanceof InvalidParameterValueException)
                     throw (InvalidParameterValueException)jobResult;
+                else if (jobResult instanceof RuntimeException)
+                    throw (RuntimeException)jobResult;
                 else if (jobResult instanceof Throwable)
                     throw new RuntimeException("Unexpected exception", (Throwable)jobResult);
                 else if (jobResult instanceof Long) {
@@ -1580,6 +1582,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
             if (jobResult != null) {
                 if (jobResult instanceof ConcurrentOperationException)
                     throw (ConcurrentOperationException)jobResult;
+                else if (jobResult instanceof RuntimeException)
+                    throw (RuntimeException)jobResult;
                 else if (jobResult instanceof Throwable)
                     throw new RuntimeException("Unexpected exception", (Throwable)jobResult);
                 else if (jobResult instanceof Long) {

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/4d7ede53/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java
----------------------------------------------------------------------
diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java
index 8f05021..cd96105 100644
--- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java
+++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java
@@ -1126,6 +1126,14 @@ public class VirtualMachineMO extends BaseMO {
             throw new Exception("No such disk device: " + vmdkDatastorePath);
         }
 
+        // IDE virtual disk cannot be detached if VM is running
+        if (deviceInfo.second() != null && deviceInfo.second().contains("ide")) {
+            if (getPowerState() == VirtualMachinePowerState.POWERED_ON) {
+                throw new Exception("Removing a virtual disk over IDE controller is not supported while VM is running in VMware hypervisor. " +
+                        "Please re-try when VM is not running.");
+            }
+        }
+
         List<Pair<String, ManagedObjectReference>> chain = getDiskDatastorePathChain(deviceInfo.first(), true);
 
         VirtualMachineConfigSpec reConfigSpec = new VirtualMachineConfigSpec();


[07/50] [abbrv] git commit: updated refs/heads/reporter to b26f3fc

Posted by wi...@apache.org.
CLOUDSTACK-8097: Failed to create a volume snapshot after VM live migration across clusters.
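
The change below makes XenserverSnapshotStrategy take a full backup when the volume has been migrated to a different primary pool since its last snapshot, instead of extending a delta chain that belongs to the old pool. A minimal sketch of that decision, using illustrative names and made-up pool ids rather than the actual classes:

    public final class BackupTypeDecider {
        // True when a full backup is needed: no snapshot entry on primary yet, the volume now lives
        // on a different pool than the oldest snapshot entry, or the delta chain has reached its limit.
        static boolean needsFullBackup(Long oldestSnapshotPoolId, long currentPoolId,
                                       int deltaChainLength, int deltaMax) {
            if (oldestSnapshotPoolId == null || oldestSnapshotPoolId.longValue() != currentPoolId) {
                return true;
            }
            return deltaChainLength >= deltaMax;
        }

        public static void main(String[] args) {
            System.out.println(needsFullBackup(3L, 7L, 2, 16)); // migrated from pool 3 to 7 -> full backup
            System.out.println(needsFullBackup(7L, 7L, 2, 16)); // same pool, short chain -> delta backup
        }
    }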


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/0c4128e0
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/0c4128e0
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/0c4128e0

Branch: refs/heads/reporter
Commit: 0c4128e024b519f24625ad8cd6b53ddf0137728e
Parents: 3090e4a
Author: Sanjay Tripathi <sa...@citrix.com>
Authored: Fri Dec 19 14:26:48 2014 +0530
Committer: Sanjay Tripathi <sa...@citrix.com>
Committed: Fri Dec 19 14:26:48 2014 +0530

----------------------------------------------------------------------
 .../orchestration/VolumeOrchestrator.java       | 16 +++++-
 .../src/com/cloud/storage/dao/SnapshotDao.java  |  2 +
 .../com/cloud/storage/dao/SnapshotDaoImpl.java  | 10 ++++
 .../datastore/db/SnapshotDataStoreDao.java      |  4 ++
 .../snapshot/XenserverSnapshotStrategy.java     | 57 +++++++++++++-------
 .../image/db/SnapshotDataStoreDaoImpl.java      | 47 +++++++++++++++-
 6 files changed, 113 insertions(+), 23 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/0c4128e0/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
----------------------------------------------------------------------
diff --git a/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java b/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
index 515d5ec..e04bd6d 100644
--- a/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
+++ b/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
@@ -58,6 +58,7 @@ import org.apache.cloudstack.framework.jobs.AsyncJobManager;
 import org.apache.cloudstack.framework.jobs.impl.AsyncJobVO;
 import org.apache.cloudstack.storage.command.CommandResult;
 import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao;
 import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
 import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao;
 import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO;
@@ -96,6 +97,7 @@ import com.cloud.storage.VMTemplateStorageResourceAssoc;
 import com.cloud.storage.Volume;
 import com.cloud.storage.Volume.Type;
 import com.cloud.storage.VolumeVO;
+import com.cloud.storage.dao.SnapshotDao;
 import com.cloud.storage.dao.VolumeDao;
 import com.cloud.storage.dao.VolumeDetailsDao;
 import com.cloud.template.TemplateManager;
@@ -142,6 +144,10 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
     @Inject
     protected VolumeDao _volumeDao;
     @Inject
+    protected SnapshotDao _snapshotDao;
+    @Inject
+    protected SnapshotDataStoreDao _snapshotDataStoreDao;
+    @Inject
     protected ResourceLimitService _resourceLimitMgr;
     @Inject
     VolumeDetailsDao _volDetailDao;
@@ -919,8 +925,14 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
         try {
             VolumeApiResult result = future.get();
             if (result.isFailed()) {
-                s_logger.error("migrate volume failed:" + result.getResult());
-                throw new StorageUnavailableException("migrate volume failed: " + result.getResult(), destPool.getId());
+                s_logger.error("Migrate volume failed:" + result.getResult());
+                throw new StorageUnavailableException("Migrate volume failed: " + result.getResult(), destPool.getId());
+            } else {
+                // update the volumeId for snapshots on secondary
+                if (!_snapshotDao.listByVolumeId(vol.getId()).isEmpty()) {
+                    _snapshotDao.updateVolumeIds(vol.getId(), result.getVolume().getId());
+                    _snapshotDataStoreDao.updateVolumeIds(vol.getId(), result.getVolume().getId());
+                }
             }
             return result.getVolume();
         } catch (InterruptedException e) {

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/0c4128e0/engine/schema/src/com/cloud/storage/dao/SnapshotDao.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/storage/dao/SnapshotDao.java b/engine/schema/src/com/cloud/storage/dao/SnapshotDao.java
index f55352b2..ff2e445 100644
--- a/engine/schema/src/com/cloud/storage/dao/SnapshotDao.java
+++ b/engine/schema/src/com/cloud/storage/dao/SnapshotDao.java
@@ -57,4 +57,6 @@ public interface SnapshotDao extends GenericDao<SnapshotVO, Long>, StateDao<Snap
 
     List<SnapshotVO> listAllByStatus(Snapshot.State... status);
 
+    void updateVolumeIds(long oldVolId, long newVolId);
+
 }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/0c4128e0/engine/schema/src/com/cloud/storage/dao/SnapshotDaoImpl.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/storage/dao/SnapshotDaoImpl.java b/engine/schema/src/com/cloud/storage/dao/SnapshotDaoImpl.java
index 204447c..84a92d7 100644
--- a/engine/schema/src/com/cloud/storage/dao/SnapshotDaoImpl.java
+++ b/engine/schema/src/com/cloud/storage/dao/SnapshotDaoImpl.java
@@ -41,6 +41,7 @@ import com.cloud.utils.db.DB;
 import com.cloud.utils.db.Filter;
 import com.cloud.utils.db.GenericDaoBase;
 import com.cloud.utils.db.GenericSearchBuilder;
+import com.cloud.utils.db.UpdateBuilder;
 import com.cloud.utils.db.JoinBuilder.JoinType;
 import com.cloud.utils.db.SearchBuilder;
 import com.cloud.utils.db.SearchCriteria;
@@ -326,4 +327,13 @@ public class SnapshotDaoImpl extends GenericDaoBase<SnapshotVO, Long> implements
         return true;
     }
 
+    @Override
+    public void updateVolumeIds(long oldVolId, long newVolId) {
+        SearchCriteria<SnapshotVO> sc = VolumeIdSearch.create();
+        sc.setParameters("volumeId", oldVolId);
+        SnapshotVO snapshot = createForUpdate();
+        snapshot.setVolumeId(newVolId);
+        UpdateBuilder ub = getUpdateBuilder(snapshot);
+        update(ub, sc, null);
+    }
 }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/0c4128e0/engine/schema/src/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDao.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDao.java b/engine/schema/src/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDao.java
index e24c035..231b241 100644
--- a/engine/schema/src/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDao.java
+++ b/engine/schema/src/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDao.java
@@ -56,4 +56,8 @@ public interface SnapshotDataStoreDao extends GenericDao<SnapshotDataStoreVO, Lo
     void updateStoreRoleToCache(long storeId);
 
     SnapshotDataStoreVO findLatestSnapshotForVolume(Long volumeId, DataStoreRole role);
+
+    SnapshotDataStoreVO findOldestSnapshotForVolume(Long volumeId, DataStoreRole role);
+
+    void updateVolumeIds(long oldVolId, long newVolId);
 }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/0c4128e0/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/XenserverSnapshotStrategy.java
----------------------------------------------------------------------
diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/XenserverSnapshotStrategy.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/XenserverSnapshotStrategy.java
index 90efcde..51f75bd 100644
--- a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/XenserverSnapshotStrategy.java
+++ b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/XenserverSnapshotStrategy.java
@@ -46,7 +46,9 @@ import com.cloud.storage.DataStoreRole;
 import com.cloud.storage.Snapshot;
 import com.cloud.storage.SnapshotVO;
 import com.cloud.storage.Volume;
+import com.cloud.storage.VolumeVO;
 import com.cloud.storage.dao.SnapshotDao;
+import com.cloud.storage.dao.VolumeDao;
 import com.cloud.storage.snapshot.SnapshotManager;
 import com.cloud.utils.NumbersUtil;
 import com.cloud.utils.db.DB;
@@ -68,6 +70,8 @@ public class XenserverSnapshotStrategy extends SnapshotStrategyBase {
     @Inject
     SnapshotDao snapshotDao;
     @Inject
+    VolumeDao volumeDao;
+    @Inject
     SnapshotDataFactory snapshotDataFactory;
 
     @Override
@@ -105,31 +109,44 @@ public class XenserverSnapshotStrategy extends SnapshotStrategyBase {
 
         // determine full snapshot backup or not
 
-
         boolean fullBackup = true;
         SnapshotDataStoreVO parentSnapshotOnBackupStore = snapshotStoreDao.findLatestSnapshotForVolume(snapshot.getVolumeId(), DataStoreRole.Image);
         HypervisorType hypervisorType = snapshot.getBaseVolume().getHypervisorType();
         if (parentSnapshotOnBackupStore != null && hypervisorType == Hypervisor.HypervisorType.XenServer) { // CS does incremental backup only for XenServer
-            int _deltaSnapshotMax = NumbersUtil.parseInt(configDao.getValue("snapshot.delta.max"),
-                    SnapshotManager.DELTAMAX);
-            int deltaSnap = _deltaSnapshotMax;
-            int i;
-
-            for (i = 1; i < deltaSnap; i++) {
-                Long prevBackupId = parentSnapshotOnBackupStore.getParentSnapshotId();
-                if (prevBackupId == 0) {
-                    break;
-                }
-                parentSnapshotOnBackupStore = snapshotStoreDao.findBySnapshot(prevBackupId, DataStoreRole.Image);
-                if (parentSnapshotOnBackupStore == null) {
-                    break;
-                }
-            }
 
-            if (i >= deltaSnap) {
-                fullBackup = true;
-            } else {
-                fullBackup = false;
+            // In case of volume migration from one pool to other pool, CS should take full snapshot to avoid any issues with delta chain,
+            // to check if this is a migrated volume, compare the current pool id of volume and store_id of oldest snapshot on primary for this volume.
+            // Why oldest? Because at this point CS has two snapshot on primary entries for same volume, one with old pool_id and other one with
+            // current pool id. So, verify and if volume found to be migrated, delete snapshot entry with previous pool store_id.
+            SnapshotDataStoreVO oldestSnapshotOnPrimary = snapshotStoreDao.findOldestSnapshotForVolume(snapshot.getVolumeId(), DataStoreRole.Primary);
+            VolumeVO volume = volumeDao.findById(snapshot.getVolumeId());
+            if (oldestSnapshotOnPrimary != null) {
+                if (oldestSnapshotOnPrimary.getDataStoreId() == volume.getPoolId()) {
+                    int _deltaSnapshotMax = NumbersUtil.parseInt(configDao.getValue("snapshot.delta.max"),
+                            SnapshotManager.DELTAMAX);
+                    int deltaSnap = _deltaSnapshotMax;
+                    int i;
+
+                    for (i = 1; i < deltaSnap; i++) {
+                        Long prevBackupId = parentSnapshotOnBackupStore.getParentSnapshotId();
+                        if (prevBackupId == 0) {
+                            break;
+                        }
+                        parentSnapshotOnBackupStore = snapshotStoreDao.findBySnapshot(prevBackupId, DataStoreRole.Image);
+                        if (parentSnapshotOnBackupStore == null) {
+                            break;
+                        }
+                    }
+
+                    if (i >= deltaSnap) {
+                        fullBackup = true;
+                    } else {
+                        fullBackup = false;
+                    }
+                } else {
+                    // if there is an snapshot entry for previousPool(primary storage) of migrated volume, delete it becasue CS created one more snapshot entry for current pool
+                    snapshotStoreDao.remove(oldestSnapshotOnPrimary.getId());
+                }
             }
         }
 

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/0c4128e0/engine/storage/src/org/apache/cloudstack/storage/image/db/SnapshotDataStoreDaoImpl.java
----------------------------------------------------------------------
diff --git a/engine/storage/src/org/apache/cloudstack/storage/image/db/SnapshotDataStoreDaoImpl.java b/engine/storage/src/org/apache/cloudstack/storage/image/db/SnapshotDataStoreDaoImpl.java
index 28d6598..ea73ecd 100644
--- a/engine/storage/src/org/apache/cloudstack/storage/image/db/SnapshotDataStoreDaoImpl.java
+++ b/engine/storage/src/org/apache/cloudstack/storage/image/db/SnapshotDataStoreDaoImpl.java
@@ -54,6 +54,7 @@ public class SnapshotDataStoreDaoImpl extends GenericDaoBase<SnapshotDataStoreVO
     private SearchBuilder<SnapshotDataStoreVO> snapshotSearch;
     private SearchBuilder<SnapshotDataStoreVO> storeSnapshotSearch;
     private SearchBuilder<SnapshotDataStoreVO> snapshotIdSearch;
+    private SearchBuilder<SnapshotDataStoreVO> volumeIdSearch;
 
     private final String parentSearch = "select store_id, store_role, snapshot_id from cloud.snapshot_store_ref where store_id = ? "
         + " and store_role = ? and volume_id = ? and state = 'Ready'" + " order by created DESC " + " limit 1";
@@ -61,6 +62,10 @@ public class SnapshotDataStoreDaoImpl extends GenericDaoBase<SnapshotDataStoreVO
             " store_role = ? and volume_id = ? and state = 'Ready'" +
             " order by created DESC " +
             " limit 1";
+    private final String findOldestSnapshot = "select store_id, store_role, snapshot_id from cloud.snapshot_store_ref where " +
+            " store_role = ? and volume_id = ? and state = 'Ready'" +
+            " order by created ASC " +
+            " limit 1";
 
     @Override
     public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
@@ -110,6 +115,10 @@ public class SnapshotDataStoreDaoImpl extends GenericDaoBase<SnapshotDataStoreVO
         snapshotIdSearch.and("snapshot_id", snapshotIdSearch.entity().getSnapshotId(), SearchCriteria.Op.EQ);
         snapshotIdSearch.done();
 
+        volumeIdSearch = createSearchBuilder();
+        volumeIdSearch.and("volume_id", volumeIdSearch.entity().getVolumeId(), SearchCriteria.Op.EQ);
+        volumeIdSearch.done();
+
         return true;
     }
 
@@ -225,7 +234,34 @@ public class SnapshotDataStoreDaoImpl extends GenericDaoBase<SnapshotDataStoreVO
                 return findByStoreSnapshot(role, sid, snid);
             }
         } catch (SQLException e) {
-            s_logger.debug("Failed to find parent snapshot: " + e.toString());
+            s_logger.debug("Failed to find latest snapshot for volume: " + volumeId + " due to: "  + e.toString());
+        } finally {
+            try {
+                if (pstmt != null)
+                    pstmt.close();
+            } catch (SQLException e) {
+            }
+        }
+        return null;
+    }
+
+    @Override
+    public SnapshotDataStoreVO findOldestSnapshotForVolume(Long volumeId, DataStoreRole role) {
+        TransactionLegacy txn = TransactionLegacy.currentTxn();
+        PreparedStatement pstmt = null;
+        ResultSet rs = null;
+        try {
+            pstmt = txn.prepareStatement(findOldestSnapshot);
+            pstmt.setString(1, role.toString());
+            pstmt.setLong(2, volumeId);
+            rs = pstmt.executeQuery();
+            while (rs.next()) {
+                long sid = rs.getLong(1);
+                long snid = rs.getLong(3);
+                return findByStoreSnapshot(role, sid, snid);
+            }
+        } catch (SQLException e) {
+            s_logger.debug("Failed to find oldest snapshot for volume: " + volumeId + " due to: "  + e.toString());
         } finally {
             try {
                 if (pstmt != null)
@@ -367,4 +403,13 @@ public class SnapshotDataStoreDaoImpl extends GenericDaoBase<SnapshotDataStoreVO
         }
     }
 
+    @Override
+    public void updateVolumeIds(long oldVolId, long newVolId) {
+        SearchCriteria<SnapshotDataStoreVO> sc = volumeIdSearch.create();
+        sc.setParameters("volume_id", oldVolId);
+        SnapshotDataStoreVO snapshot = createForUpdate();
+        snapshot.setVolumeId(newVolId);
+        UpdateBuilder ub = getUpdateBuilder(snapshot);
+        update(ub, sc, null);
+    }
 }


[34/50] [abbrv] git commit: updated refs/heads/reporter to b26f3fc

Posted by wi...@apache.org.
CLOUDSTACK-8119. [VMware] Cannot attach more than 8 volumes to a VM.
While attaching a new disk to an instance, the unit number on the controller key should be the lowest unit number
that is not in use, and in case the controller type is SCSI it shouldn't be the reserved SCSI unit number.
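
A minimal sketch of that selection rule, assuming unit number 7 is the slot reserved for the SCSI controller itself (the actual code delegates that test to VmwareHelper.isReservedScsiDeviceNumber). A follow-up commit in this batch ([38/50]) further adjusts the comparison so the reserved slot is skipped only on the SCSI controller key:

    import java.util.Set;
    import java.util.TreeSet;

    public final class UnitNumberPicker {
        static final int RESERVED_SCSI_UNIT_NUMBER = 7; // assumed: the controller itself occupies unit 7

        // Lowest unit number on the controller that is not in use and, on a SCSI controller,
        // is not the reserved controller slot.
        static int nextUnitNumber(Set<Integer> inUse, boolean scsiController) {
            int candidate = 0;
            while (inUse.contains(candidate)
                    || (scsiController && candidate == RESERVED_SCSI_UNIT_NUMBER)) {
                candidate++;
            }
            return candidate;
        }

        public static void main(String[] args) {
            Set<Integer> inUse = new TreeSet<>(Set.of(0, 1, 2, 3, 4, 5, 6));
            // Units 0-6 are taken and 7 is reserved, so the next data disk lands on unit 8.
            System.out.println(nextUnitNumber(inUse, true));
        }
    }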


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/f420dd55
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/f420dd55
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/f420dd55

Branch: refs/heads/reporter
Commit: f420dd55fb5c4ec40aec0232618b2162c6d2f88c
Parents: ddcae8a
Author: Likitha Shetty <li...@citrix.com>
Authored: Mon Dec 1 19:49:06 2014 +0530
Committer: Likitha Shetty <li...@citrix.com>
Committed: Wed Dec 24 10:48:03 2014 +0530

----------------------------------------------------------------------
 .../com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java    | 9 ++++-----
 .../src/com/cloud/hypervisor/vmware/util/VmwareHelper.java  | 8 --------
 2 files changed, 4 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/f420dd55/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java
----------------------------------------------------------------------
diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java
index be39bfb..8f05021 100644
--- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java
+++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java
@@ -1018,8 +1018,6 @@ public class VirtualMachineMO extends BaseMO {
         }
 
         int deviceNumber = getNextDeviceNumber(controllerKey);
-        if (controllerKey != ideControllerKey && VmwareHelper.isReservedScsiDeviceNumber(deviceNumber))
-            deviceNumber++;
 
         newDisk.setControllerKey(controllerKey);
         newDisk.setKey(-deviceNumber);
@@ -1858,8 +1856,6 @@ public class VirtualMachineMO extends BaseMO {
     public int getNextScsiDiskDeviceNumber() throws Exception {
         int scsiControllerKey = getScsiDeviceControllerKey();
         int deviceNumber = getNextDeviceNumber(scsiControllerKey);
-        if (VmwareHelper.isReservedScsiDeviceNumber(deviceNumber))
-            deviceNumber++;
 
         return deviceNumber;
     }
@@ -2319,6 +2315,7 @@ public class VirtualMachineMO extends BaseMO {
 
         List<Integer> existingUnitNumbers = new ArrayList<Integer>();
         int deviceNumber = 0;
+        int ideControllerKey = getIDEDeviceControllerKey();
         if (devices != null && devices.size() > 0) {
             for (VirtualDevice device : devices) {
                 if (device.getControllerKey() != null && device.getControllerKey().intValue() == controllerKey) {
@@ -2327,8 +2324,10 @@ public class VirtualMachineMO extends BaseMO {
             }
         }
         while (true) {
+            // Next device number should be the lowest device number on the key that is not in use and is not reserved.
             if (!existingUnitNumbers.contains(Integer.valueOf(deviceNumber))) {
-                break;
+                if (controllerKey != ideControllerKey && !VmwareHelper.isReservedScsiDeviceNumber(deviceNumber))
+                    break;
             }
             ++deviceNumber;
         }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/f420dd55/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareHelper.java
----------------------------------------------------------------------
diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareHelper.java b/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareHelper.java
index f31ffc0..e38e7ee 100644
--- a/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareHelper.java
+++ b/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareHelper.java
@@ -188,8 +188,6 @@ public class VmwareHelper {
             controllerKey = ideControllerKey;
         if (deviceNumber < 0) {
             deviceNumber = vmMo.getNextDeviceNumber(controllerKey);
-            if (controllerKey != ideControllerKey && isReservedScsiDeviceNumber(deviceNumber))
-                deviceNumber++;
         }
         disk.setControllerKey(controllerKey);
 
@@ -261,8 +259,6 @@ public class VmwareHelper {
         disk.setControllerKey(controllerKey);
         if (deviceNumber < 0) {
             deviceNumber = vmMo.getNextDeviceNumber(controllerKey);
-            if (controllerKey != ideControllerKey && isReservedScsiDeviceNumber(deviceNumber))
-                deviceNumber++;
         }
 
         disk.setKey(-contextNumber);
@@ -300,8 +296,6 @@ public class VmwareHelper {
                 controllerKey = ideControllerKey;
             if (deviceNumber < 0) {
                 deviceNumber = vmMo.getNextDeviceNumber(controllerKey);
-                if (controllerKey != ideControllerKey && isReservedScsiDeviceNumber(deviceNumber))
-                    deviceNumber++;
             }
 
             disk.setControllerKey(controllerKey);
@@ -354,8 +348,6 @@ public class VmwareHelper {
             controllerKey = ideControllerKey;
         if (deviceNumber < 0) {
             deviceNumber = vmMo.getNextDeviceNumber(controllerKey);
-            if (controllerKey != ideControllerKey && isReservedScsiDeviceNumber(deviceNumber))
-                deviceNumber++;
         }
 
         disk.setControllerKey(controllerKey);


[31/50] [abbrv] git commit: updated refs/heads/reporter to b26f3fc

Posted by wi...@apache.org.
CLOUDSTACK-8115: Update the default ordering of HA investigators
Moved hypervisor-specific investigators before PingInvestigator in the default ordering


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/ceae9786
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/ceae9786
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/ceae9786

Branch: refs/heads/reporter
Commit: ceae97868cb98b503324afb7743a25ce1f5c1516
Parents: bce67bc
Author: Koushik Das <ko...@apache.org>
Authored: Tue Dec 23 16:12:39 2014 +0530
Committer: Koushik Das <ko...@apache.org>
Committed: Tue Dec 23 16:12:39 2014 +0530

----------------------------------------------------------------------
 .../META-INF/cloudstack/core/spring-core-registry-core-context.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/ceae9786/core/resources/META-INF/cloudstack/core/spring-core-registry-core-context.xml
----------------------------------------------------------------------
diff --git a/core/resources/META-INF/cloudstack/core/spring-core-registry-core-context.xml b/core/resources/META-INF/cloudstack/core/spring-core-registry-core-context.xml
index 3263e92..939cffe 100644
--- a/core/resources/META-INF/cloudstack/core/spring-core-registry-core-context.xml
+++ b/core/resources/META-INF/cloudstack/core/spring-core-registry-core-context.xml
@@ -68,7 +68,7 @@
         class="org.apache.cloudstack.spring.lifecycle.registry.ExtensionRegistry">
         <property name="orderConfigKey" value="ha.investigators.order" />
         <property name="orderConfigDefault"
-            value="SimpleInvestigator,XenServerInvestigator,PingInvestigator,ManagementIPSysVMInvestigator,KVMInvestigator" />
+            value="SimpleInvestigator,XenServerInvestigator,KVMInvestigator,HypervInvestigator,VMwareInvestigator,PingInvestigator,ManagementIPSysVMInvestigator" />
         <property name="excludeKey" value="ha.investigators.exclude" />
     </bean>
 


[44/50] [abbrv] git commit: updated refs/heads/reporter to b26f3fc

Posted by wi...@apache.org.
CLOUDSTACK-8127. VM name not displayed after attaching a data disk to a VM.
If the user hasn't supplied a display name for a VM, default it to the VM name in the listVolume response.
This behaviour is identical to the listVirtualMachine response.


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/ff7997a2
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/ff7997a2
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/ff7997a2

Branch: refs/heads/reporter
Commit: ff7997a2b139ed25f301ed1b8ad388864fcb9980
Parents: 974b018
Author: Likitha Shetty <li...@citrix.com>
Authored: Mon Dec 15 15:58:23 2014 +0530
Committer: Likitha Shetty <li...@citrix.com>
Committed: Wed Dec 24 15:21:34 2014 +0530

----------------------------------------------------------------------
 server/src/com/cloud/api/query/dao/VolumeJoinDaoImpl.java | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/ff7997a2/server/src/com/cloud/api/query/dao/VolumeJoinDaoImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/api/query/dao/VolumeJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/VolumeJoinDaoImpl.java
index 97a1ff2..68a578f 100644
--- a/server/src/com/cloud/api/query/dao/VolumeJoinDaoImpl.java
+++ b/server/src/com/cloud/api/query/dao/VolumeJoinDaoImpl.java
@@ -91,7 +91,11 @@ public class VolumeJoinDaoImpl extends GenericDaoBase<VolumeJoinVO, Long> implem
             volResponse.setVirtualMachineId(volume.getVmUuid());
             volResponse.setVirtualMachineName(volume.getVmName());
             volResponse.setVirtualMachineState(volume.getVmState().toString());
-            volResponse.setVirtualMachineDisplayName(volume.getVmDisplayName());
+            if (volume.getVmDisplayName() != null) {
+                volResponse.setVirtualMachineDisplayName(volume.getVmDisplayName());
+            } else {
+                volResponse.setVirtualMachineDisplayName(volume.getVmName());
+            }
         }
 
         volResponse.setProvisioningType(volume.getProvisioningType().toString());


[38/50] [abbrv] git commit: updated refs/heads/reporter to b26f3fc

Posted by wi...@apache.org.
CLOUDSTACK-8119. [VMware] Cannot attach more than 8 volumes to a VM.


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/b1bca2a2
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/b1bca2a2
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/b1bca2a2

Branch: refs/heads/reporter
Commit: b1bca2a2c02d0f0f5e3435fe2587334ae107495d
Parents: 4d7ede5
Author: Likitha Shetty <li...@citrix.com>
Authored: Wed Dec 3 19:28:41 2014 +0530
Committer: Likitha Shetty <li...@citrix.com>
Committed: Wed Dec 24 11:36:27 2014 +0530

----------------------------------------------------------------------
 .../src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java     | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b1bca2a2/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java
----------------------------------------------------------------------
diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java
index cd96105..727903e 100644
--- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java
+++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java
@@ -2323,7 +2323,7 @@ public class VirtualMachineMO extends BaseMO {
 
         List<Integer> existingUnitNumbers = new ArrayList<Integer>();
         int deviceNumber = 0;
-        int ideControllerKey = getIDEDeviceControllerKey();
+        int scsiControllerKey = getScsiDeviceControllerKeyNoException();
         if (devices != null && devices.size() > 0) {
             for (VirtualDevice device : devices) {
                 if (device.getControllerKey() != null && device.getControllerKey().intValue() == controllerKey) {
@@ -2334,7 +2334,7 @@ public class VirtualMachineMO extends BaseMO {
         while (true) {
             // Next device number should be the lowest device number on the key that is not in use and is not reserved.
             if (!existingUnitNumbers.contains(Integer.valueOf(deviceNumber))) {
-                if (controllerKey != ideControllerKey && !VmwareHelper.isReservedScsiDeviceNumber(deviceNumber))
+                if (controllerKey != scsiControllerKey || !VmwareHelper.isReservedScsiDeviceNumber(deviceNumber))
                     break;
             }
             ++deviceNumber;


[10/50] [abbrv] git commit: updated refs/heads/reporter to b26f3fc

Posted by wi...@apache.org.
CLOUDSTACK-6666: UI > network > VPC > Router > Public IP Addresses > IP Address detailView > Configuration tab > Load Balancing > Select VM screen > implement keyword search.


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/5fea96fd
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/5fea96fd
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/5fea96fd

Branch: refs/heads/reporter
Commit: 5fea96fdc29166eba6937f40d7956283653265b7
Parents: 8bcde02
Author: Jessica Wang <je...@apache.org>
Authored: Fri Dec 19 14:25:32 2014 -0800
Committer: Jessica Wang <je...@apache.org>
Committed: Fri Dec 19 14:28:45 2014 -0800

----------------------------------------------------------------------
 ui/scripts/network.js | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/5fea96fd/ui/scripts/network.js
----------------------------------------------------------------------
diff --git a/ui/scripts/network.js b/ui/scripts/network.js
index 44e66b6..1d9e490 100755
--- a/ui/scripts/network.js
+++ b/ui/scripts/network.js
@@ -3357,18 +3357,18 @@
                                                 dataProvider: function(args) {
                                                 	var itemData = $.isArray(args.context.multiRule) && args.context.subItemData ? args.context.subItemData : [];
 
+                                                	var data = {};
+                                                    listViewDataProvider(args, data);
+                                                	
                                                     var networkid;
-                                                    if ('vpc' in args.context)
+                                                    if ('vpc' in args.context) {
                                                         networkid = args.context.multiData.tier;
-                                                    else
+                                                    } else {
                                                         networkid = args.context.ipAddresses[0].associatednetworkid;
-
-                                                    var data = {
-                                                        page: args.page,
-                                                        pageSize: pageSize,
-                                                        networkid: networkid,
-                                                        listAll: true
-                                                    };
+                                                    }
+                                                    $.extend(data, {                                                       
+                                                        networkid: networkid                                                        
+                                                    });
 
                                                     if (!args.context.projects) {
                                                         $.extend(data, {


[12/50] [abbrv] git commit: updated refs/heads/reporter to b26f3fc

Posted by wi...@apache.org.
CLOUDSTACK-8101: Volume sync not working as expected - a management server restart during volume upload leaves the volume in a hung state.


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/ea634550
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/ea634550
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/ea634550

Branch: refs/heads/reporter
Commit: ea634550fcc257134ff2ffbc29016f86a0c2fa91
Parents: 4cb9505
Author: Min Chen <mi...@citrix.com>
Authored: Fri Dec 19 11:26:59 2014 -0800
Committer: Min Chen <mi...@citrix.com>
Committed: Fri Dec 19 15:51:52 2014 -0800

----------------------------------------------------------------------
 .../cloudstack/storage/volume/VolumeServiceImpl.java   | 13 +++++++++----
 .../cloud/storage/download/DownloadMonitorImpl.java    |  5 +++++
 2 files changed, 14 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/ea634550/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
----------------------------------------------------------------------
diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
index 4f8255a..5d10c7f 100644
--- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
+++ b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
@@ -1382,6 +1382,7 @@ public class VolumeServiceImpl implements VolumeService {
                         return;
                     }
 
+                    // we can only resume those uploaded volume with a URL specified
                     List<VolumeDataStoreVO> dbVolumes = _volumeStoreDao.listUploadedVolumesByStoreId(storeId);
                     List<VolumeDataStoreVO> toBeDownloaded = new ArrayList<VolumeDataStoreVO>(dbVolumes);
                     for (VolumeDataStoreVO volumeStore : dbVolumes) {
@@ -1405,7 +1406,7 @@ public class VolumeServiceImpl implements VolumeService {
                                 volumeStore.setDownloadState(Status.DOWNLOAD_ERROR);
                                 String msg = "Volume " + volume.getUuid() + " is corrupted on image store ";
                                 volumeStore.setErrorString(msg);
-                                s_logger.info("msg");
+                                s_logger.info(msg);
                                 if (volumeStore.getDownloadUrl() == null) {
                                     msg =
                                             "Volume (" + volume.getUuid() + ") with install path " + volInfo.getInstallPath() +
@@ -1454,7 +1455,6 @@ public class VolumeServiceImpl implements VolumeService {
                         if (volumeStore.getDownloadState() != Status.DOWNLOADED) {
                             s_logger.info("Volume Sync did not find " + volume.getName() + " ready on image store " + storeId +
                                     ", will request download to start/resume shortly");
-                            toBeDownloaded.add(volumeStore);
                         }
                     }
 
@@ -1477,8 +1477,13 @@ public class VolumeServiceImpl implements VolumeService {
                             }
 
                             s_logger.debug("Volume " + volumeHost.getVolumeId() + " needs to be downloaded to " + store.getName());
-                            // TODO: pass a callback later
-                            VolumeInfo vol = volFactory.getVolume(volumeHost.getVolumeId());
+                            // reset volume status back to Allocated
+                            VolumeObject vol = (VolumeObject)volFactory.getVolume(volumeHost.getVolumeId());
+                            vol.processEvent(Event.OperationFailed); // reset back volume status
+                            // remove leftover volume_store_ref entry since re-download will create it again
+                            _volumeStoreDao.remove(volumeHost.getId());
+                            // get an updated volumeVO
+                            vol = (VolumeObject)volFactory.getVolume(volumeHost.getVolumeId());
                             RegisterVolumePayload payload = new RegisterVolumePayload(volumeHost.getDownloadUrl(), volumeHost.getChecksum(), vol.getFormat().toString());
                             vol.addPayload(payload);
                             createVolumeAsync(vol, store);

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/ea634550/server/src/com/cloud/storage/download/DownloadMonitorImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/storage/download/DownloadMonitorImpl.java b/server/src/com/cloud/storage/download/DownloadMonitorImpl.java
index eed1581..f1937f8 100644
--- a/server/src/com/cloud/storage/download/DownloadMonitorImpl.java
+++ b/server/src/com/cloud/storage/download/DownloadMonitorImpl.java
@@ -219,6 +219,11 @@ public class DownloadMonitorImpl extends ManagerBase implements DownloadMonitor
             _volumeStoreDao.persist(volumeHost);
         } else if ((volumeHost.getJobId() != null) && (volumeHost.getJobId().length() > 2)) {
             downloadJobExists = true;
+        } else {
+            // persit url and checksum
+            volumeHost.setDownloadUrl(url);
+            volumeHost.setChecksum(checkSum);
+            _volumeStoreDao.update(volumeHost.getId(), volumeHost);
         }
 
         Long maxVolumeSizeInBytes = getMaxVolumeSizeInBytes();


[25/50] [abbrv] git commit: updated refs/heads/reporter to b26f3fc

Posted by wi...@apache.org.
CLOUDSTACK-8112. CS allows creation of VMs with the same display name when vm.instancename.flag is set to true.
During VM creation, if vm.instancename.flag is set to true and the hypervisor type is VMware, check whether a VM with the same hostname already exists in the zone.
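
A minimal sketch of the duplicate check, assuming an in-memory index of host names per zone in place of the findVMByHostNameInZone DAO lookup that this commit adds:

    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    public final class HostNameGuard {
        // zoneId -> host names already used in that zone (stand-in for the vm_instance table)
        private final Map<Long, Set<String>> hostNamesByZone = new HashMap<>();

        // Rejects a new VM whose host name is already taken in the zone; the real check only runs
        // when vm.instancename.flag is true and the hypervisor is VMware.
        void register(long zoneId, String hostName) {
            Set<String> names = hostNamesByZone.computeIfAbsent(zoneId, k -> new HashSet<>());
            if (!names.add(hostName.toLowerCase())) {
                throw new IllegalArgumentException(
                    "A VM with host name '" + hostName + "' already exists in zone " + zoneId);
            }
        }

        public static void main(String[] args) {
            HostNameGuard guard = new HostNameGuard();
            guard.register(1L, "web-01");
            guard.register(1L, "web-01"); // throws: duplicate host name in the same zone
        }
    }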


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/5f9e4fdd
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/5f9e4fdd
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/5f9e4fdd

Branch: refs/heads/reporter
Commit: 5f9e4fddf303f312a0b17abc0d837f28042caeda
Parents: 33179cc
Author: Likitha Shetty <li...@citrix.com>
Authored: Wed Nov 12 17:27:21 2014 +0530
Committer: Likitha Shetty <li...@citrix.com>
Committed: Tue Dec 23 14:00:32 2014 +0530

----------------------------------------------------------------------
 engine/schema/src/com/cloud/vm/dao/VMInstanceDao.java |  2 ++
 .../src/com/cloud/vm/dao/VMInstanceDaoImpl.java       | 14 ++++++++++++++
 server/src/com/cloud/vm/UserVmManagerImpl.java        |  6 ++++++
 3 files changed, 22 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/5f9e4fdd/engine/schema/src/com/cloud/vm/dao/VMInstanceDao.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/vm/dao/VMInstanceDao.java b/engine/schema/src/com/cloud/vm/dao/VMInstanceDao.java
index 6ba7c36..1e4c8b6 100644
--- a/engine/schema/src/com/cloud/vm/dao/VMInstanceDao.java
+++ b/engine/schema/src/com/cloud/vm/dao/VMInstanceDao.java
@@ -136,4 +136,6 @@ public interface VMInstanceDao extends GenericDao<VMInstanceVO, Long>, StateDao<
     void resetHostPowerStateTracking(long hostId);
 
     HashMap<String, Long> countVgpuVMs(Long dcId, Long podId, Long clusterId);
+
+    VMInstanceVO findVMByHostNameInZone(String hostName, long zoneId);
 }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/5f9e4fdd/engine/schema/src/com/cloud/vm/dao/VMInstanceDaoImpl.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/vm/dao/VMInstanceDaoImpl.java b/engine/schema/src/com/cloud/vm/dao/VMInstanceDaoImpl.java
index df023bf..3eabbdb 100644
--- a/engine/schema/src/com/cloud/vm/dao/VMInstanceDaoImpl.java
+++ b/engine/schema/src/com/cloud/vm/dao/VMInstanceDaoImpl.java
@@ -85,6 +85,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
     protected SearchBuilder<VMInstanceVO> HostUpSearch;
     protected SearchBuilder<VMInstanceVO> InstanceNameSearch;
     protected SearchBuilder<VMInstanceVO> HostNameSearch;
+    protected SearchBuilder<VMInstanceVO> HostNameAndZoneSearch;
     protected GenericSearchBuilder<VMInstanceVO, Long> FindIdsOfVirtualRoutersByAccount;
     protected GenericSearchBuilder<VMInstanceVO, Long> CountRunningByHost;
     protected GenericSearchBuilder<VMInstanceVO, Long> CountRunningByAccount;
@@ -218,6 +219,11 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
         HostNameSearch.and("hostName", HostNameSearch.entity().getHostName(), Op.EQ);
         HostNameSearch.done();
 
+        HostNameAndZoneSearch = createSearchBuilder();
+        HostNameAndZoneSearch.and("hostName", HostNameAndZoneSearch.entity().getHostName(), Op.EQ);
+        HostNameAndZoneSearch.and("zone", HostNameAndZoneSearch.entity().getDataCenterId(), Op.EQ);
+        HostNameAndZoneSearch.done();
+
         FindIdsOfVirtualRoutersByAccount = createSearchBuilder(Long.class);
         FindIdsOfVirtualRoutersByAccount.selectFields(FindIdsOfVirtualRoutersByAccount.entity().getId());
         FindIdsOfVirtualRoutersByAccount.and("account", FindIdsOfVirtualRoutersByAccount.entity().getAccountId(), SearchCriteria.Op.EQ);
@@ -415,6 +421,14 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
     }
 
     @Override
+    public VMInstanceVO findVMByHostNameInZone(String hostName, long zoneId) {
+        SearchCriteria<VMInstanceVO> sc = HostNameAndZoneSearch.create();
+        sc.setParameters("hostName", hostName);
+        sc.setParameters("zone", zoneId);
+        return findOneBy(sc);
+    }
+
+    @Override
     public void updateProxyId(long id, Long proxyId, Date time) {
         VMInstanceVO vo = createForUpdate();
         vo.setProxyId(proxyId);

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/5f9e4fdd/server/src/com/cloud/vm/UserVmManagerImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/vm/UserVmManagerImpl.java b/server/src/com/cloud/vm/UserVmManagerImpl.java
index 03c9a23..605306e 100644
--- a/server/src/com/cloud/vm/UserVmManagerImpl.java
+++ b/server/src/com/cloud/vm/UserVmManagerImpl.java
@@ -2942,6 +2942,12 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
                     hostName = generateHostName(uuidName);
                 }
             }
+            // If global config vm.instancename.flag is set to true, then CS will set guest VM's name as it appears on the hypervisor, to its hostname.
+            // In case of VMware since VM name must be unique within a DC, check if VM with the same hostname already exists in the zone.
+            VMInstanceVO vmByHostName = _vmInstanceDao.findVMByHostNameInZone(hostName, zone.getId());
+            if (vmByHostName != null && vmByHostName.getState() != VirtualMachine.State.Expunging) {
+                 throw new InvalidParameterValueException("There already exists a VM by the name: " + hostName + ".");
+            }
         } else {
             if (hostName == null) {
                 //Generate name using uuid and instance.name global config


[48/50] [abbrv] git commit: updated refs/heads/reporter to b26f3fc

Posted by wi...@apache.org.
CLOUDSTACK-8134. Worker VMs don't have MS id set in vCenter annotation 'cloud.vm.workertag'.
Correctly register node info for a newly created VMware context.
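
A condensed sketch of the corrected flow (object and method names follow the diff below): the cached context is validated first, a new one is opened only when needed, and the stock objects, including "noderuninfo" carrying the management server id, are registered on whichever context is finally used:

    // Sketch only, summarizing the ordering fixed in the patch below.
    VmwareContext context = currentContext.get();
    if (context != null && !context.validate()) {
        invalidateServiceContext(context);   // stale session, drop it
        context = null;
    }
    if (context == null) {
        context = VmwareSecondaryStorageContextFactory.getContext(vCenterAddress, username, password);
    }
    if (context != null) {
        // register node info on new contexts as well, so worker VMs get the MS id annotation
        context.registerStockObject("noderuninfo", cmd.getContextParam("noderuninfo"));
    }
    currentContext.set(context);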


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/13bdc1ce
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/13bdc1ce
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/13bdc1ce

Branch: refs/heads/reporter
Commit: 13bdc1cef4ee0fae2f263777317c170302c49689
Parents: c7b23d0
Author: Likitha Shetty <li...@citrix.com>
Authored: Tue Dec 30 11:48:06 2014 +0530
Committer: Likitha Shetty <li...@citrix.com>
Committed: Tue Dec 30 11:52:56 2014 +0530

----------------------------------------------------------------------
 .../VmwareSecondaryStorageResourceHandler.java  | 23 +++++++++-----------
 1 file changed, 10 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/13bdc1ce/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareSecondaryStorageResourceHandler.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareSecondaryStorageResourceHandler.java b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareSecondaryStorageResourceHandler.java
index e67fed6..8a27799 100644
--- a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareSecondaryStorageResourceHandler.java
+++ b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareSecondaryStorageResourceHandler.java
@@ -209,23 +209,20 @@ public class VmwareSecondaryStorageResourceHandler implements SecondaryStorageRe
             _resource.ensureOutgoingRuleForAddress(vCenterAddress);
 
             VmwareContext context = currentContext.get();
-            if (context != null) {
-                if(!context.validate()) {
-                    invalidateServiceContext(context);
-                    context = null;
-                } else {
-                    context.registerStockObject("serviceconsole", cmd.getContextParam("serviceconsole"));
-                    context.registerStockObject("manageportgroup", cmd.getContextParam("manageportgroup"));
-                    context.registerStockObject("noderuninfo", cmd.getContextParam("noderuninfo"));
-                }
+            if (context != null && !context.validate()) {
+                invalidateServiceContext(context);
+                context = null;
             }
-            if(context == null) {
-                s_logger.info("Open new VmwareContext. vCenter: " + vCenterAddress + ", user: " + username + ", password: " +
-                        StringUtils.getMaskedPasswordForDisplay(password));
+            if (context == null) {
+                s_logger.info("Open new VmwareContext. vCenter: " + vCenterAddress + ", user: " + username + ", password: " + StringUtils.getMaskedPasswordForDisplay(password));
                 VmwareSecondaryStorageContextFactory.setVcenterSessionTimeout(vCenterSessionTimeout);
                 context = VmwareSecondaryStorageContextFactory.getContext(vCenterAddress, username, password);
             }
-
+            if (context != null) {
+                context.registerStockObject("serviceconsole", cmd.getContextParam("serviceconsole"));
+                context.registerStockObject("manageportgroup", cmd.getContextParam("manageportgroup"));
+                context.registerStockObject("noderuninfo", cmd.getContextParam("noderuninfo"));
+            }
             currentContext.set(context);
             return context;
         } catch (Exception e) {


[24/50] [abbrv] git commit: updated refs/heads/reporter to b26f3fc

Posted by wi...@apache.org.
CLOUDSTACK-8112. CS allows creation of VMs with the same display name when vm.instancename.flag is set to true.
Before registering a VM, check whether a different CS VM with the same name exists in vCenter.


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/33179cce
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/33179cce
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/33179cce

Branch: refs/heads/reporter
Commit: 33179cce56b15f0632e38afa260cb829bb2a2273
Parents: 6475323
Author: Likitha Shetty <li...@citrix.com>
Authored: Mon Nov 10 10:58:11 2014 +0530
Committer: Likitha Shetty <li...@citrix.com>
Committed: Tue Dec 23 13:48:01 2014 +0530

----------------------------------------------------------------------
 .../com/cloud/vm/VirtualMachineManagerImpl.java |  3 ++
 .../vmware/resource/VmwareResource.java         | 19 ++++++++++--
 .../hypervisor/vmware/mo/DatacenterMO.java      | 32 ++++++++++++++++++++
 3 files changed, 52 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/33179cce/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java
----------------------------------------------------------------------
diff --git a/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java
index caf374e..07b2277 100644
--- a/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java
+++ b/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java
@@ -1074,6 +1074,9 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                         }
                     }
                     s_logger.info("Unable to start VM on " + dest.getHost() + " due to " + (startAnswer == null ? " no start answer" : startAnswer.getDetails()));
+                    if (startAnswer.getContextParam("stopRetry") != null) {
+                        break;
+                    }
 
                 } catch (OperationTimedoutException e) {
                     s_logger.debug("Unable to send the start command to host " + dest.getHost());

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/33179cce/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
index 2e7553a..c2cf9e9 100644
--- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
+++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
@@ -1324,6 +1324,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
         }
 
         VirtualMachineTO vmSpec = cmd.getVirtualMachine();
+        boolean vmAlreadyExistsInVcenter = false;
 
         Pair<String, String> names = composeVmNames(vmSpec);
         String vmInternalCSName = names.first();
@@ -1335,6 +1336,17 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
             VmwareManager mgr = context.getStockObject(VmwareManager.CONTEXT_STOCK_NAME);
 
             VmwareHypervisorHost hyperHost = getHyperHost(context);
+            DatacenterMO dcMo = new DatacenterMO(hyperHost.getContext(), hyperHost.getHyperHostDatacenter());
+
+            // Validate VM name is unique in Datacenter
+            VirtualMachineMO vmInVcenter = dcMo.checkIfVmAlreadyExistsInVcenter(vmNameOnVcenter, vmInternalCSName);
+            if(vmInVcenter != null) {
+                vmAlreadyExistsInVcenter = true;
+                String msg = "VM with name: " + vmNameOnVcenter +" already exists in vCenter.";
+                s_logger.error(msg);
+                throw new Exception(msg);
+            }
+
             DiskTO[] disks = validateDisks(vmSpec.getDisks());
             assert (disks.length > 0);
             NicTO[] nics = vmSpec.getNics();
@@ -1353,7 +1365,6 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
                 throw new Exception(msg);
             }
 
-            DatacenterMO dcMo = new DatacenterMO(hyperHost.getContext(), hyperHost.getHyperHostDatacenter());
             VirtualMachineDiskInfoBuilder diskInfoBuilder = null;
             VirtualMachineMO vmMo = hyperHost.findVmOnHyperHost(vmInternalCSName);
             boolean hasSnapshot = false;
@@ -1738,7 +1749,11 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
 
             String msg = "StartCommand failed due to " + VmwareHelper.getExceptionMessage(e);
             s_logger.warn(msg, e);
-            return new StartAnswer(cmd, msg);
+            StartAnswer startAnswer = new StartAnswer(cmd, msg);
+            if(vmAlreadyExistsInVcenter) {
+                startAnswer.setContextParam("stopRetry", "true");
+            }
+            return startAnswer;
         } finally {
         }
     }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/33179cce/vmware-base/src/com/cloud/hypervisor/vmware/mo/DatacenterMO.java
----------------------------------------------------------------------
diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/DatacenterMO.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/DatacenterMO.java
index 39a30be..38b1565 100644
--- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/DatacenterMO.java
+++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/DatacenterMO.java
@@ -125,6 +125,38 @@ public class DatacenterMO extends BaseMO {
         return list;
     }
 
+    public VirtualMachineMO checkIfVmAlreadyExistsInVcenter(String vmNameOnVcenter, String vmNameInCS) throws Exception {
+        int key = getCustomFieldKey("VirtualMachine", CustomFieldConstants.CLOUD_VM_INTERNAL_NAME);
+        if (key == 0) {
+            s_logger.warn("Custom field " + CustomFieldConstants.CLOUD_VM_INTERNAL_NAME + " is not registered ?!");
+        }
+
+        List<ObjectContent> ocs = getVmPropertiesOnDatacenterVmFolder(new String[] {"name", String.format("value[%d]", key)});
+        if (ocs != null && ocs.size() > 0) {
+            for (ObjectContent oc : ocs) {
+                List<DynamicProperty> props = oc.getPropSet();
+                if (props != null) {
+                    String vmVcenterName = null;
+                    String vmInternalCSName = null;
+                    for (DynamicProperty prop : props) {
+                        if (prop.getName().equals("name")) {
+                            vmVcenterName = prop.getVal().toString();
+                        }
+                        if (prop.getName().startsWith("value[") && prop.getVal() != null) {
+                            vmInternalCSName = ((CustomFieldStringValue)prop.getVal()).getValue();
+                        }
+                    }
+                    if (vmNameOnVcenter.equals(vmVcenterName)) {
+                        if (vmInternalCSName != null && !vmInternalCSName.isEmpty() && !vmNameInCS.equals(vmInternalCSName)) {
+                            return (new VirtualMachineMO(_context, oc.getObj()));
+                        }
+                    }
+                }
+            }
+        }
+        return null;
+    }
+
     public List<Pair<ManagedObjectReference, String>> getAllVmsOnDatacenter() throws Exception {
         List<Pair<ManagedObjectReference, String>> vms = new ArrayList<Pair<ManagedObjectReference, String>>();
 


[23/50] [abbrv] git commit: updated refs/heads/reporter to b26f3fc

Posted by wi...@apache.org.
CLOUDSTACK-8111. NFS secondary storage repetitively mounted on CS server with ESXi hypervisors.
Fix cleanup of NFS mounts on the management server during server startup by correcting how mount points are listed for a management server.
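
A minimal, self-contained sketch of the listing contract introduced below (the directory and id are hypothetical): mount directories created by a management server are named "<ms-host-id>.<suffix>", so cleanup only has to match on that prefix:

    import java.io.File;
    import java.util.ArrayList;
    import java.util.List;

    public class MountPointListingSketch {
        // Mirrors JavaStorageLayer.listMountPointsByMsHost() from the diff below.
        static List<String> listMountPointsByMsHost(String parent, long msHostId) {
            List<String> mountPaths = new ArrayList<String>();
            File[] files = new File(parent).listFiles();
            if (files == null) {
                return mountPaths;                    // parent missing or unreadable
            }
            for (File file : files) {
                // e.g. "12345.f3a9c1" matches for msHostId 12345, "678.f3a9c1" does not
                if (file.getName().startsWith(msHostId + ".")) {
                    mountPaths.add(file.getAbsolutePath());
                }
            }
            return mountPaths;
        }

        public static void main(String[] args) {
            // Hypothetical mount parent directory; on a real setup this comes from configuration.
            System.out.println(listMountPointsByMsHost("/var/cloudstack/mnt", 12345L));
        }
    }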


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/64753237
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/64753237
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/64753237

Branch: refs/heads/reporter
Commit: 647532376fc090f75e6fa0ad66ee36241084dec9
Parents: 106ec71
Author: Likitha Shetty <li...@citrix.com>
Authored: Thu Mar 20 11:29:57 2014 +0530
Committer: Likitha Shetty <li...@citrix.com>
Committed: Tue Dec 23 13:23:43 2014 +0530

----------------------------------------------------------------------
 core/src/com/cloud/storage/JavaStorageLayer.java      | 14 ++++++++++++++
 core/src/com/cloud/storage/StorageLayer.java          |  3 +++
 .../hypervisor/vmware/manager/VmwareManagerImpl.java  |  4 ++--
 3 files changed, 19 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/64753237/core/src/com/cloud/storage/JavaStorageLayer.java
----------------------------------------------------------------------
diff --git a/core/src/com/cloud/storage/JavaStorageLayer.java b/core/src/com/cloud/storage/JavaStorageLayer.java
index 6b61ad0..2e5544c 100644
--- a/core/src/com/cloud/storage/JavaStorageLayer.java
+++ b/core/src/com/cloud/storage/JavaStorageLayer.java
@@ -143,6 +143,20 @@ public class JavaStorageLayer implements StorageLayer {
     }
 
     @Override
+    public List<String> listMountPointsByMsHost(String path, long msHostId) {
+        List<String> mountPaths = new ArrayList<String>();
+        File[] files = new File(path).listFiles();
+        if (files == null) {
+            return mountPaths;
+        }
+        for (File file : files) {
+            if (file.getName().startsWith(String.valueOf(msHostId) + "."))
+                mountPaths.add(file.getAbsolutePath());
+        }
+        return mountPaths;
+    }
+
+    @Override
     public boolean mkdir(String path) {
         synchronized (path.intern()) {
             File file = new File(path);

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/64753237/core/src/com/cloud/storage/StorageLayer.java
----------------------------------------------------------------------
diff --git a/core/src/com/cloud/storage/StorageLayer.java b/core/src/com/cloud/storage/StorageLayer.java
index 8daa392..8421aeb 100644
--- a/core/src/com/cloud/storage/StorageLayer.java
+++ b/core/src/com/cloud/storage/StorageLayer.java
@@ -21,6 +21,7 @@ package com.cloud.storage;
 
 import java.io.File;
 import java.io.IOException;
+import java.util.List;
 
 import com.cloud.utils.component.Manager;
 
@@ -149,4 +150,6 @@ public interface StorageLayer extends Manager {
     boolean setWorldReadableAndWriteable(File file);
 
     boolean deleteDir(String dir);
+
+    List<String> listMountPointsByMsHost(String path, long msHostId);
 }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/64753237/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java
index 3b2c73f..3416319 100644
--- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java
+++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java
@@ -696,8 +696,8 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw
         long mshostId = ManagementServerNode.getManagementServerId();
 
         // cleanup left-over NFS mounts from previous session
-        String[] mounts = _storage.listFiles(parent + File.separator + String.valueOf(mshostId) + ".*");
-        if (mounts != null && mounts.length > 0) {
+        List<String> mounts = _storage.listMountPointsByMsHost(parent, mshostId);
+        if (mounts != null && !mounts.isEmpty()) {
             for (String mountPoint : mounts) {
                 s_logger.info("umount NFS mount from previous session: " + mountPoint);
 


[05/50] [abbrv] git commit: updated refs/heads/reporter to b26f3fc

Posted by wi...@apache.org.
CLOUDSTACK-8086: Simulator needs a Portable IP Range to execute Portable IP Test Cases

Signed-off-by: SrikanteswaraRao Talluri <ta...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/69669809
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/69669809
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/69669809

Branch: refs/heads/reporter
Commit: 696698090eb2ea548bfc74b802ba7dd01a584b91
Parents: 162f61b
Author: Chandan Purushothama <Ch...@citrix.com>
Authored: Wed Dec 17 18:22:54 2014 -0800
Committer: SrikanteswaraRao Talluri <ta...@apache.org>
Committed: Fri Dec 19 10:19:18 2014 +0530

----------------------------------------------------------------------
 tools/marvin/marvin/config/test_data.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/69669809/tools/marvin/marvin/config/test_data.py
----------------------------------------------------------------------
diff --git a/tools/marvin/marvin/config/test_data.py b/tools/marvin/marvin/config/test_data.py
index 269f46e..9add0fa 100644
--- a/tools/marvin/marvin/config/test_data.py
+++ b/tools/marvin/marvin/config/test_data.py
@@ -1442,11 +1442,11 @@ test_data = {
     "configurableData":
     {
         "portableIpRange": {
-            "gateway": "",
-            "netmask": "",
-            "startip": "",
-            "endip": "",
-            "vlan": ""
+            "gateway": "10.223.59.1",
+            "netmask": "255.255.255.0",
+            "startip": "10.223.59.200",
+            "endip": "10.223.59.240",
+            "vlan": "1000"
         },
         "netscaler": {
             "ipaddress": "",


[27/50] [abbrv] git commit: updated refs/heads/reporter to b26f3fc

Posted by wi...@apache.org.
CLOUDSTACK-8113. VM migration fails with "Message: No such disk device: " error.
Consolidate VM disks once VM/volumes are migrated.
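
A condensed sketch of the call pattern added below (object names follow the diff; error handling is simplified): disk consolidation is only issued on vCenter API 5.0 or newer, and a failure is logged rather than failing the migration:

    // Sketch only: consolidate a (possibly linked-clone) VM's disks after its storage has moved.
    String apiVersion = HypervisorHostHelper.getVcenterApiVersion(vmMo.getContext());
    if (apiVersion.compareTo("5.0") >= 0) {          // ConsolidateVMDisks_Task requires vSphere 5.0+
        if (!vmMo.consolidateVmDisks()) {
            s_logger.warn("VM disk consolidation failed after storage migration.");
        } else {
            s_logger.debug("Successfully consolidated disks of VM " + vmName + ".");
        }
    }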


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/cb211f18
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/cb211f18
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/cb211f18

Branch: refs/heads/reporter
Commit: cb211f18d14dcc9d988254a4b50b55ca0b080ed5
Parents: 77bd069
Author: Likitha Shetty <li...@citrix.com>
Authored: Tue Dec 23 14:20:34 2014 +0530
Committer: Likitha Shetty <li...@citrix.com>
Committed: Tue Dec 23 14:28:17 2014 +0530

----------------------------------------------------------------------
 .../vmware/resource/VmwareResource.java         | 24 ++++++++++++++++++++
 .../hypervisor/vmware/mo/VirtualMachineMO.java  | 12 ++++++++++
 2 files changed, 36 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/cb211f18/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
index c2cf9e9..5a16f03 100644
--- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
+++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
@@ -3054,6 +3054,18 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
                 s_logger.debug("Successfully migrated storage of VM " + vmName + " to target datastore(s)");
             }
 
+            // Consolidate VM disks.
+            // In case of a linked clone VM, if VM's disks are not consolidated,
+            // further VM operations such as volume snapshot, VM snapshot etc. will result in DB inconsistencies.
+            String apiVersion = HypervisorHostHelper.getVcenterApiVersion(vmMo.getContext());
+            if (apiVersion.compareTo("5.0") >= 0) {
+                if (!vmMo.consolidateVmDisks()) {
+                    s_logger.warn("VM disk consolidation failed after storage migration. Yet proceeding with VM migration.");
+                } else {
+                    s_logger.debug("Successfully consolidated disks of VM " + vmName + ".");
+                }
+            }
+
             // Update and return volume path for every disk because that could have changed after migration
             for (Entry<VolumeTO, StorageFilerTO> entry : volToFiler.entrySet()) {
                 volume = entry.getKey();
@@ -3163,6 +3175,18 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
                 s_logger.debug("Successfully migrated volume " + volumePath + " to target datastore " + tgtDsName);
             }
 
+            // Consolidate VM disks.
+            // In case of a linked clone VM, if VM's disks are not consolidated,
+            // further volume operations on the ROOT volume such as volume snapshot etc. will result in DB inconsistencies.
+            String apiVersion = HypervisorHostHelper.getVcenterApiVersion(vmMo.getContext());
+            if (apiVersion.compareTo("5.0") >= 0) {
+                if (!vmMo.consolidateVmDisks()) {
+                    s_logger.warn("VM disk consolidation failed after storage migration.");
+                } else {
+                    s_logger.debug("Successfully consolidated disks of VM " + vmName + ".");
+                }
+            }
+
             // Update and return volume path because that could have changed after migration
             if (!targetDsMo.fileExists(fullVolumePath)) {
                 VirtualDisk[] disks = vmMo.getAllDiskDevice();

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/cb211f18/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java
----------------------------------------------------------------------
diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java
index c2e9d7f..471b4a8 100644
--- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java
+++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java
@@ -2638,4 +2638,16 @@ public class VirtualMachineMO extends BaseMO {
         }
         return guestOsSupportsMemoryHotAdd && virtualHardwareSupportsMemoryHotAdd;
     }
+
+    public boolean consolidateVmDisks() throws Exception {
+        ManagedObjectReference morTask = _context.getService().consolidateVMDisksTask(_mor);
+        boolean result = _context.getVimClient().waitForTask(morTask);
+        if (result) {
+            _context.waitForTaskProgressDone(morTask);
+            return true;
+        } else {
+            s_logger.error("VMware ConsolidateVMDisks_Task failed due to " + TaskMO.getTaskFailureInfo(_context, morTask));
+        }
+        return false;
+    }
 }


[39/50] [abbrv] git commit: updated refs/heads/reporter to b26f3fc

Posted by wi...@apache.org.
CLOUDSTACK-8121. Data disk properties are not updated upon Creation/Deletion of VM snapshots.
Update the path and size of data volumes after snapshot creation/deletion by correctly trimming only the snapshot postfix of a disk.
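
As a worked illustration of the trimming rule factored out below (the disk names are hypothetical): only a trailing "-" separated token of six or more digits is treated as a snapshot delta postfix and removed; everything else is returned unchanged. A small self-contained sketch with the same logic inlined:

    public class TrimPostfixSketch {
        public static void main(String[] args) {
            System.out.println(trim("ROOT-42-000002")); // -> ROOT-42   (delta postfix stripped)
            System.out.println(trim("ROOT-42"));        // -> ROOT-42   (unchanged)
            System.out.println(trim("ROOT-42-1"));      // -> ROOT-42-1 (short numeric token kept)
        }

        // Same rule as VmwareHelper.trimSnapshotDeltaPostfix() in the diff below,
        // inlined here so the example compiles on its own.
        static String trim(String name) {
            String[] tokens = name.split("-");
            if (tokens.length > 1 && tokens[tokens.length - 1].matches("[0-9]{6,}")) {
                StringBuilder sb = new StringBuilder(tokens[0]);
                for (int i = 1; i < tokens.length - 1; i++) {
                    sb.append("-").append(tokens[i]);
                }
                return sb.toString();
            }
            return name;
        }
    }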


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/ac491c96
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/ac491c96
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/ac491c96

Branch: refs/heads/reporter
Commit: ac491c96075d65e35157380fe7d28fdd917c0e90
Parents: b1bca2a
Author: Likitha Shetty <li...@citrix.com>
Authored: Thu Dec 4 19:20:45 2014 +0530
Committer: Likitha Shetty <li...@citrix.com>
Committed: Wed Dec 24 13:04:12 2014 +0530

----------------------------------------------------------------------
 .../manager/VmwareStorageManagerImpl.java       | 34 ++------------------
 .../hypervisor/vmware/mo/VirtualMachineMO.java  | 14 +-------
 .../hypervisor/vmware/util/VmwareHelper.java    | 13 ++++++++
 3 files changed, 16 insertions(+), 45 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/ac491c96/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java
index 86b1edf..1981303 100644
--- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java
+++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java
@@ -1159,36 +1159,6 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
         return size;
     }
 
-    private String extractSnapshotBaseFileName(String input) {
-        if (input == null) {
-            return null;
-        }
-
-        String result = input;
-
-        final String fileType = ".vmdk";
-
-        if (result.endsWith(fileType)) {
-            // get rid of fileType
-            result = result.substring(0, result.length() - (fileType).length());
-        }
-
-        final String token = "-";
-
-        String[] str = result.split(token);
-        int length = str.length;
-
-        if (length == 1 || length == 2) {
-            return result;
-        }
-
-        if (length > 2) {
-            return str[0] + token + str[1];
-        }
-
-        return result;
-    }
-
     @Override
     public CreateVMSnapshotAnswer execute(VmwareHostService hostService, CreateVMSnapshotCommand cmd) {
         List<VolumeObjectTO> volumeTOs = cmd.getVolumeTOs();
@@ -1291,7 +1261,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
                         vmdkName = vmdkName.substring(vmdkName.indexOf(token) + token.length());
                     }
 
-                    baseName = extractSnapshotBaseFileName(vmdkName);
+                    baseName = VmwareHelper.trimSnapshotDeltaPostfix(vmdkName);
                 }
 
                 mapNewDisk.put(baseName, vmdkName);
@@ -1316,7 +1286,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
                 baseName = oldPath.substring(1, oldPath.length() - 1);
             }
             else {
-                baseName = extractSnapshotBaseFileName(volumeTO.getPath());
+                baseName = VmwareHelper.trimSnapshotDeltaPostfix(volumeTO.getPath());
             }
 
             String newPath = mapNewDisk.get(baseName);

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/ac491c96/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java
----------------------------------------------------------------------
diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java
index 727903e..286aedd 100644
--- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java
+++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java
@@ -34,7 +34,6 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 
-import org.apache.commons.lang.StringUtils;
 import org.apache.log4j.Logger;
 
 import com.google.gson.Gson;
@@ -1918,24 +1917,13 @@ public class VirtualMachineMO extends BaseMO {
         }
     }
 
-    private static String trimSnapshotDeltaPostfix(String name) {
-        String[] tokens = name.split("-");
-        if (tokens.length > 1 && tokens[tokens.length - 1].matches("[0-9]{6,}")) {
-            List<String> trimmedTokens = new ArrayList<String>();
-            for (int i = 0; i < tokens.length - 1; i++)
-                trimmedTokens.add(tokens[i]);
-            return StringUtils.join(trimmedTokens, "-");
-        }
-        return name;
-    }
-
     // return pair of VirtualDisk and disk device bus name(ide0:0, etc)
     public Pair<VirtualDisk, String> getDiskDevice(String vmdkDatastorePath, boolean matchExactly) throws Exception {
         List<VirtualDevice> devices = _context.getVimClient().getDynamicProperty(_mor, "config.hardware.device");
 
         DatastoreFile dsSrcFile = new DatastoreFile(vmdkDatastorePath);
         String srcBaseName = dsSrcFile.getFileBaseName();
-        String trimmedSrcBaseName = trimSnapshotDeltaPostfix(srcBaseName);
+        String trimmedSrcBaseName = VmwareHelper.trimSnapshotDeltaPostfix(srcBaseName);
 
         if (matchExactly) {
             s_logger.info("Look for disk device info from volume : " + vmdkDatastorePath + " with base name: " + srcBaseName);

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/ac491c96/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareHelper.java
----------------------------------------------------------------------
diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareHelper.java b/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareHelper.java
index e38e7ee..b2b1090 100644
--- a/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareHelper.java
+++ b/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareHelper.java
@@ -29,6 +29,7 @@ import java.util.List;
 import java.util.Random;
 import java.util.UUID;
 
+import org.apache.commons.lang.StringUtils;
 import org.apache.log4j.Logger;
 
 import com.vmware.vim25.DistributedVirtualSwitchPortConnection;
@@ -703,4 +704,16 @@ public class VmwareHelper {
         // Object name that is greater than 32 is not safe in vCenter
         return UUID.randomUUID().toString().replaceAll("-", "");
     }
+
+    public static String trimSnapshotDeltaPostfix(String name) {
+        String[] tokens = name.split("-");
+        if (tokens.length > 1 && tokens[tokens.length - 1].matches("[0-9]{6,}")) {
+            List<String> trimmedTokens = new ArrayList<String>();
+            for (int i = 0; i < tokens.length - 1; i++)
+                trimmedTokens.add(tokens[i]);
+            return StringUtils.join(trimmedTokens, "-");
+        }
+        return name;
+    }
+
 }


[50/50] [abbrv] git commit: updated refs/heads/reporter to b26f3fc

Posted by wi...@apache.org.
Proposal of a Usage Reporter / call-home functionality for CloudStack

With this commit the Management Server will by default generate an anonymous Usage
Report every 7 (seven) days and submit this information back to the Apache CloudStack project.

These anonymous reports do NOT contain any information about Instance names, subnets, etc. They only
contain numbers about how CloudStack is being used.

This information is vital for the project to gain more insight into how CloudStack is being used.

Users can turn the reporting off by setting usage.report.interval to 0 (zero)
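
For reference, a minimal sketch of how that setting drives the reporting thread (names follow the UsageReporter code in the diff below); an interval of 0 or less simply means the collector is never scheduled:

    // Sketch only: schedule the usage collector only when reporting is enabled.
    int usageReportInterval = NumbersUtil.parseInt(configs.get("usage.report.interval"), 7);
    if (usageReportInterval > 0) {
        // first report after 7 days, then one every usageReportInterval days
        _executor.scheduleWithFixedDelay(new UsageCollector(), 7, usageReportInterval, TimeUnit.DAYS);
    }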


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/b26f3fcb
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/b26f3fcb
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/b26f3fcb

Branch: refs/heads/reporter
Commit: b26f3fcb09805880a84651c8e82b96a877209194
Parents: 25a4f0d
Author: Wido den Hollander <wi...@widodh.nl>
Authored: Thu Nov 20 14:43:33 2014 +0100
Committer: Wido den Hollander <wi...@widodh.nl>
Committed: Fri Jan 2 15:21:17 2015 +0100

----------------------------------------------------------------------
 .../src/com/cloud/upgrade/dao/VersionDao.java   |   4 +
 .../com/cloud/upgrade/dao/VersionDaoImpl.java   |   9 +
 pom.xml                                         |   2 +-
 reporter/README.md                              |  18 +
 reporter/usage-report-collector.py              |  64 +++
 server/pom.xml                                  |  10 +
 .../spring-server-core-managers-context.xml     |   2 +
 server/src/com/cloud/configuration/Config.java  |   5 +-
 .../cloudstack/report/AtomicGsonAdapter.java    |  48 ++
 .../apache/cloudstack/report/UsageReporter.java | 470 +++++++++++++++++++
 setup/db/db/schema-450to460.sql                 |   1 +
 11 files changed, 631 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b26f3fcb/engine/schema/src/com/cloud/upgrade/dao/VersionDao.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/upgrade/dao/VersionDao.java b/engine/schema/src/com/cloud/upgrade/dao/VersionDao.java
index e280e0b..1a60f36 100644
--- a/engine/schema/src/com/cloud/upgrade/dao/VersionDao.java
+++ b/engine/schema/src/com/cloud/upgrade/dao/VersionDao.java
@@ -16,6 +16,8 @@
 // under the License.
 package com.cloud.upgrade.dao;
 
+import java.util.List;
+
 import com.cloud.upgrade.dao.VersionVO.Step;
 import com.cloud.utils.db.GenericDao;
 
@@ -23,4 +25,6 @@ public interface VersionDao extends GenericDao<VersionVO, Long> {
     VersionVO findByVersion(String version, Step step);
 
     String getCurrentVersion();
+
+    List<VersionVO> getAllVersions();
 }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b26f3fcb/engine/schema/src/com/cloud/upgrade/dao/VersionDaoImpl.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/upgrade/dao/VersionDaoImpl.java b/engine/schema/src/com/cloud/upgrade/dao/VersionDaoImpl.java
index 3be101b..344d8c0 100644
--- a/engine/schema/src/com/cloud/upgrade/dao/VersionDaoImpl.java
+++ b/engine/schema/src/com/cloud/upgrade/dao/VersionDaoImpl.java
@@ -170,4 +170,13 @@ public class VersionDaoImpl extends GenericDaoBase<VersionVO, Long> implements V
         }
 
     }
+
+    @Override
+    @DB
+    public List<VersionVO> getAllVersions() {
+        SearchCriteria<VersionVO> sc = AllFieldsSearch.create();
+        sc.setParameters("step", "Complete");
+
+        return listBy(sc);
+    }
 }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b26f3fcb/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index e63769e..94d7d46 100644
--- a/pom.xml
+++ b/pom.xml
@@ -58,7 +58,7 @@
     <cs.jasypt.version>1.9.2</cs.jasypt.version>
     <cs.trilead.version>1.0.0-build217</cs.trilead.version>
     <cs.ehcache.version>2.6.9</cs.ehcache.version>
-    <cs.gson.version>1.7.2</cs.gson.version>
+    <cs.gson.version>2.3.1</cs.gson.version>
     <cs.guava.version>18.0</cs.guava.version>
     <cs.xapi.version>6.2.0-3.1</cs.xapi.version>
     <cs.httpclient.version>4.3.6</cs.httpclient.version>

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b26f3fcb/reporter/README.md
----------------------------------------------------------------------
diff --git a/reporter/README.md b/reporter/README.md
new file mode 100644
index 0000000..6453fa4
--- /dev/null
+++ b/reporter/README.md
@@ -0,0 +1,18 @@
+# CloudStack Usage Report
+
+This directory contains the CloudStack reporter webservice used by the Apache CloudStack project
+to gather anonymous statistical information about CloudStack deployments.
+
+Since version <FIX ME!!> the management server sends an anonymized Usage Report out to the
+project every 7 days.
+
+This information is used to gain information about how CloudStack is being used.
+
+Turning this Usage Reporting functionality off can be done in the Global Settings by setting
+'usage.report.interval' to 0.
+
+# The webservice
+The Python Flask application in this directory is the webservice running on https://reports.cloudstack.apache.org/ (FIX ME?)
+and stores all the incoming information in a ElasticSearch database.
+
+Since Apache CloudStack is Open Source we show not only how we generate the report, but also how we process it.

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b26f3fcb/reporter/usage-report-collector.py
----------------------------------------------------------------------
diff --git a/reporter/usage-report-collector.py b/reporter/usage-report-collector.py
new file mode 100755
index 0000000..500a4d2
--- /dev/null
+++ b/reporter/usage-report-collector.py
@@ -0,0 +1,64 @@
+#!/usr/bin/env python
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+# 
+#   http://www.apache.org/licenses/LICENSE-2.0
+# 
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+from flask import abort, Flask, request, Response
+from elasticsearch import Elasticsearch
+import json
+import time
+
+def json_response(response):
+    return json.dumps(response, indent=2) + "\n", 200, {'Content-Type': 'application/json; charset=utf-8'}
+
+def generate_app(config=None):
+    app = Flask(__name__)
+
+    @app.route('/report/<unique_id>', methods=['POST'])
+    def report(unique_id):
+        # We expect JSON data, so if the Content-Type doesn't match JSON data we throw an error
+        if 'Content-Type' in request.headers:
+            if request.headers['Content-Type'] != 'application/json':
+                abort(417, "No or incorrect Content-Type header was supplied")
+
+        index = "cloudstack-%s" % time.strftime("%Y.%m.%d", time.gmtime())
+        timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
+
+        es = Elasticsearch()
+        es.indices.create(index=index, ignore=400)
+
+        report = json.loads(request.data)
+        report["unique_id"] = unique_id
+        report["timestamp"] = timestamp
+
+        es.index(index=index, doc_type="usage-report", body=json.dumps(report), timestamp=timestamp, refresh=True)
+
+        response = {}
+        return json_response(response)
+
+    return app
+
+
+app = generate_app()
+
+# Only run the App if this script is invoked from a Shell
+if __name__ == '__main__':
+    app.debug = True
+    app.run(host='0.0.0.0', port=8088)
+
+# Otherwise provide a variable called 'application' for mod_wsgi
+else:
+    application = app

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b26f3fcb/server/pom.xml
----------------------------------------------------------------------
diff --git a/server/pom.xml b/server/pom.xml
index c293aa1..f2a2a0e 100644
--- a/server/pom.xml
+++ b/server/pom.xml
@@ -138,6 +138,16 @@
       <artifactId>opensaml</artifactId>
       <version>${cs.opensaml.version}</version>
     </dependency>
+    <dependency>
+      <groupId>com.google.code.gson</groupId>
+      <artifactId>gson</artifactId>
+      <version>${cs.gson.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+      <version>${cs.guava.version}</version>
+    </dependency>
   </dependencies>
   <build>
     <testResources>

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b26f3fcb/server/resources/META-INF/cloudstack/core/spring-server-core-managers-context.xml
----------------------------------------------------------------------
diff --git a/server/resources/META-INF/cloudstack/core/spring-server-core-managers-context.xml b/server/resources/META-INF/cloudstack/core/spring-server-core-managers-context.xml
index 6d8f32e..faa5286 100644
--- a/server/resources/META-INF/cloudstack/core/spring-server-core-managers-context.xml
+++ b/server/resources/META-INF/cloudstack/core/spring-server-core-managers-context.xml
@@ -222,6 +222,8 @@
 
     <bean id="statsCollector" class="com.cloud.server.StatsCollector" />
 
+    <bean id="usageReporter" class="org.apache.cloudstack.report.UsageReporter" />
+
     <bean id="storagePoolAutomationImpl" class="com.cloud.storage.StoragePoolAutomationImpl" />
 
     <bean id="domainManagerImpl" class="com.cloud.user.DomainManagerImpl" />

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b26f3fcb/server/src/com/cloud/configuration/Config.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/configuration/Config.java b/server/src/com/cloud/configuration/Config.java
index af4da6a..a906f1b 100644
--- a/server/src/com/cloud/configuration/Config.java
+++ b/server/src/com/cloud/configuration/Config.java
@@ -2064,7 +2064,10 @@ public enum Config {
     PublishAsynJobEvent("Advanced", ManagementServer.class, Boolean.class, "publish.async.job.events", "true", "enable or disable publishing of usage events on the event bus", null),
 
     // StatsCollector
-    StatsOutPutGraphiteHost("Advanced", ManagementServer.class, String.class, "stats.output.uri", "", "URI to additionally send StatsCollector statistics to", null);
+    StatsOutPutGraphiteHost("Advanced", ManagementServer.class, String.class, "stats.output.uri", "", "URI to additionally send StatsCollector statistics to", null),
+
+    // Usage Reporting service
+    UsageReportInterval("Advanced", ManagementServer.class, Integer.class, "usage.report.interval", "7", "Interval (days) between sending anonymous Usage Reports back to the CloudStack project", null);
 
     private final String _category;
     private final Class<?> _componentClass;

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b26f3fcb/server/src/org/apache/cloudstack/report/AtomicGsonAdapter.java
----------------------------------------------------------------------
diff --git a/server/src/org/apache/cloudstack/report/AtomicGsonAdapter.java b/server/src/org/apache/cloudstack/report/AtomicGsonAdapter.java
new file mode 100644
index 0000000..23d83f1
--- /dev/null
+++ b/server/src/org/apache/cloudstack/report/AtomicGsonAdapter.java
@@ -0,0 +1,48 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.report;
+
+import com.google.gson.TypeAdapter;
+import com.google.gson.stream.JsonReader;
+import com.google.gson.stream.JsonWriter;
+import com.google.common.util.concurrent.AtomicLongMap;
+import java.util.Map;
+import java.io.IOException;
+
+public class AtomicGsonAdapter extends TypeAdapter<AtomicLongMap> {
+
+    public AtomicLongMap<Object> read(JsonReader reader) throws IOException {
+        reader.nextNull();
+        return null;
+    }
+
+    public void write(JsonWriter writer, AtomicLongMap value) throws IOException {
+        if (value == null) {
+            writer.nullValue();
+            return;
+        }
+
+        @SuppressWarnings("unchecked")
+        Map <String, Long> map = value.asMap();
+
+        writer.beginObject();
+        for (Map.Entry<String, Long> entry : map.entrySet()) {
+            writer.name(entry.getKey()).value(entry.getValue());
+        }
+        writer.endObject();
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b26f3fcb/server/src/org/apache/cloudstack/report/UsageReporter.java
----------------------------------------------------------------------
diff --git a/server/src/org/apache/cloudstack/report/UsageReporter.java b/server/src/org/apache/cloudstack/report/UsageReporter.java
new file mode 100644
index 0000000..4656cc4
--- /dev/null
+++ b/server/src/org/apache/cloudstack/report/UsageReporter.java
@@ -0,0 +1,470 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.report;
+
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.HashMap;
+import java.text.DateFormat;
+import java.text.SimpleDateFormat;
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.net.URL;
+import java.net.HttpURLConnection;
+import java.net.SocketTimeoutException;
+import java.net.MalformedURLException;
+import java.net.ProtocolException;
+import java.io.OutputStreamWriter;
+import java.io.IOException;
+
+import javax.inject.Inject;
+
+import org.apache.log4j.Logger;
+import org.springframework.stereotype.Component;
+
+import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
+import org.apache.cloudstack.managed.context.ManagedContextRunnable;
+
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+
+import org.apache.commons.codec.digest.DigestUtils;
+
+import com.cloud.host.HostVO;
+import com.cloud.host.dao.HostDao;
+import com.cloud.dc.ClusterVO;
+import com.cloud.dc.dao.ClusterDao;
+import com.cloud.dc.DataCenterVO;
+import com.cloud.dc.dao.DataCenterDao;
+import com.cloud.vm.UserVmVO;
+import com.cloud.vm.dao.UserVmDao;
+import com.cloud.vm.VMInstanceVO;
+import com.cloud.vm.dao.VMInstanceDao;
+import com.cloud.utils.db.SearchCriteria;
+import com.cloud.utils.NumbersUtil;
+import com.cloud.utils.component.ManagerBase;
+import com.cloud.utils.component.ComponentMethodInterceptable;
+import com.cloud.utils.concurrency.NamedThreadFactory;
+import com.cloud.utils.db.DB;
+import com.cloud.utils.db.TransactionLegacy;
+import com.cloud.upgrade.dao.VersionDao;
+import com.cloud.upgrade.dao.VersionVO;
+import com.cloud.storage.dao.DiskOfferingDao;
+import com.cloud.storage.DiskOfferingVO;
+import com.google.gson.Gson;
+import com.google.gson.GsonBuilder;
+import com.google.common.util.concurrent.AtomicLongMap;
+
+@Component
+public class UsageReporter extends ManagerBase implements ComponentMethodInterceptable {
+    public static final Logger s_logger = Logger.getLogger(UsageReporter.class.getName());
+
+    /* !FIX ME! This should point to an Apache Infra host with SSL! */
+    private String reportHost = "http://cs-report.widodh.nl:8088/report";
+
+    private String uniqueID = null;
+
+    private static UsageReporter s_instance = null;
+
+    private ScheduledExecutorService _executor = null;
+
+    @Inject
+    private ConfigurationDao _configDao;
+    @Inject
+    private HostDao _hostDao;
+    @Inject
+    private ClusterDao _clusterDao;
+    @Inject
+    private PrimaryDataStoreDao _storagePoolDao;
+    @Inject
+    private DataCenterDao _dataCenterDao;
+    @Inject
+    private UserVmDao _userVmDao;
+    @Inject
+    private VMInstanceDao _vmInstance;
+    @Inject
+    private VersionDao _versionDao;
+    @Inject
+    private DiskOfferingDao _diskOfferingDao;
+
+    int usageReportInterval = -1;
+
+    public static UsageReporter getInstance() {
+        return s_instance;
+    }
+
+    public static UsageReporter getInstance(Map<String, String> configs) {
+        s_instance.init(configs);
+        return s_instance;
+    }
+
+    public UsageReporter() {
+        s_instance = this;
+    }
+
+    @Override
+    public boolean start() {
+        init(_configDao.getConfiguration());
+        return true;
+    }
+
+    private void init(Map<String, String> configs) {
+        _executor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("UsageReporter"));
+
+        usageReportInterval = NumbersUtil.parseInt(configs.get("usage.report.interval"), 7);
+
+        if (usageReportInterval > 0) {
+            _executor.scheduleWithFixedDelay(new UsageCollector(), 7, usageReportInterval, TimeUnit.DAYS);
+        }
+
+        uniqueID = getUniqueId();
+    }
+
+    private void sendReport(String reportUri, String uniqueID, Map<String, Object> reportMap) {
+
+        GsonBuilder builder = new GsonBuilder();
+
+        AtomicGsonAdapter adapter = new AtomicGsonAdapter();
+        builder.registerTypeAdapter(AtomicLongMap.class, adapter);
+
+        Gson gson = builder.create();
+        String report = gson.toJson(reportMap);
+
+        int http_timeout = 15000;
+
+        try {
+            s_logger.info("Usage Report will be send to: " + reportUri);
+            s_logger.debug("REPORT: " + report);
+
+            URL url = new URL(reportUri + "/" + uniqueID);
+
+            HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+            conn.setConnectTimeout(http_timeout);
+            conn.setReadTimeout(http_timeout);
+            conn.setRequestMethod("POST");
+            conn.setDoOutput(true);
+            conn.setRequestProperty("Content-Type", "application/json");
+            conn.setRequestProperty("Accept", "application/json");
+
+            OutputStreamWriter osw = new OutputStreamWriter(conn.getOutputStream());
+            osw.write(report);
+            osw.flush();
+            osw.close();
+
+            int resp_code = conn.getResponseCode();
+
+            if (resp_code == HttpURLConnection.HTTP_OK){
+                s_logger.info("Usage Report succesfully send to: " + reportUri);
+            } else {
+                s_logger.warn("Failed to send Usage Report: " + conn.getResponseMessage());
+            }
+
+        } catch (SocketTimeoutException e) {
+            s_logger.warn("Sending Usage Report to " + reportUri + " timed out: " + e.getMessage());
+        } catch (MalformedURLException e) {
+            s_logger.warn(reportUri + " is an invalid URL for sending the Usage Report to: " + e.getMessage());
+        } catch (ProtocolException e) {
+            s_logger.warn("Sending Usage Report failed due to an invalid protocol: " + e.getMessage());
+        } catch (IOException e) {
+            s_logger.warn("Failed to write Usage Report due to an IOException: " + e.getMessage());
+        }
+    }
+
+    @DB
+    private String getUniqueId() {
+        String unique = null;
+        Connection conn = null;
+
+        try {
+            conn = TransactionLegacy.getStandaloneConnection();
+
+            PreparedStatement pstmt = conn.prepareStatement("SELECT version,updated FROM version ORDER BY id ASC LIMIT 1");
+            ResultSet rs = pstmt.executeQuery();
+            if (rs.next()) {
+                unique = DigestUtils.sha256Hex(rs.getString(1) + rs.getString(2));
+            } else {
+                s_logger.debug("No rows found in the version table. Unable to obtain unique ID for this environment");
+            }
+
+            rs.close();
+        } catch (SQLException e) {
+            s_logger.debug("Unable to get the unique ID of this environment: " + e.getMessage());
+        } finally {
+            try {
+                if (conn != null) conn.close();
+            } catch (SQLException e) {
+            }
+        }
+
+        s_logger.debug("Usage Report Unique ID is: " + unique);
+
+        return unique;
+    }
+
+    private Map<String, AtomicLongMap> getHostReport() {
+        Map<String, AtomicLongMap> hostMap = new HashMap<String, AtomicLongMap>();
+        AtomicLongMap<Object> host_types = AtomicLongMap.create();
+        AtomicLongMap<Object> host_hypervisor_type = AtomicLongMap.create();
+        AtomicLongMap<Object> host_version = AtomicLongMap.create();
+
+        SearchCriteria<HostVO> host_sc = _hostDao.createSearchCriteria();
+        List<HostVO> hosts = _hostDao.search(host_sc, null);
+        for (HostVO host : hosts) {
+            host_types.getAndIncrement(host.getType());
+            if (host.getHypervisorType() != null) {
+                host_hypervisor_type.getAndIncrement(host.getHypervisorType());
+            }
+
+            host_version.getAndIncrement(host.getVersion());
+        }
+
+        hostMap.put("version", host_version);
+        hostMap.put("hypervisor_type", host_hypervisor_type);
+        hostMap.put("type", host_types);
+
+        return hostMap;
+    }
+
+    private Map<String, AtomicLongMap> getClusterReport() {
+        Map<String, AtomicLongMap> clusterMap = new HashMap<String, AtomicLongMap>();
+        AtomicLongMap<Object> cluster_hypervisor_type = AtomicLongMap.create();
+        AtomicLongMap<Object> cluster_types = AtomicLongMap.create();
+
+        SearchCriteria<ClusterVO> cluster_sc = _clusterDao.createSearchCriteria();
+        List<ClusterVO> clusters = _clusterDao.search(cluster_sc, null);
+        for (ClusterVO cluster : clusters) {
+            if (cluster.getClusterType() != null) {
+                cluster_types.getAndIncrement(cluster.getClusterType());
+            }
+
+            if (cluster.getHypervisorType() != null) {
+                cluster_hypervisor_type.getAndIncrement(cluster.getHypervisorType());
+            }
+        }
+
+        clusterMap.put("hypervisor_type", cluster_hypervisor_type);
+        clusterMap.put("type", cluster_types);
+
+        return clusterMap;
+    }
+
+    private Map<String, AtomicLongMap> getStoragePoolReport() {
+        Map<String, AtomicLongMap> storagePoolMap = new HashMap<String, AtomicLongMap>();
+        AtomicLongMap<Object> storage_pool_types = AtomicLongMap.create();
+        AtomicLongMap<Object> storage_pool_provider = AtomicLongMap.create();
+        AtomicLongMap<Object> storage_pool_scope = AtomicLongMap.create();
+
+        List<StoragePoolVO> storagePools = _storagePoolDao.listAll();
+        for (StoragePoolVO pool : storagePools) {
+            if (pool.getPoolType() != null) {
+                storage_pool_types.getAndIncrement(pool.getPoolType());
+            }
+
+            if (pool.getStorageProviderName() != null) {
+                storage_pool_provider.getAndIncrement(pool.getStorageProviderName());
+            }
+
+            if (pool.getScope() != null) {
+                storage_pool_scope.getAndIncrement(pool.getScope());
+            }
+        }
+
+        storagePoolMap.put("type", storage_pool_types);
+        storagePoolMap.put("provider", storage_pool_provider);
+        storagePoolMap.put("scope", storage_pool_scope);
+
+        return storagePoolMap;
+    }
+
+    private Map<String, AtomicLongMap> getDataCenterReport() {
+        Map<String, AtomicLongMap> datacenterMap = new HashMap<String, AtomicLongMap>();
+        AtomicLongMap<Object> network_type = AtomicLongMap.create();
+        AtomicLongMap<Object> dns_provider = AtomicLongMap.create();
+        AtomicLongMap<Object> dhcp_provider = AtomicLongMap.create();
+        AtomicLongMap<Object> lb_provider = AtomicLongMap.create();
+        AtomicLongMap<Object> firewall_provider = AtomicLongMap.create();
+        AtomicLongMap<Object> gateway_provider = AtomicLongMap.create();
+        AtomicLongMap<Object> userdata_provider = AtomicLongMap.create();
+        AtomicLongMap<Object> vpn_provider = AtomicLongMap.create();
+
+        List<DataCenterVO> datacenters = _dataCenterDao.listAllZones();
+        for (DataCenterVO datacenter : datacenters) {
+            if (datacenter.getNetworkType() != null) {
+                network_type.getAndIncrement(datacenter.getNetworkType());
+            }
+
+            if (datacenter.getDnsProvider() != null) {
+                dns_provider.getAndIncrement(datacenter.getDnsProvider());
+            }
+
+            if (datacenter.getDhcpProvider() != null) {
+                dhcp_provider.getAndIncrement(datacenter.getDhcpProvider());
+            }
+
+            if (datacenter.getLoadBalancerProvider() != null) {
+                lb_provider.getAndIncrement(datacenter.getLoadBalancerProvider());
+            }
+
+            if (datacenter.getFirewallProvider() != null) {
+                firewall_provider.getAndIncrement(datacenter.getFirewallProvider());
+            }
+
+            if (datacenter.getGatewayProvider() != null) {
+                gateway_provider.getAndIncrement(datacenter.getGatewayProvider());
+            }
+
+            if (datacenter.getUserDataProvider() != null) {
+                userdata_provider.getAndIncrement(datacenter.getUserDataProvider());
+            }
+
+            if (datacenter.getVpnProvider() != null) {
+                vpn_provider.getAndIncrement(datacenter.getVpnProvider());
+            }
+        }
+
+        datacenterMap.put("network_type", network_type);
+        datacenterMap.put("dns_provider", dns_provider);
+        datacenterMap.put("dhcp_provider", dhcp_provider);
+        datacenterMap.put("lb_provider", lb_provider);
+        datacenterMap.put("firewall_provider", firewall_provider);
+        datacenterMap.put("gateway_provider", gateway_provider);
+        datacenterMap.put("userdata_provider", userdata_provider);
+        datacenterMap.put("vpn_provider", vpn_provider);
+
+        return datacenterMap;
+    }
+
+    private Map<String, AtomicLongMap> getInstanceReport() {
+
+        Map<String, AtomicLongMap> instanceMap = new HashMap<String, AtomicLongMap>();
+        AtomicLongMap<Object> hypervisor_type = AtomicLongMap.create();
+        AtomicLongMap<Object> instance_state = AtomicLongMap.create();
+        AtomicLongMap<Object> instance_type = AtomicLongMap.create();
+        AtomicLongMap<Object> ha_enabled = AtomicLongMap.create();
+        AtomicLongMap<Object> dynamically_scalable = AtomicLongMap.create();
+
+        SearchCriteria<HostVO> host_sc = _hostDao.createSearchCriteria();
+        List<HostVO> hosts = _hostDao.search(host_sc, null);
+        for (HostVO host : hosts) {
+            List<UserVmVO> vms = _userVmDao.listByLastHostId(host.getId());
+            for (UserVmVO vm : vms) {
+                VMInstanceVO vmVO = _vmInstance.findById(vm.getId());
+
+                if (vmVO.getHypervisorType() != null) {
+                    hypervisor_type.getAndIncrement(vmVO.getHypervisorType());
+                }
+
+                if (vmVO.getState() != null) {
+                    instance_state.getAndIncrement(vmVO.getState());
+                }
+
+                if (vmVO.getType() != null) {
+                    instance_type.getAndIncrement(vmVO.getType());
+                }
+
+                ha_enabled.getAndIncrement(vmVO.isHaEnabled());
+                dynamically_scalable.getAndIncrement(vmVO.isDynamicallyScalable());
+            }
+        }
+
+        instanceMap.put("hypervisor_type", hypervisor_type);
+        instanceMap.put("state", instance_state);
+        instanceMap.put("type", instance_type);
+        instanceMap.put("ha_enabled", ha_enabled);
+        instanceMap.put("dynamically_scalable", dynamically_scalable);
+
+        return instanceMap;
+    }
+
+    private Map<String, Object> getDiskOfferingReport() {
+        Map<String, Object> diskOfferingReport = new HashMap<String, Object>();
+
+        AtomicLongMap<Object> system_use = AtomicLongMap.create();
+        AtomicLongMap<Object> provisioning_type = AtomicLongMap.create();
+        AtomicLongMap<Object> use_local_storage = AtomicLongMap.create();
+
+        List<DiskOfferingVO> private_offerings = _diskOfferingDao.findPrivateDiskOffering();
+        List<DiskOfferingVO> public_offerings = _diskOfferingDao.findPublicDiskOfferings();
+
+        List<DiskOfferingVO> offerings = new ArrayList<DiskOfferingVO>();
+        offerings.addAll(private_offerings);
+        offerings.addAll(public_offerings);
+
+        long disk_size = 0;
+        for (DiskOfferingVO offering : offerings) {
+            provisioning_type.getAndIncrement(offering.getProvisioningType());
+            system_use.getAndIncrement(offering.getSystemUse());
+            use_local_storage.getAndIncrement(offering.getUseLocalStorage());
+            disk_size += offering.getDiskSize();
+        }
+
+        diskOfferingReport.put("system_use", system_use);
+        diskOfferingReport.put("provisioning_type", provisioning_type);
+        diskOfferingReport.put("use_local_storage", use_local_storage);
+        diskOfferingReport.put("avg_disk_size", offerings.isEmpty() ? 0 : disk_size / offerings.size());
+
+        return diskOfferingReport;
+    }
+
+    private Map<String, String> getVersionReport() {
+        Map<String, String> versionMap = new HashMap<String, String>();
+
+        DateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'");
+
+        List<VersionVO> versions = _versionDao.getAllVersions();
+        for (VersionVO version : versions) {
+            versionMap.put(version.getVersion(), dateFormat.format(version.getUpdated()));
+        }
+
+        return versionMap;
+    }
+
+    private String getCurrentVersion() {
+        return _versionDao.getCurrentVersion();
+    }
+
+    class UsageCollector extends ManagedContextRunnable {
+        @Override
+        protected void runInContext() {
+            try {
+                s_logger.warn("UsageReporter is running...");
+
+                Map<String, Object> reportMap = new HashMap<String, Object>();
+
+                reportMap.put("hosts", getHostReport());
+                reportMap.put("clusters", getClusterReport());
+                reportMap.put("primaryStorage", getStoragePoolReport());
+                reportMap.put("zones", getDataCenterReport());
+                reportMap.put("instances", getInstanceReport());
+                reportMap.put("diskOffering", getDiskOfferingReport());
+                reportMap.put("versions", getVersionReport());
+                reportMap.put("current_version", getCurrentVersion());
+
+                sendReport(reportHost, uniqueID, reportMap);
+
+            } catch (Exception e) {
+                s_logger.warn("Failed to compile Usage Report: " + e.getMessage());
+            }
+        }
+    }
+}
\ No newline at end of file
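
The AtomicGsonAdapter registered in sendReport() above is not shown in this hunk. Purely as an
illustration of the technique, a minimal Gson TypeAdapter for Guava's AtomicLongMap could look like
the sketch below; it flattens each counter map into a JSON object of "key": count pairs. The class
name and method bodies here are assumptions for illustration, not the adapter from this commit.

    import java.io.IOException;
    import java.util.Map;

    import com.google.common.util.concurrent.AtomicLongMap;
    import com.google.gson.TypeAdapter;
    import com.google.gson.stream.JsonReader;
    import com.google.gson.stream.JsonWriter;

    public class AtomicLongMapAdapter extends TypeAdapter<AtomicLongMap<Object>> {
        @Override
        public void write(JsonWriter out, AtomicLongMap<Object> value) throws IOException {
            if (value == null) {
                out.nullValue();
                return;
            }
            out.beginObject();
            // Emit every counter as "key": count, e.g. {"KVM": 12, "XenServer": 3}
            for (Map.Entry<Object, Long> entry : value.asMap().entrySet()) {
                out.name(String.valueOf(entry.getKey())).value(entry.getValue());
            }
            out.endObject();
        }

        @Override
        public AtomicLongMap<Object> read(JsonReader in) throws IOException {
            // The usage report is only ever serialized on this side, never read back.
            throw new UnsupportedOperationException("Deserialization is not supported");
        }
    }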

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b26f3fcb/setup/db/db/schema-450to460.sql
----------------------------------------------------------------------
diff --git a/setup/db/db/schema-450to460.sql b/setup/db/db/schema-450to460.sql
index 0493184..3ea8e8a 100644
--- a/setup/db/db/schema-450to460.sql
+++ b/setup/db/db/schema-450to460.sql
@@ -148,3 +148,4 @@ INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Domain Defaults', 'DEFAULT',
 INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Domain Defaults', 'DEFAULT', 'management-server', 'max.domain.primary.storage', '-1', 'The default maximum primary storage space (in GiB) that can be used for a domain', '-1', NULL, NULL, 0);
 INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Domain Defaults', 'DEFAULT', 'management-server', 'max.domain.secondary.storage', '-1', 'The default maximum secondary storage space (in GiB) that can be used for a domain', '-1', NULL, NULL, 0);
 
+INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'usage.report.interval', '7', 'Interval (days) between sending anonymous Usage Reports back to the CloudStack project', '', NULL, NULL, 0);
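
Taken together with the UsageReporter class above, the payload POSTed to reportHost + "/" + uniqueID
is simply the Gson serialization of reportMap, with each AtomicLongMap counter rendered as a plain
JSON object. The self-contained sketch below shows the rough shape of such a payload; the map keys
("hosts", "hypervisor_type", "current_version") come from the code above, but every count and
version value is illustrative only.

    import java.util.HashMap;
    import java.util.Map;

    import com.google.gson.Gson;

    public class ReportShapeExample {
        public static void main(String[] args) {
            // Stand-in for one AtomicLongMap counter ("hypervisor_type" under "hosts").
            Map<String, Long> hypervisorType = new HashMap<String, Long>();
            hypervisorType.put("KVM", 12L);        // illustrative counts only
            hypervisorType.put("XenServer", 3L);

            Map<String, Object> hosts = new HashMap<String, Object>();
            hosts.put("hypervisor_type", hypervisorType);

            Map<String, Object> report = new HashMap<String, Object>();
            report.put("hosts", hosts);
            report.put("current_version", "4.6.0"); // illustrative version string

            // Prints something like:
            // {"hosts":{"hypervisor_type":{"XenServer":3,"KVM":12}},"current_version":"4.6.0"}
            System.out.println(new Gson().toJson(report));
        }
    }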


[32/50] [abbrv] git commit: updated refs/heads/reporter to b26f3fc

Posted by wi...@apache.org.
CLOUDSTACK-8114. Ensure VM stop and then start updates the volume path correctly in the DB.


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/521258ba
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/521258ba
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/521258ba

Branch: refs/heads/reporter
Commit: 521258bafb91e30d7bd43fba62cc47a77c078028
Parents: ceae978
Author: Likitha Shetty <li...@citrix.com>
Authored: Wed Nov 5 14:51:10 2014 +0530
Committer: Likitha Shetty <li...@citrix.com>
Committed: Wed Dec 24 09:56:06 2014 +0530

----------------------------------------------------------------------
 .../src/com/cloud/vm/VirtualMachineManagerImpl.java            | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/521258ba/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java
----------------------------------------------------------------------
diff --git a/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java
index 07b2277..be5ea63 100644
--- a/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java
+++ b/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java
@@ -1207,7 +1207,11 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                 // Use getPath() from VolumeVO to get a fresh copy of what's in the DB.
                 // Before doing this, in a certain situation, getPath() from VolumeObjectTO
                 // returned null instead of an actual path (because it was out of date with the DB).
-                volumeMgr.updateVolumeDiskChain(vol.getId(), volume.getPath(), vol.getChainInfo());
+                if (vol.getPath() != null) {
+                    volumeMgr.updateVolumeDiskChain(vol.getId(), vol.getPath(), vol.getChainInfo());
+                } else {
+                    volumeMgr.updateVolumeDiskChain(vol.getId(), volume.getPath(), vol.getChainInfo());
+                }
             }
         }
     }
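
The same preference for the freshly loaded path can also be expressed as a single conditional. A
short sketch equivalent to the hunk above, using the same names (vol being the VolumeVO re-read
from the DB, volume carrying the possibly stale VolumeObjectTO path, as the in-code comment notes):

    // Prefer the path freshly read from the DB; fall back to the VolumeObjectTO
    // path only when the DB copy has no path set.
    String path = (vol.getPath() != null) ? vol.getPath() : volume.getPath();
    volumeMgr.updateVolumeDiskChain(vol.getId(), path, vol.getChainInfo());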


[30/50] [abbrv] git commit: updated refs/heads/reporter to b26f3fc

Posted by wi...@apache.org.
CLOUDSTACK-8099: Fixed missing import in test_dynamic_compute_offering.py

Signed-off-by: SrikanteswaraRao Talluri <ta...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/bce67bc2
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/bce67bc2
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/bce67bc2

Branch: refs/heads/reporter
Commit: bce67bc282e12db15a2630af30a0bf9ea5d46c29
Parents: fc2c1a0
Author: Ashutosh K <as...@clogeny.com>
Authored: Mon Dec 22 15:40:39 2014 +0530
Committer: SrikanteswaraRao Talluri <ta...@apache.org>
Committed: Tue Dec 23 15:03:30 2014 +0530

----------------------------------------------------------------------
 .../component/test_dynamic_compute_offering.py  | 944 ++++++++++++-------
 1 file changed, 620 insertions(+), 324 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/bce67bc2/test/integration/component/test_dynamic_compute_offering.py
----------------------------------------------------------------------
diff --git a/test/integration/component/test_dynamic_compute_offering.py b/test/integration/component/test_dynamic_compute_offering.py
index 1e47579..5215f84 100644
--- a/test/integration/component/test_dynamic_compute_offering.py
+++ b/test/integration/component/test_dynamic_compute_offering.py
@@ -17,13 +17,15 @@
 
 """ Tests for Dynamic Compute Offering Feature
 
-    Test Plan: https://cwiki.apache.org/confluence/display/CLOUDSTACK/Dynamic+ComputeOffering
+    Test Plan: https://cwiki.apache.org/confluence/display/CLOUDSTACK
+    /Dynamic+ComputeOffering
 
     Issue Link: https://issues.apache.org/jira/browse/CLOUDSTACK-6147
 
-    Feature Specifications: https://cwiki.apache.org/confluence/display/CLOUDSTACK/Dynamic+Compute+Offering+FS
+    Feature Specifications: https://cwiki.apache.org/confluence/display/
+    CLOUDSTACK/Dynamic+Compute+Offering+FS
 """
-from marvin.cloudstackTestCase import cloudstackTestCase
+from marvin.cloudstackTestCase import cloudstackTestCase, unittest
 from marvin.lib.utils import (cleanup_resources,
                               validateList,
                               random_gen,
@@ -43,8 +45,10 @@ from nose.plugins.attrib import attr
 from marvin.codes import PASS, ADMIN_ACCOUNT, USER_ACCOUNT, FAILED
 from ddt import ddt, data
 
+
 @ddt
 class TestDynamicServiceOffering(cloudstackTestCase):
+
     """Test Dynamic Service Offerings
     """
 
@@ -65,7 +69,8 @@ class TestDynamicServiceOffering(cloudstackTestCase):
             cls.services["ostype"]
         )
         if cls.template == FAILED:
-            assert False, "get_template() failed to return template with description %s" % cls.services["ostype"]
+            assert False, "get_template() failed to return template\
+                    with description %s" % cls.services["ostype"]
         cls.services["virtual_machine"]["zoneid"] = cls.zone.id
         cls.services["virtual_machine"]["template"] = cls.template.id
         cls._cleanup = []
@@ -101,14 +106,14 @@ class TestDynamicServiceOffering(cloudstackTestCase):
             raise Exception("Warning: Exception during cleanup : %s" % e)
         return
 
-    @attr(tags=["basic","advanced"])
+    @attr(tags=["basic", "advanced"])
     def test_create_normal_compute_offering(self):
         """ Create normal compute offering with non zero values for cpu,
             cpu number and memory"""
 
         # Steps:
-        # 1. Create normal compute offering with non zero values for cpu number,
-        #    cpu speed, memory
+        # 1. Create normal compute offering with non zero values
+        # for cpu number, cpu speed, memory
 
         # Validations:
         # 1. Compute offering should be created
@@ -117,16 +122,20 @@ class TestDynamicServiceOffering(cloudstackTestCase):
         self.services["service_offering"]["cpuspeed"] = 256
         self.services["service_offering"]["memory"] = 128
 
-        serviceOffering = ServiceOffering.create(self.api_client,
-                                                 self.services["service_offering"]
-                                                 )
-        self.assertEqual(verifyComputeOfferingCreation(self.apiclient, serviceOffering.id),
-                         PASS, "Compute Offering verification failed")
+        serviceOffering = ServiceOffering.create(
+            self.api_client,
+            self.services["service_offering"])
+        self.assertEqual(
+            verifyComputeOfferingCreation(
+                self.apiclient,
+                serviceOffering.id),
+            PASS,
+            "Compute Offering verification failed")
 
         self.cleanup_co.append(serviceOffering)
         return
 
-    @attr(tags=["basic","advanced"])
+    @attr(tags=["basic", "advanced"])
     def test_create_dynamic_compute_offering(self):
         """ Create dynamic compute offering with cpunumber, cpuspeed and memory
             not specified"""
@@ -142,16 +151,20 @@ class TestDynamicServiceOffering(cloudstackTestCase):
         self.services["service_offering"]["cpuspeed"] = ""
         self.services["service_offering"]["memory"] = ""
 
-        serviceOffering = ServiceOffering.create(self.api_client,
-                                                 self.services["service_offering"]
-                                                 )
-        self.assertEqual(verifyComputeOfferingCreation(self.apiclient, serviceOffering.id),
-                         PASS, "Compute Offering verification failed")
+        serviceOffering = ServiceOffering.create(
+            self.api_client,
+            self.services["service_offering"])
+        self.assertEqual(
+            verifyComputeOfferingCreation(
+                self.apiclient,
+                serviceOffering.id),
+            PASS,
+            "Compute Offering verification failed")
 
         self.cleanup_co.append(serviceOffering)
         return
 
-    @attr(tags=["basic","advanced"])
+    @attr(tags=["basic", "advanced"])
     def test_create_dynamic_compute_offering_no_cpunumber(self):
         """ Create dynamic compute offering with only cpunumber unspecified"""
 
@@ -163,16 +176,17 @@ class TestDynamicServiceOffering(cloudstackTestCase):
         self.services["service_offering"]["memory"] = 128
 
         try:
-            serviceOffering = ServiceOffering.create(self.api_client,
-                                                 self.services["service_offering"]
-                                                 )
+            serviceOffering = ServiceOffering.create(
+                self.api_client,
+                self.services["service_offering"])
             self.cleanup_co.append(serviceOffering)
-            self.fail("Compute Offering creation succeded, it should have failed")
+            self.fail(
+                "Compute Offering creation succeeded, it should have failed")
         except Exception:
             self.debug("Compute Offering Creation failed as expected")
         return
 
-    @attr(tags=["basic","advanced"])
+    @attr(tags=["basic", "advanced"])
     def test_create_dynamic_compute_offering_no_cpuspeed(self):
         """ Create dynamic compute offering with only cpuspeed unspecified"""
 
@@ -184,16 +198,17 @@ class TestDynamicServiceOffering(cloudstackTestCase):
         self.services["service_offering"]["memory"] = 128
 
         try:
-            serviceOffering = ServiceOffering.create(self.api_client,
-                                                 self.services["service_offering"]
-                                                 )
+            serviceOffering = ServiceOffering.create(
+                self.api_client,
+                self.services["service_offering"])
             self.cleanup_co.append(serviceOffering)
-            self.fail("Compute Offering creation succeded, it should have failed")
+            self.fail(
+                "Compute Offering creation succeeded, it should have failed")
         except Exception:
             self.debug("Compute Offering Creation failed as expected")
         return
 
-    @attr(tags=["basic","advanced"])
+    @attr(tags=["basic", "advanced"])
     def test_create_dynamic_compute_offering_no_memory(self):
         """ Create dynamic compute offering with only memory unspecified"""
 
@@ -205,17 +220,18 @@ class TestDynamicServiceOffering(cloudstackTestCase):
         self.services["service_offering"]["memory"] = ""
 
         try:
-            serviceOffering = ServiceOffering.create(self.api_client,
-                                                 self.services["service_offering"]
-                                                 )
+            serviceOffering = ServiceOffering.create(
+                self.api_client,
+                self.services["service_offering"])
             self.cleanup_co.append(serviceOffering)
-            self.fail("Compute Offering creation succeded, it should have failed")
+            self.fail(
+                "Compute Offering creation succeeded, it should have failed")
         except Exception:
             self.debug("Compute Offering Creation failed as expected")
         return
 
     @data(ADMIN_ACCOUNT, USER_ACCOUNT)
-    @attr(tags=["basic","advanced"])
+    @attr(tags=["basic", "advanced"])
     def test_deploy_virtual_machines_static_offering(self, value):
         """Test deploy VM with static offering"""
 
@@ -223,22 +239,26 @@ class TestDynamicServiceOffering(cloudstackTestCase):
         # 1. Create admin/user account and create its user api client
         # 2. Create a static compute offering
         # 3. Deploy a VM with account api client and static service offering
-        # 4. Repeat step 3 but also pass custom values for cpu number, cpu speed and memory
-        #    while deploying VM
+        # 4. Repeat step 3 but also pass custom values for cpu number,
+        #    cpu speed and memory while deploying VM
 
         # Validations:
         # 1. Step 3 should succeed
         # 2. Step 4 should fail
 
-        isadmin=True
+        isadmin = True
         if value == USER_ACCOUNT:
-            isadmin=False
+            isadmin = False
 
         # Create Account
-        self.account = Account.create(self.apiclient,self.services["account"],domainid=self.domain.id, admin=isadmin)
+        self.account = Account.create(
+            self.apiclient,
+            self.services["account"],
+            domainid=self.domain.id,
+            admin=isadmin)
         apiclient = self.testClient.getUserApiClient(
-                                    UserName=self.account.name,
-                                    DomainName=self.account.domain)
+            UserName=self.account.name,
+            DomainName=self.account.domain)
         self.cleanup.append(self.account)
 
         # Create service offering
@@ -246,34 +266,41 @@ class TestDynamicServiceOffering(cloudstackTestCase):
         self.services["service_offering"]["cpuspeed"] = 256
         self.services["service_offering"]["memory"] = 128
 
-        serviceOffering = ServiceOffering.create(self.apiclient,
-                                                 self.services["service_offering"])
+        serviceOffering = ServiceOffering.create(
+            self.apiclient,
+            self.services["service_offering"])
 
         self.cleanup_co.append(serviceOffering)
 
         # Deploy VM with static service offering
         try:
-            VirtualMachine.create(apiclient,self.services["virtual_machine"],
-                                                    serviceofferingid=serviceOffering.id,
-                                                    accountid=self.account.name,domainid=self.account.domainid)
+            VirtualMachine.create(
+                apiclient,
+                self.services["virtual_machine"],
+                serviceofferingid=serviceOffering.id,
+                accountid=self.account.name,
+                domainid=self.account.domainid)
         except Exception as e:
             self.fail("vm creation failed: %s" % e)
 
         # Deploy VM with static service offering, also with custom values
         try:
-            VirtualMachine.create(apiclient,self.services["virtual_machine"],
+            VirtualMachine.create(
+                apiclient,
+                self.services["virtual_machine"],
                 serviceofferingid=serviceOffering.id,
                 customcpunumber=4,
                 customcpuspeed=512,
                 custommemory=256,
-                accountid=self.account.name,domainid=self.account.domainid)
+                accountid=self.account.name,
+                domainid=self.account.domainid)
             self.fail("VM creation should have failed, it succeeded")
         except Exception as e:
             self.debug("vm creation failed as expected: %s" % e)
         return
 
     @data(ADMIN_ACCOUNT, USER_ACCOUNT)
-    @attr(tags=["basic","advanced"])
+    @attr(tags=["basic", "advanced"])
     def test_deploy_virtual_machines_dynamic_offering(self, value):
         """Test deploy VM with dynamic compute offering"""
 
@@ -281,26 +308,31 @@ class TestDynamicServiceOffering(cloudstackTestCase):
         # 1. Create admin/user account and create its user api client
         # 2. Create a dynamic service offering
         # 3. Deploy a VM with account api client and dynamic service offering
-        #    without providing custom values for cpu number, cpu speed and memory
-        # 4. Deploy a VM with account api client and dynamic service offering providing
-        #    custom values for cpu number, cpu speed and memory
-        # 5. Deploy a VM with account api client and dynamic service offering providing
-        #    custom values only for cpu number
+        #    without providing custom values for cpu number, cpu speed and
+        #    memory
+        # 4. Deploy a VM with account api client and dynamic service offering
+        #    providing custom values for cpu number, cpu speed and memory
+        # 5. Deploy a VM with account api client and dynamic service offering
+        #    providing custom values only for cpu number
 
         # Validations:
         # 1. Step 3 should fail
         # 2. Step 4 should succeed
         # 3. Step 5 should fail
 
-        isadmin=True
+        isadmin = True
         if value == USER_ACCOUNT:
-            isadmin=False
+            isadmin = False
 
         # Create Account and its api client
-        self.account = Account.create(self.apiclient,self.services["account"],domainid=self.domain.id, admin=isadmin)
+        self.account = Account.create(
+            self.apiclient,
+            self.services["account"],
+            domainid=self.domain.id,
+            admin=isadmin)
         apiclient = self.testClient.getUserApiClient(
-                                    UserName=self.account.name,
-                                    DomainName=self.account.domain)
+            UserName=self.account.name,
+            DomainName=self.account.domain)
         self.cleanup.append(self.account)
 
         # Create dynamic service offering
@@ -308,17 +340,22 @@ class TestDynamicServiceOffering(cloudstackTestCase):
         self.services["service_offering"]["cpuspeed"] = ""
         self.services["service_offering"]["memory"] = ""
 
-        serviceOffering = ServiceOffering.create(self.apiclient,
-                                                 self.services["service_offering"])
+        serviceOffering = ServiceOffering.create(
+            self.apiclient,
+            self.services["service_offering"])
 
         self.cleanup_co.append(serviceOffering)
 
-        # Deploy VM with dynamic compute offering without providing custom values for
+        # Deploy VM with dynamic compute offering without providing
+        # custom values for
         # cpu number, cpu speed and memory
         try:
-            VirtualMachine.create(apiclient,self.services["virtual_machine"],
-                                                    serviceofferingid=serviceOffering.id,
-                                                    accountid=self.account.name,domainid=self.account.domainid)
+            VirtualMachine.create(
+                apiclient,
+                self.services["virtual_machine"],
+                serviceofferingid=serviceOffering.id,
+                accountid=self.account.name,
+                domainid=self.account.domainid)
             self.fail("VM creation succeded, it should have failed")
         except Exception as e:
             self.debug("vm creation failed as expected with error: %s" % e)
@@ -326,29 +363,35 @@ class TestDynamicServiceOffering(cloudstackTestCase):
         # Deploy VM with dynamic compute offering providing custom values for
         # cpu number, cpu speed and memory
         try:
-            VirtualMachine.create(apiclient,self.services["virtual_machine"],
+            VirtualMachine.create(
+                apiclient,
+                self.services["virtual_machine"],
                 serviceofferingid=serviceOffering.id,
                 customcpunumber=2,
                 customcpuspeed=256,
                 custommemory=128,
-                accountid=self.account.name,domainid=self.account.domainid)
+                accountid=self.account.name,
+                domainid=self.account.domainid)
         except Exception as e:
             self.fail("vm creation failed: %s" % e)
 
-        # Deploy VM with dynamic compute offering providing custom values for only
-        # cpu number
+        # Deploy VM with dynamic compute offering providing custom values
+        # for only cpu number
         try:
-            VirtualMachine.create(apiclient,self.services["virtual_machine"],
+            VirtualMachine.create(
+                apiclient,
+                self.services["virtual_machine"],
                 serviceofferingid=serviceOffering.id,
                 customcpunumber=2,
-                accountid=self.account.name,domainid=self.account.domainid)
+                accountid=self.account.name,
+                domainid=self.account.domainid)
             self.fail("VM deployment should have failed, it succeded")
         except Exception as e:
             self.debug("vm creation failed as expected: %s" % e)
         return
 
     @data(ADMIN_ACCOUNT, USER_ACCOUNT)
-    @attr(tags=["basic","advanced"])
+    @attr(tags=["basic", "advanced"])
     def test_check_vm_stats(self, value):
         """Deploy VM with dynamic service offering and check VM stats"""
 
@@ -359,15 +402,19 @@ class TestDynamicServiceOffering(cloudstackTestCase):
         #    providing custom values for cpu number, cpu speed and memory
         # 4. List the VM and verify the dynamic parameters are same as passed
 
-        isadmin=True
+        isadmin = True
         if value == USER_ACCOUNT:
-            isadmin=False
+            isadmin = False
 
         # Create Account and api client
-        self.account = Account.create(self.apiclient,self.services["account"],domainid=self.domain.id, admin=isadmin)
+        self.account = Account.create(
+            self.apiclient,
+            self.services["account"],
+            domainid=self.domain.id,
+            admin=isadmin)
         apiclient = self.testClient.getUserApiClient(
-                                    UserName=self.account.name,
-                                    DomainName=self.account.domain)
+            UserName=self.account.name,
+            DomainName=self.account.domain)
         self.cleanup.append(self.account)
 
         # Create dynamic compute offering
@@ -375,8 +422,9 @@ class TestDynamicServiceOffering(cloudstackTestCase):
         self.services["service_offering"]["cpuspeed"] = ""
         self.services["service_offering"]["memory"] = ""
 
-        serviceOffering = ServiceOffering.create(self.apiclient,
-                                                 self.services["service_offering"])
+        serviceOffering = ServiceOffering.create(
+            self.apiclient,
+            self.services["service_offering"])
 
         self.cleanup_co.append(serviceOffering)
 
@@ -387,41 +435,51 @@ class TestDynamicServiceOffering(cloudstackTestCase):
 
         # Deploy VM with dynamic service offering and the custom values
         try:
-            virtualMachine = VirtualMachine.create(apiclient,self.services["virtual_machine"],
+            virtualMachine = VirtualMachine.create(
+                apiclient,
+                self.services["virtual_machine"],
                 serviceofferingid=serviceOffering.id,
                 customcpunumber=customcpunumber,
                 customcpuspeed=customcpuspeed,
                 custommemory=custommemory,
-                accountid=self.account.name,domainid=self.account.domainid)
+                accountid=self.account.name,
+                domainid=self.account.domainid)
         except Exception as e:
             self.fail("vm creation failed: %s" % e)
 
         vmlist = VirtualMachine.list(self.apiclient, id=virtualMachine.id)
-        self.assertEqual(validateList(vmlist)[0], PASS, "vm list validation failed")
+        self.assertEqual(
+            validateList(vmlist)[0],
+            PASS,
+            "vm list validation failed")
         vm = vmlist[0]
 
         # Verify the custom values
         self.assertEqual(str(vm.cpunumber), str(customcpunumber), "vm cpu number %s\
-                 not matching with provided custom cpu number %s" % \
-                 (vm.cpunumber, customcpunumber))
+                 not matching with provided custom cpu number %s" %
+                         (vm.cpunumber, customcpunumber))
 
         self.assertEqual(str(vm.cpuspeed), str(customcpuspeed), "vm cpu speed %s\
-                 not matching with provided custom cpu speed %s" % \
-                 (vm.cpuspeed, customcpuspeed))
+                 not matching with provided custom cpu speed %s" %
+                         (vm.cpuspeed, customcpuspeed))
 
         self.assertEqual(str(vm.memory), str(custommemory), "vm memory %s\
-                 not matching with provided custom memory %s" % \
-                 (vm.memory, custommemory))
+                 not matching with provided custom memory %s" %
+                         (vm.memory, custommemory))
         return
 
+
 @ddt
 class TestScaleVmDynamicServiceOffering(cloudstackTestCase):
+
     """Test scaling VMs with dynamic Service Offerings
     """
 
     @classmethod
     def setUpClass(cls):
-        cloudstackTestClient = super(TestScaleVmDynamicServiceOffering,cls).getClsTestClient()
+        cloudstackTestClient = super(
+            TestScaleVmDynamicServiceOffering,
+            cls).getClsTestClient()
         cls.api_client = cloudstackTestClient.getApiClient()
         cls.hypervisor = cloudstackTestClient.getHypervisorInfo()
 
@@ -430,29 +488,35 @@ class TestScaleVmDynamicServiceOffering(cloudstackTestCase):
 
         # Get Zone, Domain and templates
         cls.domain = get_domain(cls.api_client)
-        cls.zone = get_zone(cls.api_client, cloudstackTestClient.getZoneForTests())
+        cls.zone = get_zone(
+            cls.api_client,
+            cloudstackTestClient.getZoneForTests())
         cls.mode = str(cls.zone.networktype).lower()
         cls.template = get_template(
-                            cls.api_client,
-                            cls.zone.id,
-                            cls.services["ostype"]
-                            )
+            cls.api_client,
+            cls.zone.id,
+            cls.services["ostype"]
+        )
         cls.services["virtual_machine"]["zoneid"] = cls.zone.id
         cls.services["virtual_machine"]["template"] = cls.template.id
 
         cls._cleanup = []
-        cls.serviceOffering_static_1 = ServiceOffering.create(cls.api_client,
-                                                 cls.services["service_offering"])
+        cls.serviceOffering_static_1 = ServiceOffering.create(
+            cls.api_client,
+            cls.services["service_offering"])
         cls._cleanup.append(cls.serviceOffering_static_1)
 
         if cls.hypervisor.lower() == "vmware":
-            virtual_machine = VirtualMachine.create(cls.api_client,cls.services["virtual_machine"],
-                serviceofferingid=cls.serviceOffering_static_1.id, mode=cls.zone.networktype)
+            virtual_machine = VirtualMachine.create(
+                cls.api_client,
+                cls.services["virtual_machine"],
+                serviceofferingid=cls.serviceOffering_static_1.id,
+                mode=cls.zone.networktype)
             cls._cleanup.append(virtual_machine)
             sshClient = virtual_machine.get_ssh_client()
             result = str(
                 sshClient.execute("service vmware-tools status")).lower()
-            if not "running" in result:
+            if "running" not in result:
                 cls.tearDownClass()
                 raise unittest.SkipTest("Skipping scale VM operation because\
                     VMware tools are not installed on the VM")
@@ -489,7 +553,7 @@ class TestScaleVmDynamicServiceOffering(cloudstackTestCase):
         return
 
     @data(ADMIN_ACCOUNT, USER_ACCOUNT)
-    @attr(tags=["basic","advanced"])
+    @attr(tags=["basic", "advanced"])
     def test_change_so_stopped_vm_static_to_static(self, value):
         """Test scale stopped VM from static offering to static offering"""
 
@@ -503,51 +567,63 @@ class TestScaleVmDynamicServiceOffering(cloudstackTestCase):
         # Validations:
         # 1. Scaling operation should be successful
 
-        isadmin=True
+        isadmin = True
         if value == USER_ACCOUNT:
-            isadmin=False
+            isadmin = False
 
         try:
             # Create Account
-            self.account = Account.create(self.apiclient,self.services["account"],domainid=self.domain.id, admin=isadmin)
+            self.account = Account.create(
+                self.apiclient,
+                self.services["account"],
+                domainid=self.domain.id,
+                admin=isadmin)
             self.cleanup.append(self.account)
             apiclient = self.testClient.getUserApiClient(
-                                    UserName=self.account.name,
-                                    DomainName=self.account.domain)
+                UserName=self.account.name,
+                DomainName=self.account.domain)
 
             # Create static service offerings (Second offering should have
-            # one of the custom values greater than 1st one, scaling down is not allowed
+            # one of the custom values greater than 1st one, scaling down is
+            # not allowed
             self.services["service_offering"]["cpunumber"] = "2"
             self.services["service_offering"]["cpuspeed"] = "256"
             self.services["service_offering"]["memory"] = "128"
 
-            serviceOffering_static_1 = ServiceOffering.create(self.apiclient,
-                                                 self.services["service_offering"])
+            serviceOffering_static_1 = ServiceOffering.create(
+                self.apiclient,
+                self.services["service_offering"])
 
             self.services["service_offering"]["cpunumber"] = "4"
 
-            serviceOffering_static_2 = ServiceOffering.create(self.apiclient,
-                                                 self.services["service_offering"])
+            serviceOffering_static_2 = ServiceOffering.create(
+                self.apiclient,
+                self.services["service_offering"])
 
             self.cleanup_co.append(serviceOffering_static_1)
             self.cleanup_co.append(serviceOffering_static_2)
 
             # Deploy VM
-            virtualMachine = VirtualMachine.create(apiclient,self.services["virtual_machine"],
+            virtualMachine = VirtualMachine.create(
+                apiclient,
+                self.services["virtual_machine"],
                 serviceofferingid=serviceOffering_static_1.id,
-                accountid=self.account.name,domainid=self.account.domainid)
+                accountid=self.account.name,
+                domainid=self.account.domainid)
 
             # Stop VM
             virtualMachine.stop(apiclient)
 
             # Scale VM to new static service offering
-            virtualMachine.scale(apiclient, serviceOfferingId=serviceOffering_static_2.id)
+            virtualMachine.scale(
+                apiclient,
+                serviceOfferingId=serviceOffering_static_2.id)
         except Exception as e:
             self.fail("Exception occured: %s" % e)
         return
 
     @data(ADMIN_ACCOUNT, USER_ACCOUNT)
-    @attr(tags=["basic","advanced"])
+    @attr(tags=["basic", "advanced"])
     def test_change_so_stopped_vm_static_to_dynamic(self, value):
         """Test scale stopped VM from static offering to dynamic offering"""
 
@@ -559,22 +635,27 @@ class TestScaleVmDynamicServiceOffering(cloudstackTestCase):
         # 5. Scale VM with dynamic service offering providing all required
         #    custom values
         # 6. Deploy another VM with static offring and stop the VM
-        # 7. Scale VM with dynamic service offering providing only custom cpu number
+        # 7. Scale VM with dynamic service offering providing only custom cpu
+        # number
 
         # Validations:
         # 1. Scale operation in step 5 should be successful
         # 2. Scale operation in step 7 should fail
 
-        isadmin=True
+        isadmin = True
         if value == USER_ACCOUNT:
-            isadmin=False
+            isadmin = False
 
         try:
             # Create Account and api client
-            self.account = Account.create(self.apiclient,self.services["account"],domainid=self.domain.id, admin=isadmin)
+            self.account = Account.create(
+                self.apiclient,
+                self.services["account"],
+                domainid=self.domain.id,
+                admin=isadmin)
             apiclient = self.testClient.getUserApiClient(
-                                    UserName=self.account.name,
-                                    DomainName=self.account.domain)
+                UserName=self.account.name,
+                DomainName=self.account.domain)
             self.cleanup.append(self.account)
 
             # Create static and dynamic service offerings
@@ -582,50 +663,64 @@ class TestScaleVmDynamicServiceOffering(cloudstackTestCase):
             self.services["service_offering"]["cpuspeed"] = "256"
             self.services["service_offering"]["memory"] = "128"
 
-            serviceOffering_static = ServiceOffering.create(self.apiclient,
-                                                 self.services["service_offering"])
-
+            serviceOffering_static = ServiceOffering.create(
+                self.apiclient,
+                self.services["service_offering"])
 
             self.services["service_offering"]["cpunumber"] = ""
             self.services["service_offering"]["cpuspeed"] = ""
             self.services["service_offering"]["memory"] = ""
 
-            serviceOffering_dynamic = ServiceOffering.create(self.apiclient,
-                                                 self.services["service_offering"])
+            serviceOffering_dynamic = ServiceOffering.create(
+                self.apiclient,
+                self.services["service_offering"])
 
             self.cleanup_co.append(serviceOffering_static)
             self.cleanup_co.append(serviceOffering_dynamic)
 
             # Deploy VM with static service offering
-            virtualMachine_1 = VirtualMachine.create(apiclient,self.services["virtual_machine"],
+            virtualMachine_1 = VirtualMachine.create(
+                apiclient,
+                self.services["virtual_machine"],
                 serviceofferingid=serviceOffering_static.id,
-                accountid=self.account.name,domainid=self.account.domainid)
+                accountid=self.account.name,
+                domainid=self.account.domainid)
 
             # Stop VM
             virtualMachine_1.stop(apiclient)
 
             # Scale VM to dynamic service offering providing all custom values
-            virtualMachine_1.scale(apiclient, serviceOfferingId=serviceOffering_dynamic.id,
-                                 customcpunumber=4, customcpuspeed=256, custommemory=128)
+            virtualMachine_1.scale(
+                apiclient,
+                serviceOfferingId=serviceOffering_dynamic.id,
+                customcpunumber=4,
+                customcpuspeed=256,
+                custommemory=128)
 
             # Deploy VM with static service offering
-            virtualMachine_2 = VirtualMachine.create(apiclient,self.services["virtual_machine"],
+            virtualMachine_2 = VirtualMachine.create(
+                apiclient,
+                self.services["virtual_machine"],
                 serviceofferingid=serviceOffering_static.id,
-                accountid=self.account.name,domainid=self.account.domainid)
+                accountid=self.account.name,
+                domainid=self.account.domainid)
 
             # Stop VM
             virtualMachine_2.stop(apiclient)
         except Exception as e:
             self.fail("Exception occuered: %s" % e)
 
-            # Scale VM to dynamic service offering proving only custom cpu number
+            # Scale VM to dynamic service offering providing only custom cpu
+            # number
         with self.assertRaises(Exception):
-            virtualMachine_2.scale(apiclient, serviceOfferingId=serviceOffering_dynamic.id,
-                                 customcpunumber=4)
+            virtualMachine_2.scale(
+                apiclient,
+                serviceOfferingId=serviceOffering_dynamic.id,
+                customcpunumber=4)
         return
 
     @data(ADMIN_ACCOUNT, USER_ACCOUNT)
-    @attr(tags=["basic","advanced"])
+    @attr(tags=["basic", "advanced"])
     def test_change_so_stopped_vm_dynamic_to_static(self, value):
         """Test scale stopped VM from dynamic offering to static offering"""
 
@@ -639,16 +734,20 @@ class TestScaleVmDynamicServiceOffering(cloudstackTestCase):
         # Validations:
         # 1. Scale operation in step 5 should be successful
 
-        isadmin=True
+        isadmin = True
         if value == USER_ACCOUNT:
-            isadmin=False
+            isadmin = False
 
         try:
             # Create account and api client
-            self.account = Account.create(self.apiclient,self.services["account"],domainid=self.domain.id, admin=isadmin)
+            self.account = Account.create(
+                self.apiclient,
+                self.services["account"],
+                domainid=self.domain.id,
+                admin=isadmin)
             apiclient = self.testClient.getUserApiClient(
-                                    UserName=self.account.name,
-                                    DomainName=self.account.domain)
+                UserName=self.account.name,
+                DomainName=self.account.domain)
             self.cleanup.append(self.account)
 
             # Create dynamic and static service offering
@@ -656,36 +755,45 @@ class TestScaleVmDynamicServiceOffering(cloudstackTestCase):
             self.services["service_offering"]["cpuspeed"] = ""
             self.services["service_offering"]["memory"] = ""
 
-            serviceOffering_dynamic = ServiceOffering.create(self.apiclient,
-                                                 self.services["service_offering"])
+            serviceOffering_dynamic = ServiceOffering.create(
+                self.apiclient,
+                self.services["service_offering"])
 
             self.services["service_offering"]["cpunumber"] = "4"
             self.services["service_offering"]["cpuspeed"] = "256"
             self.services["service_offering"]["memory"] = "128"
 
-            serviceOffering_static = ServiceOffering.create(self.apiclient,
-                                                 self.services["service_offering"])
+            serviceOffering_static = ServiceOffering.create(
+                self.apiclient,
+                self.services["service_offering"])
 
             self.cleanup_co.append(serviceOffering_static)
             self.cleanup_co.append(serviceOffering_dynamic)
 
             # Deploy VM with dynamic service offering
-            virtualMachine = VirtualMachine.create(apiclient,self.services["virtual_machine"],
+            virtualMachine = VirtualMachine.create(
+                apiclient,
+                self.services["virtual_machine"],
                 serviceofferingid=serviceOffering_dynamic.id,
-                accountid=self.account.name,domainid=self.account.domainid,
-                customcpunumber=2, customcpuspeed=256, custommemory=128)
+                accountid=self.account.name,
+                domainid=self.account.domainid,
+                customcpunumber=2,
+                customcpuspeed=256,
+                custommemory=128)
 
             # Stop VM and verify that it is in stopped state
             virtualMachine.stop(apiclient)
 
             # Scale VM to static service offering
-            virtualMachine.scale(apiclient, serviceOfferingId=serviceOffering_static.id)
+            virtualMachine.scale(
+                apiclient,
+                serviceOfferingId=serviceOffering_static.id)
         except Exception as e:
             self.fail("Exception occured: %s" % e)
         return
 
     @data(ADMIN_ACCOUNT, USER_ACCOUNT)
-    @attr(tags=["basic","advanced"])
+    @attr(tags=["basic", "advanced"])
     def test_change_so_stopped_vm_dynamic_to_dynamic(self, value):
         """Test scale stopped VM from dynamic offering to dynamic offering"""
 
@@ -704,16 +812,20 @@ class TestScaleVmDynamicServiceOffering(cloudstackTestCase):
         # 2. Scale operation in step 6 should be successful
         # 3. Scale operation in step 7 should fail
 
-        isadmin=True
+        isadmin = True
         if value == USER_ACCOUNT:
-            isadmin=False
+            isadmin = False
 
         try:
             # Create Account
-            self.account = Account.create(self.apiclient,self.services["account"],domainid=self.domain.id, admin=isadmin)
+            self.account = Account.create(
+                self.apiclient,
+                self.services["account"],
+                domainid=self.domain.id,
+                admin=isadmin)
             apiclient = self.testClient.getUserApiClient(
-                                    UserName=self.account.name,
-                                    DomainName=self.account.domain)
+                UserName=self.account.name,
+                DomainName=self.account.domain)
             self.cleanup.append(self.account)
 
             # Create dynamic service offerings
@@ -721,43 +833,60 @@ class TestScaleVmDynamicServiceOffering(cloudstackTestCase):
             self.services["service_offering"]["cpuspeed"] = ""
             self.services["service_offering"]["memory"] = ""
 
-            serviceOffering_dynamic_1 = ServiceOffering.create(self.apiclient,
-                                                 self.services["service_offering"])
+            serviceOffering_dynamic_1 = ServiceOffering.create(
+                self.apiclient,
+                self.services["service_offering"])
 
-            serviceOffering_dynamic_2 = ServiceOffering.create(self.apiclient,
-                                                 self.services["service_offering"])
+            serviceOffering_dynamic_2 = ServiceOffering.create(
+                self.apiclient,
+                self.services["service_offering"])
 
             self.cleanup_co.append(serviceOffering_dynamic_1)
             self.cleanup_co.append(serviceOffering_dynamic_2)
 
             # Deploy VM with dynamic service offering
-            virtualMachine = VirtualMachine.create(apiclient,self.services["virtual_machine"],
+            virtualMachine = VirtualMachine.create(
+                apiclient,
+                self.services["virtual_machine"],
                 serviceofferingid=serviceOffering_dynamic_1.id,
-                accountid=self.account.name,domainid=self.account.domainid,
-                customcpunumber=2, customcpuspeed=256, custommemory=128)
+                accountid=self.account.name,
+                domainid=self.account.domainid,
+                customcpunumber=2,
+                customcpuspeed=256,
+                custommemory=128)
 
             # Stop VM
             virtualMachine.stop(apiclient)
 
             # Scale VM with same dynamic service offering
-            virtualMachine.scale(apiclient, serviceOfferingId=serviceOffering_dynamic_1.id,
-                                 customcpunumber=4, customcpuspeed=512, custommemory=256)
+            virtualMachine.scale(
+                apiclient,
+                serviceOfferingId=serviceOffering_dynamic_1.id,
+                customcpunumber=4,
+                customcpuspeed=512,
+                custommemory=256)
 
             # Scale VM with other dynamic service offering
-            virtualMachine.scale(apiclient, serviceOfferingId=serviceOffering_dynamic_2.id,
-                                 customcpunumber=4, customcpuspeed=512, custommemory=256)
+            virtualMachine.scale(
+                apiclient,
+                serviceOfferingId=serviceOffering_dynamic_2.id,
+                customcpunumber=4,
+                customcpuspeed=512,
+                custommemory=256)
         except Exception as e:
             self.fail("Exception occured: %s" % e)
 
         # Scale VM with dynamic service offering providing custom value
         # only for cpu number
         with self.assertRaises(Exception):
-            virtualMachine.scale(apiclient, serviceOfferingId=serviceOffering_dynamic_1.id,
-                                 customcpunumber=4)
+            virtualMachine.scale(
+                apiclient,
+                serviceOfferingId=serviceOffering_dynamic_1.id,
+                customcpunumber=4)
         return
 
     @data(ADMIN_ACCOUNT, USER_ACCOUNT)
-    @attr(tags=["basic","advanced"],required_hardware="true")
+    @attr(tags=["basic", "advanced"], required_hardware="true")
     def test_change_so_running_vm_static_to_static(self, value):
         """Test scale running VM from static offering to static offering"""
 
@@ -772,17 +901,22 @@ class TestScaleVmDynamicServiceOffering(cloudstackTestCase):
 
         hypervisor = get_hypervisor_type(self.apiclient)
         if hypervisor.lower() == "kvm":
-            self.skipTest("Scaling VM in running state is not supported on KVM")
+            self.skipTest(
+                "Scaling VM in running state is not supported on KVM")
 
-        isadmin=True
+        isadmin = True
         if value == USER_ACCOUNT:
-            isadmin=False
+            isadmin = False
 
         # Create Account and api client
-        self.account = Account.create(self.apiclient,self.services["account"],domainid=self.domain.id, admin=isadmin)
+        self.account = Account.create(
+            self.apiclient,
+            self.services["account"],
+            domainid=self.domain.id,
+            admin=isadmin)
         apiclient = self.testClient.getUserApiClient(
-                                    UserName=self.account.name,
-                                    DomainName=self.account.domain)
+            UserName=self.account.name,
+            DomainName=self.account.domain)
         self.cleanup.append(self.account)
 
         # Create static service offerings
@@ -790,34 +924,41 @@ class TestScaleVmDynamicServiceOffering(cloudstackTestCase):
         self.services["service_offering"]["cpuspeed"] = "256"
         self.services["service_offering"]["memory"] = "128"
 
-        serviceOffering_static_1 = ServiceOffering.create(self.apiclient,
-                                                 self.services["service_offering"])
+        serviceOffering_static_1 = ServiceOffering.create(
+            self.apiclient,
+            self.services["service_offering"])
 
         self.services["service_offering"]["cpunumber"] = "4"
 
-        serviceOffering_static_2 = ServiceOffering.create(self.apiclient,
-                                                 self.services["service_offering"])
+        serviceOffering_static_2 = ServiceOffering.create(
+            self.apiclient,
+            self.services["service_offering"])
 
         self.cleanup_co.append(serviceOffering_static_1)
         self.cleanup_co.append(serviceOffering_static_2)
 
         # Deploy VM with static service offering
         try:
-            virtualMachine = VirtualMachine.create(apiclient,self.services["virtual_machine"],
+            virtualMachine = VirtualMachine.create(
+                apiclient,
+                self.services["virtual_machine"],
                 serviceofferingid=serviceOffering_static_1.id,
-                accountid=self.account.name,domainid=self.account.domainid)
+                accountid=self.account.name,
+                domainid=self.account.domainid)
         except Exception as e:
             self.fail("vm creation failed: %s" % e)
 
         # Scale VM to other static service offering
         try:
-            virtualMachine.scale(apiclient, serviceOfferingId=serviceOffering_static_2.id)
+            virtualMachine.scale(
+                apiclient,
+                serviceOfferingId=serviceOffering_static_2.id)
         except Exception as e:
             self.fail("Failure while changing service offering: %s" % e)
         return
 
     @data(ADMIN_ACCOUNT, USER_ACCOUNT)
-    @attr(tags=["basic","advanced"],required_hardware="true")
+    @attr(tags=["basic", "advanced"], required_hardware="true")
     def test_change_so_running_vm_static_to_dynamic(self, value):
         """Test scale running VM from static offering to dynamic offering"""
 
@@ -828,7 +969,8 @@ class TestScaleVmDynamicServiceOffering(cloudstackTestCase):
         # 4. Scale VM with dynamic service offering providing all required
         #    custom values
         # 5. Deploy another VM with static offring
-        # 6. Scale VM with dynamic service offering providing only custom cpu number
+        # 6. Scale VM with dynamic service offering providing only custom cpu
+        # number
 
         # Validations:
         # 1. Scale operation in step 4 should be successful
@@ -836,17 +978,22 @@ class TestScaleVmDynamicServiceOffering(cloudstackTestCase):
 
         hypervisor = get_hypervisor_type(self.apiclient)
         if hypervisor.lower() == "kvm":
-            self.skipTest("Scaling VM in running state is not supported on KVM")
+            self.skipTest(
+                "Scaling VM in running state is not supported on KVM")
 
-        isadmin=True
+        isadmin = True
         if value == USER_ACCOUNT:
-            isadmin=False
+            isadmin = False
 
         # Create account and api client
-        self.account = Account.create(self.apiclient,self.services["account"],domainid=self.domain.id, admin=isadmin)
+        self.account = Account.create(
+            self.apiclient,
+            self.services["account"],
+            domainid=self.domain.id,
+            admin=isadmin)
         apiclient = self.testClient.getUserApiClient(
-                                    UserName=self.account.name,
-                                    DomainName=self.account.domain)
+            UserName=self.account.name,
+            DomainName=self.account.domain)
         self.cleanup.append(self.account)
 
         # Create static and dynamic service offerings
@@ -854,53 +1001,70 @@ class TestScaleVmDynamicServiceOffering(cloudstackTestCase):
         self.services["service_offering"]["cpuspeed"] = "256"
         self.services["service_offering"]["memory"] = "128"
 
-        serviceOffering_static = ServiceOffering.create(self.apiclient,
-                                                 self.services["service_offering"])
-
+        serviceOffering_static = ServiceOffering.create(
+            self.apiclient,
+            self.services["service_offering"])
 
         self.services["service_offering"]["cpunumber"] = ""
         self.services["service_offering"]["cpuspeed"] = ""
         self.services["service_offering"]["memory"] = ""
 
-        serviceOffering_dynamic = ServiceOffering.create(self.apiclient,
-                                                 self.services["service_offering"])
+        serviceOffering_dynamic = ServiceOffering.create(
+            self.apiclient,
+            self.services["service_offering"])
 
         self.cleanup_co.append(serviceOffering_static)
         self.cleanup_co.append(serviceOffering_dynamic)
 
         # Deploy VM with static service offering
         try:
-            virtualMachine_1 = VirtualMachine.create(apiclient,self.services["virtual_machine"],
+            virtualMachine_1 = VirtualMachine.create(
+                apiclient,
+                self.services["virtual_machine"],
                 serviceofferingid=serviceOffering_static.id,
-                accountid=self.account.name,domainid=self.account.domainid)
+                accountid=self.account.name,
+                domainid=self.account.domainid)
         except Exception as e:
             self.fail("vm creation failed: %s" % e)
 
         # Scale VM to dynamic service offering
         try:
-            virtualMachine_1.scale(apiclient, serviceOfferingId=serviceOffering_dynamic.id,
-                                 customcpunumber=4, customcpuspeed=256, custommemory=128)
+            virtualMachine_1.scale(
+                apiclient,
+                serviceOfferingId=serviceOffering_dynamic.id,
+                customcpunumber=4,
+                customcpuspeed=256,
+                custommemory=128)
         except Exception as e:
             self.fail("Failure while changing service offering: %s" % e)
 
         try:
-            virtualMachine_2 = VirtualMachine.create(apiclient,self.services["virtual_machine"],
+            virtualMachine_2 = VirtualMachine.create(
+                apiclient,
+                self.services["virtual_machine"],
                 serviceofferingid=serviceOffering_static.id,
-                accountid=self.account.name,domainid=self.account.domainid)
+                accountid=self.account.name,
+                domainid=self.account.domainid)
         except Exception as e:
             self.fail("vm creation failed: %s" % e)
 
         try:
-            virtualMachine_2.scale(apiclient, serviceOfferingId=serviceOffering_dynamic.id,
-                                 customcpunumber=4)
-            self.fail("Changing service offering with incomplete data should have failed, it succeded")
+            virtualMachine_2.scale(
+                apiclient,
+                serviceOfferingId=serviceOffering_dynamic.id,
+                customcpunumber=4)
+            self.fail(
+                "Changing service offering with incomplete data should\
+                 have failed, it succeeded")
         except Exception as e:
-            self.debug("Failure while changing service offering as expected: %s" % e)
+            self.debug(
+                "Failure while changing service offering as expected: %s" %
+                e)
 
         return
 
     @data(ADMIN_ACCOUNT, USER_ACCOUNT)
-    @attr(tags=["basic","advanced"],required_hardware="true")
+    @attr(tags=["basic", "advanced"], required_hardware="true")
     def test_change_so_running_vm_dynamic_to_static(self, value):
         """Test scale running VM from dynamic offering to static offering"""
 
@@ -914,17 +1078,22 @@ class TestScaleVmDynamicServiceOffering(cloudstackTestCase):
         # 1. Scale operation in step 4 should be successful
         hypervisor = get_hypervisor_type(self.apiclient)
         if hypervisor.lower() == "kvm":
-            self.skipTest("Scaling VM in running state is not supported on KVM")
+            self.skipTest(
+                "Scaling VM in running state is not supported on KVM")
 
-        isadmin=True
+        isadmin = True
         if value == USER_ACCOUNT:
-            isadmin=False
+            isadmin = False
 
         # Create account and api client
-        self.account = Account.create(self.apiclient,self.services["account"],domainid=self.domain.id, admin=isadmin)
+        self.account = Account.create(
+            self.apiclient,
+            self.services["account"],
+            domainid=self.domain.id,
+            admin=isadmin)
         apiclient = self.testClient.getUserApiClient(
-                                    UserName=self.account.name,
-                                    DomainName=self.account.domain)
+            UserName=self.account.name,
+            DomainName=self.account.domain)
         self.cleanup.append(self.account)
 
         # Create dynamic and static service offerings
@@ -932,38 +1101,47 @@ class TestScaleVmDynamicServiceOffering(cloudstackTestCase):
         self.services["service_offering"]["cpuspeed"] = ""
         self.services["service_offering"]["memory"] = ""
 
-        serviceOffering_dynamic = ServiceOffering.create(self.apiclient,
-                                                 self.services["service_offering"])
+        serviceOffering_dynamic = ServiceOffering.create(
+            self.apiclient,
+            self.services["service_offering"])
 
         self.services["service_offering"]["cpunumber"] = "4"
         self.services["service_offering"]["cpuspeed"] = "256"
         self.services["service_offering"]["memory"] = "128"
 
-        serviceOffering_static = ServiceOffering.create(self.apiclient,
-                                                 self.services["service_offering"])
+        serviceOffering_static = ServiceOffering.create(
+            self.apiclient,
+            self.services["service_offering"])
 
         self.cleanup_co.append(serviceOffering_static)
         self.cleanup_co.append(serviceOffering_dynamic)
 
         # deploy VM with dynamic service offering
         try:
-            virtualMachine = VirtualMachine.create(apiclient,self.services["virtual_machine"],
+            virtualMachine = VirtualMachine.create(
+                apiclient,
+                self.services["virtual_machine"],
                 serviceofferingid=serviceOffering_dynamic.id,
-                accountid=self.account.name,domainid=self.account.domainid,
-                customcpunumber=2, customcpuspeed=256, custommemory=128)
+                accountid=self.account.name,
+                domainid=self.account.domainid,
+                customcpunumber=2,
+                customcpuspeed=256,
+                custommemory=128)
         except Exception as e:
             self.fail("vm creation failed: %s" % e)
 
         # Scale VM to static service offering
         try:
-            virtualMachine.scale(apiclient, serviceOfferingId=serviceOffering_static.id)
+            virtualMachine.scale(
+                apiclient,
+                serviceOfferingId=serviceOffering_static.id)
         except Exception as e:
             self.fail("Failure while changing service offering: %s" % e)
 
         return
 
     @data(ADMIN_ACCOUNT, USER_ACCOUNT)
-    @attr(tags=["basic","advanced"],required_hardware="true")
+    @attr(tags=["basic", "advanced"], required_hardware="true")
     def test_change_so_running_vm_dynamic_to_dynamic(self, value):
         """Test scale running VM from dynamic offering to dynamic offering"""
 
@@ -983,17 +1161,22 @@ class TestScaleVmDynamicServiceOffering(cloudstackTestCase):
 
         hypervisor = get_hypervisor_type(self.apiclient)
         if hypervisor.lower() == "kvm":
-            self.skipTest("Scaling VM in running state is not supported on KVM")
+            self.skipTest(
+                "Scaling VM in running state is not supported on KVM")
 
-        isadmin=True
+        isadmin = True
         if value == USER_ACCOUNT:
-            isadmin=False
+            isadmin = False
 
         # Create account and api client
-        self.account = Account.create(self.apiclient,self.services["account"],domainid=self.domain.id, admin=isadmin)
+        self.account = Account.create(
+            self.apiclient,
+            self.services["account"],
+            domainid=self.domain.id,
+            admin=isadmin)
         apiclient = self.testClient.getUserApiClient(
-                                    UserName=self.account.name,
-                                    DomainName=self.account.domain)
+            UserName=self.account.name,
+            DomainName=self.account.domain)
         self.cleanup.append(self.account)
 
         # Create dynamic service offerings
@@ -1001,56 +1184,78 @@ class TestScaleVmDynamicServiceOffering(cloudstackTestCase):
         self.services["service_offering"]["cpuspeed"] = ""
         self.services["service_offering"]["memory"] = ""
 
-        serviceOffering_dynamic_1 = ServiceOffering.create(self.apiclient,
-                                                 self.services["service_offering"])
+        serviceOffering_dynamic_1 = ServiceOffering.create(
+            self.apiclient,
+            self.services["service_offering"])
 
-        serviceOffering_dynamic_2 = ServiceOffering.create(self.apiclient,
-                                                 self.services["service_offering"])
+        serviceOffering_dynamic_2 = ServiceOffering.create(
+            self.apiclient,
+            self.services["service_offering"])
 
         self.cleanup_co.append(serviceOffering_dynamic_1)
         self.cleanup_co.append(serviceOffering_dynamic_2)
 
         # Deploy VM with dynamic service offering
         try:
-            virtualMachine = VirtualMachine.create(apiclient,self.services["virtual_machine"],
+            virtualMachine = VirtualMachine.create(
+                apiclient,
+                self.services["virtual_machine"],
                 serviceofferingid=serviceOffering_dynamic_1.id,
-                accountid=self.account.name,domainid=self.account.domainid,
-                customcpunumber=2, customcpuspeed=256, custommemory=128)
+                accountid=self.account.name,
+                domainid=self.account.domainid,
+                customcpunumber=2,
+                customcpuspeed=256,
+                custommemory=128)
         except Exception as e:
             self.fail("vm creation failed: %s" % e)
 
         # Scale VM with same dynamic offering
         try:
-            virtualMachine.scale(apiclient, serviceOfferingId=serviceOffering_dynamic_1.id,
-                                 customcpunumber=4, customcpuspeed=512, custommemory=256)
+            virtualMachine.scale(
+                apiclient,
+                serviceOfferingId=serviceOffering_dynamic_1.id,
+                customcpunumber=4,
+                customcpuspeed=512,
+                custommemory=256)
         except Exception as e:
             self.fail("Failure while changing service offering: %s" % e)
 
         # Scale VM with other dynamic service offering
         try:
-            virtualMachine.scale(apiclient, serviceOfferingId=serviceOffering_dynamic_2.id,
-                                 customcpunumber=4, customcpuspeed=512, custommemory=512)
+            virtualMachine.scale(
+                apiclient,
+                serviceOfferingId=serviceOffering_dynamic_2.id,
+                customcpunumber=4,
+                customcpuspeed=512,
+                custommemory=512)
         except Exception as e:
             self.fail("Failure while changing service offering: %s" % e)
 
-        # Scale VM with dynamic offering proving custom value only for cpu number
+        # Scale VM with dynamic offering providing custom value only for cpu
+        # number
         try:
-            virtualMachine.scale(apiclient, serviceOfferingId=serviceOffering_dynamic_1.id,
-                                 customcpunumber=4)
-            self.fail("Changing service offering should have failed, it succeded")
+            virtualMachine.scale(
+                apiclient,
+                serviceOfferingId=serviceOffering_dynamic_1.id,
+                customcpunumber=4)
+            self.fail(
+                "Changing service offering should have failed, it succeeded")
         except Exception as e:
             self.debug("Failure while changing service offering: %s" % e)
 
         return
 
+
 @ddt
 class TestAccountLimits(cloudstackTestCase):
-    """Test max limit of account (cpunumber and memory) with dynamic compute offering
+
+    """Test max limit of account (cpunumber and memory) with dynamic
+       compute offering
     """
 
     @classmethod
     def setUpClass(cls):
-        cloudstackTestClient = super(TestAccountLimits,cls).getClsTestClient()
+        cloudstackTestClient = super(TestAccountLimits, cls).getClsTestClient()
         cls.api_client = cloudstackTestClient.getApiClient()
 
         # Fill services from the external config file
@@ -1058,13 +1263,15 @@ class TestAccountLimits(cloudstackTestCase):
 
         # Get Zone, Domain and templates
         cls.domain = get_domain(cls.api_client)
-        cls.zone = get_zone(cls.api_client, cloudstackTestClient.getZoneForTests())
+        cls.zone = get_zone(
+            cls.api_client,
+            cloudstackTestClient.getZoneForTests())
         cls.mode = str(cls.zone.networktype).lower()
         cls.template = get_template(
-                            cls.api_client,
-                            cls.zone.id,
-                            cls.services["ostype"]
-                            )
+            cls.api_client,
+            cls.zone.id,
+            cls.services["ostype"]
+        )
         cls.services["virtual_machine"]["zoneid"] = cls.zone.id
         cls.services["virtual_machine"]["template"] = cls.template.id
         cls._cleanup = []
@@ -1101,9 +1308,10 @@ class TestAccountLimits(cloudstackTestCase):
         return
 
     @data(ADMIN_ACCOUNT, USER_ACCOUNT)
-    @attr(tags=["basic","advanced"])
+    @attr(tags=["basic", "advanced"])
     def test_max_account_cpus_deploy_VM(self, value):
-        """Test cpu limits of account while deploying VM with dynamic compute offering"""
+        """Test cpu limits of account while deploying VM with dynamic
+           compute offering"""
 
         # Steps:
         # 1. Create Account (admin/user)
@@ -1114,12 +1322,16 @@ class TestAccountLimits(cloudstackTestCase):
         # Validations:
         # 1. VM creation should fail
 
-        isadmin=True
+        isadmin = True
         if value == USER_ACCOUNT:
-            isadmin=False
+            isadmin = False
 
         # Create account and api client
-        self.account = Account.create(self.apiclient,self.services["account"],domainid=self.domain.id, admin=isadmin)
+        self.account = Account.create(
+            self.apiclient,
+            self.services["account"],
+            domainid=self.domain.id,
+            admin=isadmin)
         self.cleanup.append(self.account)
 
         Resources.updateLimit(self.apiclient,
@@ -1133,17 +1345,23 @@ class TestAccountLimits(cloudstackTestCase):
         self.services["service_offering"]["cpuspeed"] = ""
         self.services["service_offering"]["memory"] = ""
 
-        serviceOffering_dynamic = ServiceOffering.create(self.apiclient,
-                                                 self.services["service_offering"])
+        serviceOffering_dynamic = ServiceOffering.create(
+            self.apiclient,
+            self.services["service_offering"])
 
         self.cleanup_co.append(serviceOffering_dynamic)
 
         # Deploy VM with dynamic service offering
         try:
-            VirtualMachine.create(self.apiclient,self.services["virtual_machine"],
+            VirtualMachine.create(
+                self.apiclient,
+                self.services["virtual_machine"],
                 serviceofferingid=serviceOffering_dynamic.id,
-                accountid=self.account.name,domainid=self.account.domainid,
-                customcpunumber=3, customcpuspeed=256, custommemory=128)
+                accountid=self.account.name,
+                domainid=self.account.domainid,
+                customcpunumber=3,
+                customcpuspeed=256,
+                custommemory=128)
             self.fail("vm creation should have failed, it succeeded")
         except Exception as e:
             self.debug("vm creation failed as expected with error: %s" % e)
@@ -1151,9 +1369,10 @@ class TestAccountLimits(cloudstackTestCase):
         return
 
     @data(ADMIN_ACCOUNT, USER_ACCOUNT)
-    @attr(tags=["basic","advanced"])
+    @attr(tags=["basic", "advanced"])
     def test_max_account_cpus_scale_VM(self, value):
-        """Test cpu limits of account while scaling VM with dynamic compute offering"""
+        """Test cpu limits of account while scaling VM with dynamic
+           compute offering"""
 
         # Steps:
         # 1. Create Account (admin/user)
@@ -1166,12 +1385,16 @@ class TestAccountLimits(cloudstackTestCase):
         # 1. VM creation should succeed
         # 2. VM scaling operation should fail
 
-        isadmin=True
+        isadmin = True
         if value == USER_ACCOUNT:
-            isadmin=False
+            isadmin = False
 
         # Create account and api client
-        self.account = Account.create(self.apiclient,self.services["account"],domainid=self.domain.id, admin=isadmin)
+        self.account = Account.create(
+            self.apiclient,
+            self.services["account"],
+            domainid=self.domain.id,
+            admin=isadmin)
         self.cleanup.append(self.account)
 
         Resources.updateLimit(self.apiclient,
@@ -1185,35 +1408,48 @@ class TestAccountLimits(cloudstackTestCase):
         self.services["service_offering"]["cpuspeed"] = ""
         self.services["service_offering"]["memory"] = ""
 
-        serviceOffering_dynamic = ServiceOffering.create(self.apiclient,
-                                                 self.services["service_offering"])
+        serviceOffering_dynamic = ServiceOffering.create(
+            self.apiclient,
+            self.services["service_offering"])
 
         self.cleanup_co.append(serviceOffering_dynamic)
 
         # Deploy VM with dynamic service offering
         try:
-            virtualMachine = VirtualMachine.create(self.apiclient,self.services["virtual_machine"],
+            virtualMachine = VirtualMachine.create(
+                self.apiclient,
+                self.services["virtual_machine"],
                 serviceofferingid=serviceOffering_dynamic.id,
-                accountid=self.account.name,domainid=self.account.domainid,
-                customcpunumber=2, customcpuspeed=256, custommemory=128)
+                accountid=self.account.name,
+                domainid=self.account.domainid,
+                customcpunumber=2,
+                customcpuspeed=256,
+                custommemory=128)
         except Exception as e:
             self.fail("vm creation failed: %s" % e)
 
         # Scale VM with same dynamic offering
         try:
-            virtualMachine.scale(self.apiclient, serviceOfferingId=serviceOffering_dynamic.id,
-                                 customcpunumber=4, customcpuspeed=512, custommemory=256)
+            virtualMachine.scale(
+                self.apiclient,
+                serviceOfferingId=serviceOffering_dynamic.id,
+                customcpunumber=4,
+                customcpuspeed=512,
+                custommemory=256)
             self.fail("Scaling virtual machine with cpu number more than \
                     allowed limit (of account) succeeded, should have failed")
         except Exception as e:
-            self.debug("Failure while changing service offering as expected: %s" % e)
+            self.debug(
+                "Failure while changing service offering as expected: %s" %
+                e)
 
         return
 
     @data(ADMIN_ACCOUNT, USER_ACCOUNT)
-    @attr(tags=["basic","advanced"])
+    @attr(tags=["basic", "advanced"])
     def test_max_account_memory_deploy_VM(self, value):
-        """Test memory limits of account while deploying VM with dynamic compute offering"""
+        """Test memory limits of account while deploying VM with dynamic
+           compute offering"""
 
         # Steps:
         # 1. Create Account (admin/user)
@@ -1224,12 +1460,16 @@ class TestAccountLimits(cloudstackTestCase):
         # Validations:
         # 1. VM creation should fail
 
-        isadmin=True
+        isadmin = True
         if value == USER_ACCOUNT:
-            isadmin=False
+            isadmin = False
 
         # Create account and api client
-        self.account = Account.create(self.apiclient,self.services["account"],domainid=self.domain.id, admin=isadmin)
+        self.account = Account.create(
+            self.apiclient,
+            self.services["account"],
+            domainid=self.domain.id,
+            admin=isadmin)
         self.cleanup.append(self.account)
 
         Resources.updateLimit(self.apiclient,
@@ -1243,17 +1483,23 @@ class TestAccountLimits(cloudstackTestCase):
         self.services["service_offering"]["cpuspeed"] = ""
         self.services["service_offering"]["memory"] = ""
 
-        serviceOffering_dynamic = ServiceOffering.create(self.apiclient,
-                                                 self.services["service_offering"])
+        serviceOffering_dynamic = ServiceOffering.create(
+            self.apiclient,
+            self.services["service_offering"])
 
         self.cleanup_co.append(serviceOffering_dynamic)
 
         # Deploy VM with dynamic service offering
         try:
-            VirtualMachine.create(self.apiclient,self.services["virtual_machine"],
+            VirtualMachine.create(
+                self.apiclient,
+                self.services["virtual_machine"],
                 serviceofferingid=serviceOffering_dynamic.id,
-                accountid=self.account.name,domainid=self.account.domainid,
-                customcpunumber=3, customcpuspeed=256, custommemory=512)
+                accountid=self.account.name,
+                domainid=self.account.domainid,
+                customcpunumber=3,
+                customcpuspeed=256,
+                custommemory=512)
             self.fail("vm creation should have failed, it succeeded")
         except Exception as e:
             self.debug("vm creation failed as expected with error: %s" % e)
@@ -1261,9 +1507,10 @@ class TestAccountLimits(cloudstackTestCase):
         return
 
     @data(ADMIN_ACCOUNT, USER_ACCOUNT)
-    @attr(tags=["basic","advanced"])
+    @attr(tags=["basic", "advanced"])
     def test_max_account_memory_scale_VM(self, value):
-        """Test memory limits of account while scaling VM with dynamic compute offering"""
+        """Test memory limits of account while scaling VM with
+           dynamic compute offering"""
 
         # Steps:
         # 1. Create Account (admin/user)
@@ -1276,12 +1523,16 @@ class TestAccountLimits(cloudstackTestCase):
         # 1. VM creation should succeed
         # 2. VM scaling operation should fail
 
-        isadmin=True
+        isadmin = True
         if value == USER_ACCOUNT:
-            isadmin=False
+            isadmin = False
 
         # Create account and api client
-        self.account = Account.create(self.apiclient,self.services["account"],domainid=self.domain.id, admin=isadmin)
+        self.account = Account.create(
+            self.apiclient,
+            self.services["account"],
+            domainid=self.domain.id,
+            admin=isadmin)
         self.cleanup.append(self.account)
 
         Resources.updateLimit(self.apiclient,
@@ -1295,39 +1546,53 @@ class TestAccountLimits(cloudstackTestCase):
         self.services["service_offering"]["cpuspeed"] = ""
         self.services["service_offering"]["memory"] = ""
 
-        serviceOffering_dynamic = ServiceOffering.create(self.apiclient,
-                                                 self.services["service_offering"])
+        serviceOffering_dynamic = ServiceOffering.create(
+            self.apiclient,
+            self.services["service_offering"])
 
         self.cleanup_co.append(serviceOffering_dynamic)
 
         # Deploy VM with dynamic service offering
         try:
-            virtualMachine = VirtualMachine.create(self.apiclient,self.services["virtual_machine"],
+            virtualMachine = VirtualMachine.create(
+                self.apiclient,
+                self.services["virtual_machine"],
                 serviceofferingid=serviceOffering_dynamic.id,
-                accountid=self.account.name,domainid=self.account.domainid,
-                customcpunumber=2, customcpuspeed=256, custommemory=256)
+                accountid=self.account.name,
+                domainid=self.account.domainid,
+                customcpunumber=2,
+                customcpuspeed=256,
+                custommemory=256)
         except Exception as e:
             self.fail("vm creation failed: %s" % e)
 
         # Scale VM with same dynamic offering
         try:
-            virtualMachine.scale(self.apiclient, serviceOfferingId=serviceOffering_dynamic.id,
-                                 customcpunumber=4, customcpuspeed=512, custommemory=512)
+            virtualMachine.scale(
+                self.apiclient,
+                serviceOfferingId=serviceOffering_dynamic.id,
+                customcpunumber=4,
+                customcpuspeed=512,
+                custommemory=512)
             self.fail("Scaling virtual machine with cpu number more than \
                     allowed limit (of account) succeeded, should have failed")
         except Exception as e:
-            self.debug("Failure while changing service offering as expected: %s" % e)
+            self.debug(
+                "Failure while changing service offering as expected: %s" %
+                e)
 
         return
 
+
 @ddt
 class TestAffinityGroup(cloudstackTestCase):
+
     """Test affinity group working with VMs created with dynamic offering
     """
 
     @classmethod
     def setUpClass(cls):
-        cloudstackTestClient = super(TestAffinityGroup,cls).getClsTestClient()
+        cloudstackTestClient = super(TestAffinityGroup, cls).getClsTestClient()
         cls.api_client = cloudstackTestClient.getApiClient()
 
         # Fill services from the external config file
@@ -1335,13 +1600,15 @@ class TestAffinityGroup(cloudstackTestCase):
 
         # Get Zone, Domain and templates
         cls.domain = get_domain(cls.api_client)
-        cls.zone = get_zone(cls.api_client, cloudstackTestClient.getZoneForTests())
+        cls.zone = get_zone(
+            cls.api_client,
+            cloudstackTestClient.getZoneForTests())
         cls.mode = str(cls.zone.networktype).lower()
         cls.template = get_template(
-                            cls.api_client,
-                            cls.zone.id,
-                            cls.services["ostype"]
-                            )
+            cls.api_client,
+            cls.zone.id,
+            cls.services["ostype"]
+        )
         cls.services["virtual_machine"]["zoneid"] = cls.zone.id
         cls.services["virtual_machine"]["template"] = cls.template.id
         cls._cleanup = []
@@ -1378,7 +1645,7 @@ class TestAffinityGroup(cloudstackTestCase):
         return
 
     @data(ADMIN_ACCOUNT, USER_ACCOUNT)
-    @attr(tags=["basic","advanced"], BugId="7180", required_hardware="true")
+    @attr(tags=["basic", "advanced"], BugId="7180", required_hardware="true")
     def test_deploy_VM_with_affinity_group(self, value):
         """Test deploy VMs with affinity group and dynamic compute offering"""
 
@@ -1391,12 +1658,16 @@ class TestAffinityGroup(cloudstackTestCase):
         # Validations:
         # 1. VM creation should fail
 
-        isadmin=True
+        isadmin = True
         if value == USER_ACCOUNT:
-            isadmin=False
+            isadmin = False
 
         # Create account and api client
-        self.account = Account.create(self.apiclient,self.services["account"],domainid=self.domain.id, admin=isadmin)
+        self.account = Account.create(
+            self.apiclient,
+            self.services["account"],
+            domainid=self.domain.id,
+            admin=isadmin)
         self.cleanup.append(self.account)
 
         # Create dynamic service offerings
@@ -1404,43 +1675,68 @@ class TestAffinityGroup(cloudstackTestCase):
         self.services["service_offering"]["cpuspeed"] = ""
         self.services["service_offering"]["memory"] = ""
 
-        serviceOffering_dynamic = ServiceOffering.create(self.apiclient,
-                                                 self.services["service_offering"])
+        serviceOffering_dynamic = ServiceOffering.create(
+            self.apiclient,
+            self.services["service_offering"])
 
         self.cleanup_co.append(serviceOffering_dynamic)
 
-        self.services["host_anti_affinity"]["name"] = "aff_grp_" + random_gen(size=6)
-        affinityGroup = AffinityGroup.create(self.apiclient, self.services["host_anti_affinity"],
-                                             self.account.name, self.domain.id)
+        self.services["host_anti_affinity"][
+            "name"] = "aff_grp_" + random_gen(size=6)
+        affinityGroup = AffinityGroup.create(
+            self.apiclient,
+            self.services["host_anti_affinity"],
+            self.account.name,
+            self.domain.id)
 
         # Deploy VM with dynamic service offering
         try:
-            virtualMachine = VirtualMachine.create(self.apiclient,self.services["virtual_machine"],
+            virtualMachine = VirtualMachine.create(
+                self.apiclient,
+                self.services["virtual_machine"],
                 serviceofferingid=serviceOffering_dynamic.id,
-                accountid=self.account.name,domainid=self.account.domainid,
-                customcpunumber=2, customcpuspeed=256, custommemory=128,
-                affinitygroupnames=[affinityGroup.name])
+                accountid=self.account.name,
+                domainid=self.account.domainid,
+                customcpunumber=2,
+                customcpuspeed=256,
+                custommemory=128,
+                affinitygroupnames=[
+                    affinityGroup.name])
         except Exception as e:
             self.fail("vm creation failed with error: %s" % e)
 
-        otherHostsInCluster = Host.list(self.apiclient, virtualmachineid=virtualMachine.id)
+        otherHostsInCluster = Host.list(
+            self.apiclient,
+            virtualmachineid=virtualMachine.id)
         if validateList(otherHostsInCluster)[0] == PASS:
             try:
-                VirtualMachine.create(self.apiclient,self.services["virtual_machine"],
-                serviceofferingid=serviceOffering_dynamic.id,
-                accountid=self.account.name,domainid=self.account.domainid,
-                customcpunumber=2, customcpuspeed=256, custommemory=128,
-                affinitygroupnames=[affinityGroup.name])
+                VirtualMachine.create(
+                    self.apiclient,
+                    self.services["virtual_machine"],
+                    serviceofferingid=serviceOffering_dynamic.id,
+                    accountid=self.account.name,
+                    domainid=self.account.domainid,
+                    customcpunumber=2,
+                    customcpuspeed=256,
+                    custommemory=128,
+                    affinitygroupnames=[
+                        affinityGroup.name])
             except Exception as e:
                 self.fail("vm creation failed with error: %s" % e)
 
         else:
             try:
-                VirtualMachine.create(self.apiclient,self.services["virtual_machine"],
-                serviceofferingid=serviceOffering_dynamic.id,
-                accountid=self.account.name,domainid=self.account.domainid,
-                customcpunumber=2, customcpuspeed=256, custommemory=128,
-                affinitygroupnames=[affinityGroup.name])
+                VirtualMachine.create(
+                    self.apiclient,
+                    self.services["virtual_machine"],
+                    serviceofferingid=serviceOffering_dynamic.id,
+                    accountid=self.account.name,
+                    domainid=self.account.domainid,
+                    customcpunumber=2,
+                    customcpuspeed=256,
+                    custommemory=128,
+                    affinitygroupnames=[
+                        affinityGroup.name])
                 self.fail("vm creation should have failed, it succeded")
             except Exception as e:
                 self.debug("vm creation failed as expected with error: %s" % e)


[22/50] [abbrv] git commit: updated refs/heads/reporter to b26f3fc

Posted by wi...@apache.org.
Revert "CLOUDSTACK-7872: network getting shutdown inspite of running VM's in the network"
This reverts commit 709bf074de9f8f22e6a71362551c4867be884e4b.

Network GC is broken with out-of-band VM movements due to the original commit, so reverting.
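
For context, the short sketch below is purely illustrative and is not part of this commit or of CloudStack itself; the class and method names (NetworkGcEligibility, isEligibleForGc) and the plain parameters are hypothetical. It only restates the eligibility criteria that findNetworksToGarbageCollect() returns to with this revert, as shown in the NetworkDaoImpl hunk further down: an active nic count of zero plus the gc and check-for-gc flags (the real query additionally joins on a non-persistent network offering). The countNicsForRunningVms() guard being removed, visible in the NicDaoImpl hunk, was the extra running-VM condition.

// Illustrative only -- hypothetical helper, not CloudStack code.
public final class NetworkGcEligibility {

    private NetworkGcEligibility() {
    }

    // Mirrors the criteria restored on GarbageCollectedSearch: a network is a GC
    // candidate again when it has no active nics and both op-table flags are set.
    public static boolean isEligibleForGc(int activeNics, boolean gcEnabled, boolean checkForGc) {
        return activeNics == 0 && gcEnabled && checkForGc;
    }

    public static void main(String[] args) {
        System.out.println(isEligibleForGc(0, true, true));  // true  -> GC candidate
        System.out.println(isEligibleForGc(2, true, true));  // false -> nics still active
    }
}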


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/106ec718
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/106ec718
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/106ec718

Branch: refs/heads/reporter
Commit: 106ec718fc6557ac41f34809dee19bc7eef4a110
Parents: 51ee900
Author: Koushik Das <ko...@apache.org>
Authored: Tue Dec 23 12:13:51 2014 +0530
Committer: Koushik Das <ko...@apache.org>
Committed: Tue Dec 23 12:13:51 2014 +0530

----------------------------------------------------------------------
 .../com/cloud/network/dao/NetworkDaoImpl.java   |  2 ++
 engine/schema/src/com/cloud/vm/dao/NicDao.java  |  2 --
 .../schema/src/com/cloud/vm/dao/NicDaoImpl.java | 22 --------------------
 .../src/com/cloud/network/NetworkModelImpl.java |  7 -------
 4 files changed, 2 insertions(+), 31 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/106ec718/engine/schema/src/com/cloud/network/dao/NetworkDaoImpl.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/network/dao/NetworkDaoImpl.java b/engine/schema/src/com/cloud/network/dao/NetworkDaoImpl.java
index 4a07455..0c556c8 100644
--- a/engine/schema/src/com/cloud/network/dao/NetworkDaoImpl.java
+++ b/engine/schema/src/com/cloud/network/dao/NetworkDaoImpl.java
@@ -244,6 +244,7 @@ public class NetworkDaoImpl extends GenericDaoBase<NetworkVO, Long> implements N
         GarbageCollectedSearch = createSearchBuilder(Long.class);
         GarbageCollectedSearch.selectFields(GarbageCollectedSearch.entity().getId());
         SearchBuilder<NetworkOpVO> join7 = _ntwkOpDao.createSearchBuilder();
+        join7.and("activenics", join7.entity().getActiveNicsCount(), Op.EQ);
         join7.and("gc", join7.entity().isGarbageCollected(), Op.EQ);
         join7.and("check", join7.entity().isCheckForGc(), Op.EQ);
         GarbageCollectedSearch.join("ntwkOpGC", join7, GarbageCollectedSearch.entity().getId(), join7.entity().getId(), JoinBuilder.JoinType.INNER);
@@ -437,6 +438,7 @@ public class NetworkDaoImpl extends GenericDaoBase<NetworkVO, Long> implements N
     public List<Long> findNetworksToGarbageCollect() {
         SearchCriteria<Long> sc = GarbageCollectedSearch.create();
         sc.setJoinParameters("ntwkOffGC", "isPersistent", false);
+        sc.setJoinParameters("ntwkOpGC", "activenics", 0);
         sc.setJoinParameters("ntwkOpGC", "gc", true);
         sc.setJoinParameters("ntwkOpGC", "check", true);
         return customSearch(sc, null);

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/106ec718/engine/schema/src/com/cloud/vm/dao/NicDao.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/vm/dao/NicDao.java b/engine/schema/src/com/cloud/vm/dao/NicDao.java
index 9f153f8..a7ad016 100644
--- a/engine/schema/src/com/cloud/vm/dao/NicDao.java
+++ b/engine/schema/src/com/cloud/vm/dao/NicDao.java
@@ -74,6 +74,4 @@ public interface NicDao extends GenericDao<NicVO, Long> {
     List<NicVO> listByNetworkIdTypeAndGatewayAndBroadcastUri(long networkId, VirtualMachine.Type vmType, String gateway, URI broadcastUri);
 
     int countNicsForStartingVms(long networkId);
-
-    int countNicsForRunningVms(long networkId);
 }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/106ec718/engine/schema/src/com/cloud/vm/dao/NicDaoImpl.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/vm/dao/NicDaoImpl.java b/engine/schema/src/com/cloud/vm/dao/NicDaoImpl.java
index ca44b63..2a9a602 100644
--- a/engine/schema/src/com/cloud/vm/dao/NicDaoImpl.java
+++ b/engine/schema/src/com/cloud/vm/dao/NicDaoImpl.java
@@ -46,7 +46,6 @@ public class NicDaoImpl extends GenericDaoBase<NicVO, Long> implements NicDao {
     private SearchBuilder<NicVO> NonReleasedSearch;
     private GenericSearchBuilder<NicVO, Integer> CountBy;
     private GenericSearchBuilder<NicVO, Integer> CountByForStartingVms;
-    private GenericSearchBuilder<NicVO, Integer> CountByForRunningVms;
 
     @Inject
     VMInstanceDao _vmDao;
@@ -96,17 +95,6 @@ public class NicDaoImpl extends GenericDaoBase<NicVO, Long> implements NicDao {
         join1.and("state", join1.entity().getState(), Op.EQ);
         CountByForStartingVms.join("vm", join1, CountByForStartingVms.entity().getInstanceId(), join1.entity().getId(), JoinBuilder.JoinType.INNER);
         CountByForStartingVms.done();
-
-        CountByForRunningVms = createSearchBuilder(Integer.class);
-        CountByForRunningVms.select(null, Func.COUNT, CountByForRunningVms.entity().getId());
-        CountByForRunningVms.and("networkId", CountByForRunningVms.entity().getNetworkId(), Op.EQ);
-        CountByForRunningVms.and("removed", CountByForRunningVms.entity().getRemoved(), Op.NULL);
-        SearchBuilder<VMInstanceVO> join2 = _vmDao.createSearchBuilder();
-        join2.and("state", join2.entity().getState(), Op.EQ);
-        join2.and("type", join2.entity().getType(), Op.EQ);
-        CountByForRunningVms.join("vm", join2, CountByForRunningVms.entity().getInstanceId(), join2.entity().getId(), JoinBuilder.JoinType.INNER);
-        CountByForRunningVms.done();
-
     }
 
     @Override
@@ -303,14 +291,4 @@ public class NicDaoImpl extends GenericDaoBase<NicVO, Long> implements NicDao {
         List<Integer> results = customSearch(sc, null);
         return results.get(0);
     }
-
-    @Override
-    public int countNicsForRunningVms(long networkId) {
-        SearchCriteria<Integer> sc = CountByForRunningVms.create();
-        sc.setParameters("networkId", networkId);
-        sc.setJoinParameters("vm", "state", VirtualMachine.State.Running);
-        sc.setJoinParameters("vm", "type", VirtualMachine.Type.User);
-        List<Integer> results = customSearch(sc, null);
-        return results.get(0);
-    }
 }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/106ec718/server/src/com/cloud/network/NetworkModelImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/network/NetworkModelImpl.java b/server/src/com/cloud/network/NetworkModelImpl.java
index 1fd354d..6088212 100644
--- a/server/src/com/cloud/network/NetworkModelImpl.java
+++ b/server/src/com/cloud/network/NetworkModelImpl.java
@@ -2232,13 +2232,6 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel {
             return false;
         }
 
-        // Due to VMSync issue, there can be cases where nic count is zero, but there can be VM's running in the network
-        // so add extra guard to check if network GC is actially required.
-        if (_nicDao.countNicsForRunningVms(networkId) > 0) {
-            s_logger.debug("Network id=" + networkId + " is not ready for GC as it has vms that are Running at the moment");
-            return false;
-        }
-
         return true;
     }
 


[02/50] [abbrv] git commit: updated refs/heads/reporter to b26f3fc

Posted by wi...@apache.org.
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/95b55841/test/integration/component/test_dedicate_guest_vlan_ranges.py
----------------------------------------------------------------------
diff --git a/test/integration/component/test_dedicate_guest_vlan_ranges.py b/test/integration/component/test_dedicate_guest_vlan_ranges.py
deleted file mode 100644
index 9a0e7f0..0000000
--- a/test/integration/component/test_dedicate_guest_vlan_ranges.py
+++ /dev/null
@@ -1,1241 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-""" P1 tests for Dedicating guest VLAN ranges
-
-    Test Plan: https://cwiki.apache.org/confluence/display/CLOUDSTACK/Dedicated+Resources+-+Public+IP+Addresses+and+VLANs+per+Tenant+Test+Plan
-
-    Issue Link: https://issues.apache.org/jira/browse/CLOUDSTACK-2251
-
-    Feature Specifications: https://cwiki.apache.org/confluence/display/CLOUDSTACK/FS-+Dedicate+Guest+VLANs+per+tenant
-"""
-#Import Local Modules
-from nose.plugins.attrib import attr
-from marvin.cloudstackTestCase import cloudstackTestCase, unittest
-from marvin.lib.utils import (validateList,
-                              cleanup_resources,
-                              random_gen,
-			                  xsplit)
-from marvin.lib.base import (Account,
-                             Domain,
-                             PhysicalNetwork,
-                             NetworkOffering,
-                             Network,
-                             ServiceOffering,
-                             Project)
-from marvin.lib.common import (get_domain,
-                               get_zone,
-                               get_template,
-                               setNonContiguousVlanIds,
-                               isNetworkDeleted)
-from marvin.codes import PASS
-
-def LimitVlanRange(self, vlanrange, range=2):
-    """Limits the length of vlan range"""
-    vlan_endpoints = str(vlanrange).split("-")
-    vlan_startid = int(vlan_endpoints[1])
-    vlan_endid = vlan_startid + (range-1)
-    return str(vlan_startid) + "-" + str(vlan_endid)
-
-class TestDedicateGuestVLANRange(cloudstackTestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        cls.testClient = super(TestDedicateGuestVLANRange, cls).getClsTestClient()
-        cls.apiclient = cls.testClient.getApiClient()
-        cls.testdata =  cls.testClient.getParsedTestDataConfig()
-        # Get Zone, Domain
-        cls.domain = get_domain(cls.apiclient)
-        cls.zone = get_zone(cls.apiclient)
-        cls.testdata["isolated_network"]["zoneid"] = cls.zone.id
-        cls.testdata['mode'] = cls.zone.networktype
-        template = get_template(
-            cls.apiclient,
-            cls.zone.id,
-            cls.testdata["ostype"]
-            )
-        cls._cleanup = []
-
-        try:
-            cls.isolated_network_offering = NetworkOffering.create(
-                          cls.apiclient,
-                          cls.testdata["nw_off_isolated_persistent"])
-            cls._cleanup.append(cls.isolated_network_offering)
-            cls.isolated_network_offering.update(cls.apiclient, state='Enabled')
-
-            cls.testdata["nw_off_isolated_persistent"]["specifyVlan"] = True
-            cls.isolated_network_offering_vlan = NetworkOffering.create(
-                          cls.apiclient,
-                          cls.testdata["nw_off_isolated_persistent"])
-            cls._cleanup.append(cls.isolated_network_offering_vlan)
-            cls.isolated_network_offering_vlan.update(cls.apiclient, state='Enabled')
-
-            cls.service_offering = ServiceOffering.create(
-                                                          cls.apiclient,
-                                                          cls.testdata["service_offering"])
-            cls._cleanup.append(cls.service_offering)
-
-            cls.testdata["small"]["zoneid"] = cls.zone.id
-            cls.testdata["small"]["template"] = template.id
-        except Exception as e:
-            cls.tearDownClass()
-            raise unittest.SkipTest(e)
-        return
-
-    @classmethod
-    def tearDownClass(cls):
-        try:
-            # Cleanup resources used
-            cleanup_resources(cls.apiclient, cls._cleanup)
-        except Exception as e:
-            raise Exception("Warning: Exception during cleanup : %s" % e)
-        return
-
-    def setUp(self):
-        self.apiclient = self.testClient.getApiClient()
-        self.dbclient = self.testClient.getDbConnection()
-        self.cleanup = []
-        self.physical_network, self.free_vlan = setNonContiguousVlanIds(self.apiclient,
-                                                                            self.zone.id)
-        return
-
-    def tearDown(self):
-        try:
-            # Clean up
-            cleanup_resources(self.apiclient, self.cleanup)
-        except Exception as e:
-            raise Exception("Warning: Exception during cleanup : %s" % e)
-        finally:
-            self.physical_network.update(self.apiclient,
-                        id=self.physical_network.id,
-                        vlan=self.physical_network.vlan)
-        return
-
-    @attr(tags = ["advanced", "selfservice"], required_hardware="false")
-    def test_01_dedicate_guest_vlan_range_root_domain(self):
-        """Dedicate guest vlan range to account in root domain
-
-        # Validate the following:
-        # 1. Create two accounts under root domain
-        # 2. Dedicate a new vlan range to account 1
-        # 3. Verify that the new vlan range is dedicated to account 1
-             by listing the dedicated range and checking the account name
-        # 4. Try to create a guest network in account 2 usign the vlan in dedicated range
-        # 5. The operation should fail
-        # 6. Create a guest network in account 2
-        # 7. Verify that the vlan for guest network is acquired from the dedicated range
-        # 8. Delete the guest network in account 2
-        # 9. Verify that the network is deleted
-        # 10.Verify that the vlan is still dedicated to account 1 after deleting the network
-        # 11.Release the vlan range back to the system
-        # 12.Verify that ther list of dedicated vlans doesn't contain the vlan
-        """
-        self.account1 = Account.create(
-                            self.apiclient,
-                            self.testdata["account"],
-                            domainid=self.domain.id
-                            )
-        self.cleanup.append(self.account1)
-
-        self.account2 = Account.create(
-                            self.apiclient,
-                            self.testdata["account"],
-                            domainid=self.domain.id
-                            )
-        self.cleanup.append(self.account2)
-
-        new_vlan = self.physical_network.vlan + "," + self.free_vlan["partial_range"][0]
-        self.physical_network.update(self.apiclient,
-                id=self.physical_network.id, vlan=new_vlan)
-
-        # Dedicating guest vlan range
-        dedicate_guest_vlan_range_response = PhysicalNetwork.dedicate(
-                                                self.apiclient,
-                                                self.free_vlan["partial_range"][0],
-                                                physicalnetworkid=self.physical_network.id,
-                                                account=self.account1.name,
-                                                domainid=self.account1.domainid
-                                            )
-        list_dedicated_guest_vlan_range_response = PhysicalNetwork.listDedicated(
-                                                self.apiclient,
-                                                id=dedicate_guest_vlan_range_response.id
-                                        )
-        dedicated_guest_vlan_response = list_dedicated_guest_vlan_range_response[0]
-        self.assertEqual(
-                            dedicated_guest_vlan_response.account,
-                            self.account1.name,
-                            "Check account name is in listDedicatedGuestVlanRanges as the account the range is dedicated to"
-                        )
-
-        dedicatedvlans = str(self.free_vlan["partial_range"][0]).split("-")
-
-        with self.assertRaises(Exception):
-            isolated_network1 = Network.create(
-                                   self.apiclient,
-                                   self.testdata["isolated_network"],
-                                   self.account2.name,
-                                   self.account2.domainid,
-                                   networkofferingid=self.isolated_network_offering_vlan.id,
-                                   vlan=int(dedicatedvlans[0]))
-            isolated_network1.delete(self.apiclient)
-
-        isolated_network2 = Network.create(
-                                   self.apiclient,
-                                   self.testdata["isolated_network"],
-                                   self.account1.name,
-                                   self.account1.domainid,
-                                   networkofferingid=self.isolated_network_offering.id)
-
-        networks = Network.list(self.apiclient, id=isolated_network2.id)
-        self.assertEqual(validateList(networks)[0], PASS, "networks list validation failed")
-
-        self.assertTrue(int(dedicatedvlans[0]) <= int(networks[0].vlan) <= int(dedicatedvlans[1]),
-                        "Vlan of the network should be from the dedicated range")
-
-        isolated_network2.delete(self.apiclient)
-        self.assertTrue(isNetworkDeleted(self.apiclient, networkid=isolated_network2.id),
-                        "Network not deleted in timeout period")
-
-        # List after deleting all networks, it should still be dedicated to the account
-        list_dedicated_guest_vlan_range_response = PhysicalNetwork.listDedicated(
-                                                self.apiclient,
-                                                id=dedicate_guest_vlan_range_response.id
-                                        )
-        dedicated_guest_vlan_response = list_dedicated_guest_vlan_range_response[0]
-        self.assertEqual(
-                            dedicated_guest_vlan_response.account,
-                            self.account1.name,
-                            "Check account name is in listDedicatedGuestVlanRanges as the account the range is dedicated to"
-                        )
-
-        self.debug("Releasing guest vlan range");
-        dedicate_guest_vlan_range_response.release(self.apiclient)
-        list_dedicated_guest_vlan_range_response = PhysicalNetwork.listDedicated(self.apiclient)
-        self.assertEqual(
-                        list_dedicated_guest_vlan_range_response,
-                        None,
-                        "Check vlan range is not available in listDedicatedGuestVlanRanges"
-
-                        )
-        return
-
-    @attr(tags = ["advanced", "selfservice"], required_hardware="false")
-    def test_02_dedicate_guest_vlan_range_user_domain(self):
-        """Dedicate guest vlan range to account in user domain
-
-        # Validate the following:
-        # 1. Create two accounts under user domain
-        # 2. Dedicate a new vlan range to account 1
-        # 3. Verify that the new vlan range is dedicated to account 1
-             by listing the dedicated range and checking the account name
-        # 4. Try to create a guest network in account 2 using a vlan from the dedicated range
-        # 5. The operation should fail
-        # 6. Create a guest network in account 1
-        # 7. Verify that the vlan for the guest network is acquired from the dedicated range
-        # 8. Delete the guest network in account 1
-        # 9. Verify that the network is deleted
-        # 10.Verify that the vlan range is still dedicated to account 1 after deleting the network
-        # 11.Release the vlan range back to the system
-        # 12.Verify that the list of dedicated vlans doesn't contain the range
-        """
-        self.user_domain1 = Domain.create(
-                                self.apiclient,
-                                services=self.testdata["domain"],
-                                parentdomainid=self.domain.id)
-        self.cleanup.append(self.user_domain1)
-
-        #Create Account
-        self.account1 = Account.create(
-                            self.apiclient,
-                            self.testdata["account"],
-                            domainid=self.user_domain1.id
-                            )
-        self.cleanup.insert(-1, self.account1)
-
-        #Create Account
-        self.account2 = Account.create(
-                            self.apiclient,
-                            self.testdata["account"],
-                            domainid=self.user_domain1.id
-                            )
-        self.cleanup.insert(-1, self.account2)
-
-        new_vlan = self.physical_network.vlan + "," + self.free_vlan["partial_range"][0]
-        self.physical_network.update(self.apiclient,
-                id=self.physical_network.id, vlan=new_vlan)
-
-        # Dedicating guest vlan range
-        dedicate_guest_vlan_range_response = PhysicalNetwork.dedicate(
-                                                self.apiclient,
-                                                self.free_vlan["partial_range"][0],
-                                                physicalnetworkid=self.physical_network.id,
-                                                account=self.account1.name,
-                                                domainid=self.account1.domainid
-                                            )
-        list_dedicated_guest_vlan_range_response = PhysicalNetwork.listDedicated(
-                                                self.apiclient,
-                                                id=dedicate_guest_vlan_range_response.id
-                                        )
-        dedicated_guest_vlan_response = list_dedicated_guest_vlan_range_response[0]
-        self.assertEqual(
-                            dedicated_guest_vlan_response.account,
-                            self.account1.name,
-                            "Check account name is in listDedicatedGuestVlanRanges as the account the range is dedicated to"
-                        )
-
-        dedicatedvlans = str(self.free_vlan["partial_range"][0]).split("-")
-
-        with self.assertRaises(Exception):
-            isolated_network1 = Network.create(
-                                   self.apiclient,
-                                   self.testdata["isolated_network"],
-                                   self.account2.name,
-                                   self.account2.domainid,
-                                   networkofferingid=self.isolated_network_offering_vlan.id,
-                                   vlan=int(dedicatedvlans[0]))
-            isolated_network1.delete(self.apiclient)
-
-        isolated_network2 = Network.create(
-                                   self.apiclient,
-                                   self.testdata["isolated_network"],
-                                   self.account1.name,
-                                   self.account1.domainid,
-                                   networkofferingid=self.isolated_network_offering.id)
-
-        networks = Network.list(self.apiclient, id=isolated_network2.id, listall=True)
-        self.assertEqual(validateList(networks)[0], PASS, "networks list validation failed")
-
-        self.assertTrue(int(dedicatedvlans[0]) <= int(networks[0].vlan) <= int(dedicatedvlans[1]),
-                        "Vlan of the network should be from the dedicated range")
-
-        isolated_network2.delete(self.apiclient)
-        self.assertTrue(isNetworkDeleted(self.apiclient, networkid=isolated_network2.id),
-                        "Network not deleted in timeout period")
-
-        self.debug("Releasing guest vlan range");
-        dedicate_guest_vlan_range_response.release(self.apiclient)
-        list_dedicated_guest_vlan_range_response = PhysicalNetwork.listDedicated(self.apiclient)
-        self.assertEqual(
-                        list_dedicated_guest_vlan_range_response,
-                        None,
-                        "Check vlan range is not available in listDedicatedGuestVlanRanges"
-
-                        )
-        return
-
-    @attr(tags = ["advanced", "selfservice"], required_hardware="false")
-    def test_03_multiple_guest_netwoks(self):
-        """Dedicate multiple guest networks in account with dedicated vlan range
-
-        # Validate the following:
-        # 1. Create account under user domain
-        # 2. Dedicate a new vlan range of size 2 to the account
-        # 3. Verify that the new vlan range is dedicated to account
-             by listing the dedicated range and checking the account name
-        # 4. Create a guest network in the account
-        # 5. Verify that the vlan of the network is from dedicated range
-        # 6. Repeat steps 4 and 5 for network 2
-        # 7. Now create 3rd guest network in the account
-        # 8. Verify that the vlan of the network is not from the dedicated range, as
-             all the vlans in dedicated range are now exhausted
-        """
-        self.user_domain = Domain.create(
-                                self.apiclient,
-                                services=self.testdata["domain"],
-                                parentdomainid=self.domain.id)
-        self.cleanup.append(self.user_domain)
-
-        #Create Account
-        self.account = Account.create(
-                            self.apiclient,
-                            self.testdata["account"],
-                            domainid=self.user_domain.id
-                            )
-        self.cleanup.insert(-1, self.account)
-
-        self.free_vlan["partial_range"][0] = LimitVlanRange(self, self.free_vlan["partial_range"][0], range=2)
-        vlan_startid = int(str(self.free_vlan["partial_range"][0]).split("-")[0])
-        vlan_endid = vlan_startid + 1
-
-        new_vlan = self.physical_network.vlan + "," + self.free_vlan["partial_range"][0]
-        self.physical_network.update(self.apiclient,
-                id=self.physical_network.id, vlan=new_vlan)
-
-        # Dedicating guest vlan range
-        dedicate_guest_vlan_range_response = PhysicalNetwork.dedicate(
-                                                self.apiclient,
-                                                self.free_vlan["partial_range"][0],
-                                                physicalnetworkid=self.physical_network.id,
-                                                account=self.account.name,
-                                                domainid=self.account.domainid
-                                            )
-        list_dedicated_guest_vlan_range_response = PhysicalNetwork.listDedicated(
-                                                self.apiclient,
-                                                id=dedicate_guest_vlan_range_response.id
-                                        )
-        dedicated_guest_vlan_response = list_dedicated_guest_vlan_range_response[0]
-        self.assertEqual(
-                            dedicated_guest_vlan_response.account,
-                            self.account.name,
-                            "Check account name is in listDedicatedGuestVlanRanges as the account the range is dedicated to"
-                        )
-
-        isolated_network1 = Network.create(
-                                   self.apiclient,
-                                   self.testdata["isolated_network"],
-                                   self.account.name,
-                                   self.account.domainid,
-                                   networkofferingid=self.isolated_network_offering.id)
-
-        networks = Network.list(self.apiclient, id=isolated_network1.id, listall=True)
-        self.assertEqual(validateList(networks)[0], PASS, "networks list validation failed")
-
-        self.assertTrue(vlan_startid <= int(networks[0].vlan) <= vlan_endid,
-                        "Vlan of the network should be from the dedicated range")
-
-        isolated_network2 = Network.create(
-                                   self.apiclient,
-                                   self.testdata["isolated_network"],
-                                   self.account.name,
-                                   self.account.domainid,
-                                   networkofferingid=self.isolated_network_offering.id)
-
-        networks = Network.list(self.apiclient, id=isolated_network2.id, listall=True)
-        self.assertEqual(validateList(networks)[0], PASS, "networks list validation failed")
-
-        self.assertTrue(vlan_startid <= int(networks[0].vlan) <= vlan_endid,
-                        "Vlan of the network should be from the dedicated range")
-
-        isolated_network3 = Network.create(
-                                   self.apiclient,
-                                   self.testdata["isolated_network"],
-                                   self.account.name,
-                                   self.account.domainid,
-                                   networkofferingid=self.isolated_network_offering.id)
-
-        networks = Network.list(self.apiclient, id=isolated_network3.id, listall=True)
-        self.assertEqual(validateList(networks)[0], PASS, "networks list validation failed")
-
-        self.assertFalse(vlan_startid <= int(networks[0].vlan) <= vlan_endid,
-                        "Vlan of the network should not be from the dedicated range")
-        return
-
-
-    @attr(tags = ["invalid"])
-    def test_04_dedicate_guest_vlan_in_project(self):
-        """Dedicate guest vlan range project owner account and test guest network vlan in project
-
-        # Validate the following:
-        # 1. Create account under user domain
-        # 2. Create a project with this account
-        # 3. Dedicate a new vlan range to the account
-        # 4. Verify that the new vlan range is dedicated to account
-             by listing the dedicated range and checking the account name
-        # 5. Create a guest network in the project
-        # 6. Verify that the vlan of the network is from dedicated range
-        # 7. Repeat steps 5 and 6 for network 2
-        # 8. Now create a 3rd guest network in the project
-        # 9. Verify that the vlan of the network is not from the dedicated range, as
-             all the vlans in the dedicated range are now exhausted
-        """
-        user_domain = Domain.create(
-                                self.apiclient,
-                                services=self.testdata["domain"],
-                                parentdomainid=self.domain.id)
-        self.cleanup.append(user_domain)
-        #Create Account
-        self.account = Account.create(
-                            self.apiclient,
-                            self.testdata["account"],
-                            domainid=user_domain.id
-                            )
-        self.cleanup.insert(-1, self.account)
-        # Create project as a domain admin
-        project = Project.create(self.apiclient,
-                                 self.testdata["project"],
-                                 account=self.account.name,
-                                 domainid=self.account.domainid)
-        self.cleanup.insert(-2, project)
-
-        self.free_vlan["partial_range"][0] = LimitVlanRange(self, self.free_vlan["partial_range"][0], range=2)
-        vlan_startid = int(str(self.free_vlan["partial_range"][0]).split("-")[0])
-        vlan_endid = vlan_startid + 1
-
-        new_vlan = self.physical_network.vlan + "," + self.free_vlan["partial_range"][0]
-        self.physical_network.update(self.apiclient,
-                id=self.physical_network.id, vlan=new_vlan)
-
-        # Dedicating guest vlan range
-        dedicate_guest_vlan_range_response = PhysicalNetwork.dedicate(
-                                                self.apiclient,
-                                                self.free_vlan["partial_range"][0],
-                                                physicalnetworkid=self.physical_network.id,
-                                                account=self.account.name,
-                                                domainid=self.account.domainid
-                                                )
-        list_dedicated_guest_vlan_range_response = PhysicalNetwork.listDedicated(
-                                                self.apiclient,
-                                                id=dedicate_guest_vlan_range_response.id
-                                        )
-        dedicated_guest_vlan_response = list_dedicated_guest_vlan_range_response[0]
-        self.assertEqual(
-                            dedicated_guest_vlan_response.account,
-                            self.account.name,
-                            "Check account name is in listDedicatedGuestVlanRanges as the account the range is dedicated to"
-                        )
-
-        isolated_network1 = Network.create(
-                                   self.apiclient,
-                                   self.testdata["isolated_network"],
-                                   projectid=project.id,
-                                   networkofferingid=self.isolated_network_offering.id)
-
-        networks = Network.list(self.apiclient, id=isolated_network1.id, projectid=project.id, listall=True)
-        self.assertEqual(validateList(networks)[0], PASS, "networks list validation failed")
-
-        self.assertTrue(vlan_startid <= int(networks[0].vlan) <= vlan_endid,
-                        "Vlan of the network should be from the dedicated range")
-
-        isolated_network2 = Network.create(
-                                   self.apiclient,
-                                   self.testdata["isolated_network"],
-                                   projectid=project.id,
-                                   networkofferingid=self.isolated_network_offering.id)
-
-        networks = Network.list(self.apiclient, id=isolated_network2.id, projectid=project.id, listall=True)
-        self.assertEqual(validateList(networks)[0], PASS, "networks list validation failed")
-
-        self.assertTrue(vlan_startid <= int(networks[0].vlan) <= vlan_endid,
-                        "Vlan of the network should be from the dedicated range")
-
-        isolated_network3 = Network.create(
-                                   self.apiclient,
-                                   self.testdata["isolated_network"],
-                                   projectid=project.id,
-                                   networkofferingid=self.isolated_network_offering.id)
-
-        networks = Network.list(self.apiclient, id=isolated_network3.id, projectid=project.id, listall=True)
-        self.assertEqual(validateList(networks)[0], PASS, "networks list validation failed")
-
-        self.assertFalse(vlan_startid <= int(networks[0].vlan) <= vlan_endid,
-                        "Vlan of the network should not be from the dedicated range")
-        return
-
-    @attr(tags = ["advanced", "selfservice"], required_hardware="false")
-    def test_05_dedicate_range_different_accounts(self):
-        """Dedicate two different vlan ranges to two different accounts
-
-        # Validate the following:
-        # 1. Create two accounts in root domain
-        # 2. Update the physical network with two different vlan ranges
-        # 3. Dedicate first vlan range to the account 1
-        # 4. Dedicate 2nd vlan range to account 2
-        # 5. Both the operations should be successful
-        """
-        self.account1 = Account.create(
-                            self.apiclient,
-                            self.testdata["account"],
-                            domainid=self.domain.id
-                            )
-        self.cleanup.append(self.account1)
-
-        self.account2 = Account.create(
-                            self.apiclient,
-                            self.testdata["account"],
-                            domainid=self.domain.id
-                            )
-        self.cleanup.append(self.account2)
-
-        new_vlan = self.physical_network.vlan + "," + self.free_vlan["partial_range"][0] + ","+\
-                   self.free_vlan["partial_range"][1]
-        self.physical_network.update(self.apiclient,
-                id=self.physical_network.id, vlan=new_vlan)
-
-        # Dedicating guest vlan range
-        dedicate_guest_vlan_range_response = PhysicalNetwork.dedicate(
-                                                 self.apiclient,
-                                                 self.free_vlan["partial_range"][0],
-                                                 physicalnetworkid=self.physical_network.id,
-                                                 account=self.account1.name,
-                                                 domainid=self.account1.domainid
-                                                 )
-        list_dedicated_guest_vlan_range_response = PhysicalNetwork.listDedicated(
-                                                       self.apiclient,
-                                                       id=dedicate_guest_vlan_range_response.id
-                                                       )
-        dedicated_guest_vlan_response = list_dedicated_guest_vlan_range_response[0]
-        self.assertEqual(
-                            dedicated_guest_vlan_response.account,
-                            self.account1.name,
-                            "Check account name is in listDedicatedGuestVlanRanges as the account the range is dedicated to"
-                        )
-
-        # Dedicating guest vlan range
-        dedicate_guest_vlan_range_response = PhysicalNetwork.dedicate(
-                                                 self.apiclient,
-                                                 self.free_vlan["partial_range"][1],
-                                                 physicalnetworkid=self.physical_network.id,
-                                                 account=self.account2.name,
-                                                 domainid=self.account2.domainid
-                                                 )
-
-        list_dedicated_guest_vlan_range_response = PhysicalNetwork.listDedicated(
-                                                       self.apiclient,
-                                                       id=dedicate_guest_vlan_range_response.id
-                                                       )
-        dedicated_guest_vlan_response = list_dedicated_guest_vlan_range_response[0]
-        self.assertEqual(
-                         dedicated_guest_vlan_response.account,
-                         self.account2.name,
-                         "Check account name is in listDedicatedGuestVlanRanges as the account the range is dedicated to"
-                         )
-        return
-
-    @attr(tags = ["advanced", "selfservice"], required_hardware="false")
-    def test_07_extend_vlan_range(self):
-        """Dedicate vlan range to an account when some vlan in range are already acquired by same account
-
-        # Validate the following:
-        # 1. Create account under root domain
-        # 2. Add a new vlan range to the physical network
-        # 3. Create a guest network in account using the vlan id from the newly added range
-        # 4. Try to dedicate the vlan range to account
-        # 5. Operation should succeed
-        """
-        self.account = Account.create(
-                            self.apiclient,
-                            self.testdata["account"],
-                            domainid=self.domain.id
-                            )
-        self.cleanup.append(self.account)
-
-        vlans = str(self.free_vlan["partial_range"][0]).split("-")
-        startid = int(vlans[0])
-        endid = int(vlans[1])
-
-        vlan_range1 = str(startid) + "-" + str(endid)
-        vlan_range2 = str(endid+1) + "-" + str(endid+2)
-        full_range = str(startid) + "-" + str(endid+2)
-
-        new_vlan = self.physical_network.vlan + "," + full_range
-        self.physical_network.update(self.apiclient,
-                id=self.physical_network.id, vlan=new_vlan)
-
-        # Dedicating first range
-        PhysicalNetwork.dedicate(
-                                 self.apiclient,
-                                 vlan_range1,
-                                 physicalnetworkid=self.physical_network.id,
-                                 account=self.account.name,
-                                 domainid=self.account.domainid
-                                 )
-
-        # Dedicating second range
-        PhysicalNetwork.dedicate(
-                                 self.apiclient,
-                                 vlan_range2,
-                                 physicalnetworkid=self.physical_network.id,
-                                 account=self.account.name,
-                                 domainid=self.account.domainid
-                                 )
-
-        dedicated_ranges = PhysicalNetwork.listDedicated(
-                                                self.apiclient,
-                                                account=self.account.name,
-                                                domainid=self.account.domainid,
-                                                listall=True
-                                                )
-        self.assertEqual(str(dedicated_ranges[0].guestvlanrange), full_range, "Dedicated vlan\
-                         range not matching with expected extended range")
-
-        return
-
-class TestFailureScenarios(cloudstackTestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        cls.testClient = super(TestFailureScenarios, cls).getClsTestClient()
-        cls.apiclient = cls.testClient.getApiClient()
-        cls.testdata =  cls.testClient.getParsedTestDataConfig()
-        # Get Zone, Domain
-        cls.domain = get_domain(cls.apiclient)
-        cls.zone = get_zone(cls.apiclient)
-        cls.testdata["isolated_network"]["zoneid"] = cls.zone.id
-        cls.testdata['mode'] = cls.zone.networktype
-        template = get_template(
-            cls.apiclient,
-            cls.zone.id,
-            cls.testdata["ostype"]
-            )
-        cls._cleanup = []
-
-        try:
-            cls.isolated_network_offering = NetworkOffering.create(
-                          cls.apiclient,
-                          cls.testdata["nw_off_isolated_persistent"])
-            cls._cleanup.append(cls.isolated_network_offering)
-            cls.isolated_network_offering.update(cls.apiclient, state='Enabled')
-
-            cls.testdata["nw_off_isolated_persistent"]["specifyVlan"] = True
-            cls.isolated_network_offering_vlan = NetworkOffering.create(
-                          cls.apiclient,
-                          cls.testdata["nw_off_isolated_persistent"])
-            cls._cleanup.append(cls.isolated_network_offering_vlan)
-            cls.isolated_network_offering_vlan.update(cls.apiclient, state='Enabled')
-
-            cls.service_offering = ServiceOffering.create(
-                                                          cls.apiclient,
-                                                          cls.testdata["service_offering"])
-            cls._cleanup.append(cls.service_offering)
-
-            cls.testdata["small"]["zoneid"] = cls.zone.id
-            cls.testdata["small"]["template"] = template.id
-        except Exception as e:
-            cls.tearDownClass()
-            raise unittest.SkipTest(e)
-        return
-
-    @classmethod
-    def tearDownClass(cls):
-        try:
-            # Cleanup resources used
-            cleanup_resources(cls.apiclient, cls._cleanup)
-        except Exception as e:
-            raise Exception("Warning: Exception during cleanup : %s" % e)
-        return
-
-    def setUp(self):
-        self.apiclient = self.testClient.getApiClient()
-        self.dbclient = self.testClient.getDbConnection()
-        self.cleanup = []
-        self.physical_network, self.free_vlan = setNonContiguousVlanIds(self.apiclient,
-                                                                            self.zone.id)
-        return
-
-    def tearDown(self):
-        try:
-            # Clean up
-            cleanup_resources(self.apiclient, self.cleanup)
-        except Exception as e:
-            raise Exception("Warning: Exception during cleanup : %s" % e)
-        finally:
-            self.physical_network.update(self.apiclient,
-                        id=self.physical_network.id,
-                        vlan=self.physical_network.vlan)
-        return
-
-    @attr(tags = ["advanced", "selfservice"], required_hardware="false")
-    def test_01_dedicate_wrong_vlan_range(self):
-        """Dedicate invalid vlan range to account
-
-        # Validate the following:
-        # 1. Create an account in root domain
-        # 2. Try to update physical network with invalid range (5000-5001)
-             and dedicate it to account
-        # 3. The operation should fail
-        """
-        self.account = Account.create(
-                            self.apiclient,
-                            self.testdata["account"],
-                            domainid=self.domain.id
-                            )
-        self.cleanup.append(self.account)
-
-        vlan_range = "5000-5001"
-
-        new_vlan = self.physical_network.vlan + "," + vlan_range
-
-        with self.assertRaises(Exception):
-            self.physical_network.update(self.apiclient,
-                                         id=self.physical_network.id,
-                                         vlan=new_vlan)
-
-            # Dedicating guest vlan range
-            PhysicalNetwork.dedicate(
-                                     self.apiclient,
-                                     vlan_range,
-                                     physicalnetworkid=self.physical_network.id,
-                                     account=self.account.name,
-                                     domainid=self.account.domainid
-                                    )
-        return
-
-    @attr(tags = ["advanced", "selfservice"], required_hardware="false")
-    def test_02_dedicate_vlan_range_invalid_account(self):
-        """Dedicate a guest vlan range to invalid account
-
-        # Validate the following:
-        # 1. Create an account in root domain
-        # 2. Update physical network with new guest vlan range
-        # 3. Try to dedicate it to invalid account
-        # 4. The operation should fail
-        """
-        self.account = Account.create(
-                            self.apiclient,
-                            self.testdata["account"],
-                            domainid=self.domain.id
-                            )
-        self.cleanup.append(self.account)
-
-        new_vlan = self.physical_network.vlan + "," + self.free_vlan["partial_range"][0]
-        self.physical_network.update(self.apiclient,
-                                     id=self.physical_network.id,
-                                     vlan=new_vlan)
-
-        with self.assertRaises(Exception):
-            # Dedicating guest vlan range
-            PhysicalNetwork.dedicate(
-                                     self.apiclient,
-                                     self.free_vlan["partial_range"][0],
-                                     physicalnetworkid=self.physical_network.id,
-                                     account=self.account.name+random_gen(),
-                                     domainid=self.account.domainid
-                                    )
-        return
-
-    @attr(tags = ["advanced", "selfservice"], required_hardware="false")
-    def test_03_dedicate_already_dedicated_range(self):
-        """Dedicate a guest vlan range which is already dedicated
-
-        # Validate the following:
-        # 1. Create two accounts in root domain
-        # 2. Update physical network with new guest vlan range
-        # 3. Dedicate the vlan range to account 1
-        # 4. Try to dedicate the same range to account 2, operation should fail
-        """
-        self.account1 = Account.create(
-                            self.apiclient,
-                            self.testdata["account"],
-                            domainid=self.domain.id
-                            )
-        self.cleanup.append(self.account1)
-
-        self.account2 = Account.create(
-                            self.apiclient,
-                            self.testdata["account"],
-                            domainid=self.domain.id
-                            )
-        self.cleanup.append(self.account2)
-
-        new_vlan = self.physical_network.vlan + "," + self.free_vlan["partial_range"][0]
-        self.physical_network.update(self.apiclient,
-                                     id=self.physical_network.id,
-                                     vlan=new_vlan)
-
-        # Dedicating guest vlan range
-        PhysicalNetwork.dedicate(
-                                     self.apiclient,
-                                     self.free_vlan["partial_range"][0],
-                                     physicalnetworkid=self.physical_network.id,
-                                     account=self.account1.name,
-                                     domainid=self.account1.domainid
-                                    )
-
-        with self.assertRaises(Exception):
-            # Dedicating guest vlan range
-            PhysicalNetwork.dedicate(
-                                     self.apiclient,
-                                     self.free_vlan["partial_range"][0],
-                                     physicalnetworkid=self.physical_network.id,
-                                     account=self.account2.name,
-                                     domainid=self.account2.domainid
-                                    )
-        return
-
-class TestDeleteVlanRange(cloudstackTestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        cls.testClient = super(TestDeleteVlanRange, cls).getClsTestClient()
-        cls.apiclient = cls.testClient.getApiClient()
-        cls.testdata =  cls.testClient.getParsedTestDataConfig()
-        # Get Zone, Domain
-        cls.domain = get_domain(cls.apiclient)
-        cls.zone = get_zone(cls.apiclient)
-        cls.testdata["isolated_network"]["zoneid"] = cls.zone.id
-        cls.testdata['mode'] = cls.zone.networktype
-        template = get_template(
-            cls.apiclient,
-            cls.zone.id,
-            cls.testdata["ostype"]
-            )
-        cls._cleanup = []
-
-        try:
-            cls.isolated_persistent_network_offering = NetworkOffering.create(
-                          cls.apiclient,
-                          cls.testdata["nw_off_isolated_persistent"])
-            cls._cleanup.append(cls.isolated_persistent_network_offering)
-            cls.isolated_persistent_network_offering.update(cls.apiclient, state='Enabled')
-
-            cls.isolated_network_offering = NetworkOffering.create(
-                          cls.apiclient,
-                          cls.testdata["isolated_network_offering"])
-            cls._cleanup.append(cls.isolated_network_offering)
-            cls.isolated_network_offering.update(cls.apiclient, state='Enabled')
-
-            cls.testdata["nw_off_isolated_persistent"]["specifyvlan"] = True
-            cls.isolated_network_offering_vlan = NetworkOffering.create(
-                          cls.apiclient,
-                          cls.testdata["nw_off_isolated_persistent"])
-            cls._cleanup.append(cls.isolated_network_offering_vlan)
-            cls.isolated_network_offering_vlan.update(cls.apiclient, state='Enabled')
-
-            cls.service_offering = ServiceOffering.create(
-                                                          cls.apiclient,
-                                                          cls.testdata["service_offering"])
-            cls._cleanup.append(cls.service_offering)
-
-            cls.testdata["small"]["zoneid"] = cls.zone.id
-            cls.testdata["small"]["template"] = template.id
-        except Exception as e:
-            cls.tearDownClass()
-            raise unittest.SkipTest(e)
-        return
-
-    @classmethod
-    def tearDownClass(cls):
-        try:
-            # Cleanup resources used
-            cleanup_resources(cls.apiclient, cls._cleanup)
-        except Exception as e:
-            raise Exception("Warning: Exception during cleanup : %s" % e)
-        return
-
-    def setUp(self):
-        self.apiclient = self.testClient.getApiClient()
-        self.dbclient = self.testClient.getDbConnection()
-        self.cleanup = []
-        self.physical_network, self.free_vlan = setNonContiguousVlanIds(self.apiclient,
-                                                                            self.zone.id)
-        return
-
-    def tearDown(self):
-        try:
-            # Clean up
-            cleanup_resources(self.apiclient, self.cleanup)
-        except Exception as e:
-            raise Exception("Warning: Exception during cleanup : %s" % e)
-        finally:
-            self.physical_network.update(self.apiclient,
-                        id=self.physical_network.id,
-                        vlan=self.physical_network.vlan)
-        return
-
-    @attr(tags = ["advanced", "selfservice"], required_hardware="false")
-    def test_01_delete_dedicated_vlan_range(self):
-        """Try to delete a dedicated vlan range which is not in use
-
-        # Validate the following:
-        # 1. Create an account in the root domain
-        # 2. Update the physical network with a new vlan range
-        # 3. Dedicate this vlan range to the account
-        # 4. Verify that the vlan range is dedicated to the account by listing it
-             and verifying the account name
-        # 5. Try to delete the vlan range by updating the physical network vlan, operation should fail
-        # 6. Release the dedicated range and then delete the vlan range
-        # 7. The operation should succeed
-        """
-        self.account = Account.create(
-                            self.apiclient,
-                            self.testdata["account"],
-                            domainid=self.domain.id
-                            )
-        self.cleanup.append(self.account)
-        new_vlan = self.free_vlan["partial_range"][0]
-        extended_vlan = self.physical_network.vlan + "," + new_vlan
-
-        self.physical_network.update(self.apiclient,
-                                         id=self.physical_network.id,
-                                         vlan=extended_vlan)
-
-        # Dedicating guest vlan range
-        dedicate_guest_vlan_range_response = PhysicalNetwork.dedicate(
-                                                self.apiclient,
-                                                self.free_vlan["partial_range"][0],
-                                                physicalnetworkid=self.physical_network.id,
-                                                account=self.account.name,
-                                                domainid=self.account.domainid
-                                                )
-        list_dedicated_guest_vlan_range_response = PhysicalNetwork.listDedicated(
-                                                self.apiclient,
-                                                id=dedicate_guest_vlan_range_response.id
-                                        )
-        dedicated_guest_vlan_response = list_dedicated_guest_vlan_range_response[0]
-        self.assertEqual(
-                            dedicated_guest_vlan_response.account,
-                            self.account.name,
-                            "Check account name is in listDedicatedGuestVlanRanges as the account the range is dedicated to"
-                        )
-
-        with self.assertRaises(Exception):
-            # Deleting the dedicated vlan range
-            self.physical_network.update(self.apiclient,
-                        id=self.physical_network.id,
-                        vlan=self.physical_network.vlan)
-
-        dedicate_guest_vlan_range_response.release(self.apiclient)
-        self.physical_network.update(self.apiclient,
-                        id=self.physical_network.id,
-                        vlan=self.physical_network.vlan)
-        physical_networks = PhysicalNetwork.list(self.apiclient, id=self.physical_network.id, listall=True)
-        self.assertEqual(validateList(physical_networks)[0], PASS, "Physical networks list validation failed")
-        vlans = xsplit(physical_networks[0].vlan, [','])
-        self.assertFalse(new_vlan in vlans, "newly added vlan is not deleted from physical network")
-
-    @attr(tags = ["advanced", "selfservice"], required_hardware="false")
-    def test_02_delete_dedicated_vlan_range_vlan_in_use(self):
-        """Try to delete a dedicated vlan rang which is in use
-
-        # Validate the following:
-        # 1. Creat an account in the root domain
-        # 2. update the physical network with a new vlan range
-        # 3. Dedicated this vlan range to the account
-        # 4. Verify that the vlan range is dedicated to the account by listing it
-             and verifying the account name
-        # 5. Create a guest network in the account and verify that the vlan of network
-             is from the dedicated range
-        # 6. Try to delete the vlan range by updating physical network vlan
-        # 7. The operation should fail
-        """
-        self.account = Account.create(
-                            self.apiclient,
-                            self.testdata["account"],
-                            domainid=self.domain.id
-                            )
-        self.cleanup.append(self.account)
-
-        new_vlan = self.physical_network.vlan + "," + self.free_vlan["partial_range"][0]
-
-        self.physical_network.update(self.apiclient,
-                                         id=self.physical_network.id,
-                                         vlan=new_vlan)
-
-        # Dedicating guest vlan range
-        dedicate_guest_vlan_range_response = PhysicalNetwork.dedicate(
-                                                self.apiclient,
-                                                self.free_vlan["partial_range"][0],
-                                                physicalnetworkid=self.physical_network.id,
-                                                account=self.account.name,
-                                                domainid=self.account.domainid
-                                                )
-        list_dedicated_guest_vlan_range_response = PhysicalNetwork.listDedicated(
-                                                self.apiclient,
-                                                id=dedicate_guest_vlan_range_response.id
-                                        )
-        dedicated_guest_vlan_response = list_dedicated_guest_vlan_range_response[0]
-        self.assertEqual(
-                            dedicated_guest_vlan_response.account,
-                            self.account.name,
-                            "Check account name is in listDedicatedGuestVlanRanges as the account the range is dedicated to"
-                        )
-
-        Network.create(
-                                   self.apiclient,
-                                   self.testdata["isolated_network"],
-                                   self.account.name,
-                                   self.account.domainid,
-                                   networkofferingid=self.isolated_persistent_network_offering.id)
-
-        with self.assertRaises(Exception):
-            # Deleting the dedicated vlan range
-            self.physical_network.update(self.apiclient,
-                        id=self.physical_network.id,
-                        vlan=self.physical_network.vlan)
-        return
-
-    @attr(tags = ["advanced", "selfservice"], required_hardware="false")
-    def test_03_delete_account(self):
-        """Try to delete a dedicated vlan rang which is in use
-
-        # Validate the following:
-        # 1. Creat an account in the root domain
-        # 2. Update the physical network with a new vlan range
-        # 3. Dedicated this vlan range to the account
-        # 4. Verify that the vlan range is dedicated to the account by listing it
-             and verifying the account name
-        # 5. Create a guest network in the account which consumes vlan from dedicated range
-        # 6. Delete the account
-        # 7. Verify that the vlan of the physical network remains the same
-        """
-        self.account = Account.create(
-                            self.apiclient,
-                            self.testdata["account"],
-                            domainid=self.domain.id
-                            )
-        self.cleanup.append(self.account)
-
-        new_vlan = self.physical_network.vlan + "," + self.free_vlan["partial_range"][0]
-
-        self.physical_network.update(self.apiclient,
-                                         id=self.physical_network.id,
-                                         vlan=new_vlan)
-
-        # Dedicating guest vlan range
-        dedicate_guest_vlan_range_response = PhysicalNetwork.dedicate(
-                                                self.apiclient,
-                                                self.free_vlan["partial_range"][0],
-                                                physicalnetworkid=self.physical_network.id,
-                                                account=self.account.name,
-                                                domainid=self.account.domainid
-                                                )
-        list_dedicated_guest_vlan_range_response = PhysicalNetwork.listDedicated(
-                                                self.apiclient,
-                                                id=dedicate_guest_vlan_range_response.id
-                                        )
-        dedicated_guest_vlan_response = list_dedicated_guest_vlan_range_response[0]
-        self.assertEqual(
-                            dedicated_guest_vlan_response.account,
-                            self.account.name,
-                            "Check account name is in listDedicatedGuestVlanRanges as the account the range is dedicated to"
-                        )
-
-        Network.create(
-                                   self.apiclient,
-                                   self.testdata["isolated_network"],
-                                   self.account.name,
-                                   self.account.domainid,
-                                   networkofferingid=self.isolated_persistent_network_offering.id)
-
-        self.account.delete(self.apiclient)
-        self.cleanup.remove(self.account)
-
-        physical_networks = PhysicalNetwork.list(self.apiclient, id=self.physical_network.id, listall=True)
-        self.assertEqual(validateList(physical_networks)[0], PASS, "Physical networks list validation failed")
-        self.assertEqual(physical_networks[0].vlan, new_vlan, "The vlan of physical network \
-                         should be same after deleting account")
-        return
-
-    @attr(tags = ["advanced", "selfservice"], required_hardware="false")
-    def test_04_release_range_no_vlan_in_use(self):
-        """Release a dedicated vlan range when no vlan id is in use
-
-        # Validate the following:
-        # 1. Create account in root domain
-        # 2. Dedicate a new vlan range to account
-        # 3. Verify that the new vlan range is dedicated to account
-             by listing the dedicated range and checking the account name
-        # 4. Release the range
-        # 5. Verify the range is released back to system by listing dedicated ranges (list should be empty)
-        """
-        self.account1 = Account.create(
-                            self.apiclient,
-                            self.testdata["account"],
-                            domainid=self.domain.id
-                            )
-        self.cleanup.append(self.account1)
-
-        new_vlan = self.physical_network.vlan + "," + self.free_vlan["partial_range"][0]
-        self.physical_network.update(self.apiclient,
-                id=self.physical_network.id, vlan=new_vlan)
-
-        # Dedicating guest vlan range
-        dedicate_guest_vlan_range_response = PhysicalNetwork.dedicate(
-                                                self.apiclient,
-                                                self.free_vlan["partial_range"][0],
-                                                physicalnetworkid=self.physical_network.id,
-                                                account=self.account1.name,
-                                                domainid=self.account1.domainid
-                                            )
-        list_dedicated_guest_vlan_range_response = PhysicalNetwork.listDedicated(
-                                                self.apiclient,
-                                                id=dedicate_guest_vlan_range_response.id
-                                        )
-        dedicated_guest_vlan_response = list_dedicated_guest_vlan_range_response[0]
-        self.assertEqual(
-                            dedicated_guest_vlan_response.account,
-                            self.account1.name,
-                            "Check account name is in listDedicatedGuestVlanRanges as the account the range is dedicated to"
-                        )
-
-        self.debug("Releasing guest vlan range");
-        dedicate_guest_vlan_range_response.release(self.apiclient)
-        list_dedicated_guest_vlan_range_response = PhysicalNetwork.listDedicated(self.apiclient)
-        self.assertEqual(
-                        list_dedicated_guest_vlan_range_response,
-                        None,
-                        "Check vlan range is not available in listDedicatedGuestVlanRanges"
-
-                        )
-        return
-
-    @attr(tags = ["advanced", "selfservice"], required_hardware="false")
-    def test_05_release_range_vlan_in_use(self):
-        """Release a dedicated vlan range when no vlan id is in use
-
-        # Validate the following:
-        # 1. Create account in root domain
-        # 2. Dedicate a new vlan range to account
-        # 3. Verify that the new vlan range is dedicated to account
-             by listing the dedicated range and checking the account name
-        # 4. Release the range
-        # 5. The operation should succeed, as all vlans which are not in use should be released
-        """
-        self.account1 = Account.create(
-                            self.apiclient,
-                            self.testdata["account"],
-                            domainid=self.domain.id
-                            )
-        self.cleanup.append(self.account1)
-
-        new_vlan = self.physical_network.vlan + "," + self.free_vlan["partial_range"][0]
-        self.physical_network.update(self.apiclient,
-                id=self.physical_network.id, vlan=new_vlan)
-
-        # Dedicating guest vlan range
-        dedicate_guest_vlan_range_response = PhysicalNetwork.dedicate(
-                                                self.apiclient,
-                                                self.free_vlan["partial_range"][0],
-                                                physicalnetworkid=self.physical_network.id,
-                                                account=self.account1.name,
-                                                domainid=self.account1.domainid
-                                            )
-        list_dedicated_guest_vlan_range_response = PhysicalNetwork.listDedicated(
-                                                self.apiclient,
-                                                id=dedicate_guest_vlan_range_response.id
-                                                )
-        dedicated_guest_vlan_response = list_dedicated_guest_vlan_range_response[0]
-        self.assertEqual(
-                            dedicated_guest_vlan_response.account,
-                            self.account1.name,
-                            "Check account name is in listDedicatedGuestVlanRanges as the account the range is dedicated to"
-                        )
-
-        dedicatedvlans = str(self.free_vlan["partial_range"][0]).split("-")
-
-        isolated_network = Network.create(
-                                   self.apiclient,
-                                   self.testdata["isolated_network"],
-                                   self.account1.name,
-                                   self.account1.domainid,
-                                   networkofferingid=self.isolated_persistent_network_offering.id)
-
-        networks = Network.list(self.apiclient, id=isolated_network.id)
-        self.assertEqual(validateList(networks)[0], PASS, "networks list validation failed")
-
-        self.assertTrue(int(dedicatedvlans[0]) <= int(networks[0].vlan) <= int(dedicatedvlans[1]),
-                        "Vlan of the network should be from the dedicated range")
-
-        self.debug("Releasing guest vlan range");
-        dedicate_guest_vlan_range_response.release(self.apiclient)
-        list_dedicated_guest_vlan_range_response = PhysicalNetwork.listDedicated(self.apiclient)
-        self.assertEqual(
-                        list_dedicated_guest_vlan_range_response,
-                        None,
-                        "Check vlan range is not available in listDedicatedGuestVlanRanges"
-
-                        )
-        return
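
The tests removed above all exercise the same dedicate/list/release lifecycle around marvin's PhysicalNetwork helper. A minimal standalone sketch of that flow follows; it assumes an initialized apiclient, an existing physical network object, a free guest vlan range string such as "100-110", an account name/domain id pair, and that PhysicalNetwork is importable from marvin.lib.base (all of these are assumptions of the sketch, not part of the commit):

    from marvin.lib.base import PhysicalNetwork

    def dedicate_list_release(apiclient, physical_network, vlan_range, account, domainid):
        # Dedicate the guest vlan range to the account
        dedicated_range = PhysicalNetwork.dedicate(
            apiclient,
            vlan_range,
            physicalnetworkid=physical_network.id,
            account=account,
            domainid=domainid)

        # Confirm the dedication by listing it and checking the owning account
        listed = PhysicalNetwork.listDedicated(apiclient, id=dedicated_range.id)
        assert listed[0].account == account

        # Release the range back to the system; listDedicated then returns
        # None for this range, as the tests above assert
        dedicated_range.release(apiclient)
        return PhysicalNetwork.listDedicated(apiclient)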


[08/50] [abbrv] git commit: updated refs/heads/reporter to b26f3fc

Posted by wi...@apache.org.
Show Warning instead of Confirmation in the confirm dialog if isWarning is set to true


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/54db0d2a
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/54db0d2a
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/54db0d2a

Branch: refs/heads/reporter
Commit: 54db0d2a701767de4b7980802f8662eecb9a610b
Parents: 0c4128e
Author: Wei Zhou <w....@tech.leaseweb.com>
Authored: Fri Dec 19 10:06:31 2014 +0100
Committer: Wei Zhou <w....@tech.leaseweb.com>
Committed: Fri Dec 19 10:06:31 2014 +0100

----------------------------------------------------------------------
 client/WEB-INF/classes/resources/messages.properties | 1 +
 ui/dictionary2.jsp                                   | 1 +
 ui/scripts/instances.js                              | 2 ++
 ui/scripts/ui/dialog.js                              | 2 +-
 4 files changed, 5 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/54db0d2a/client/WEB-INF/classes/resources/messages.properties
----------------------------------------------------------------------
diff --git a/client/WEB-INF/classes/resources/messages.properties b/client/WEB-INF/classes/resources/messages.properties
index 77d80a7..990bb31 100644
--- a/client/WEB-INF/classes/resources/messages.properties
+++ b/client/WEB-INF/classes/resources/messages.properties
@@ -1275,6 +1275,7 @@ label.vsmstoragevlanid=Storage VLAN ID
 label.vsphere.managed=vSphere Managed
 label.waiting=Waiting
 label.warn=Warn
+label.warning=Warning
 label.wednesday=Wednesday
 label.weekly=Weekly
 label.welcome.cloud.console=Welcome to Management Console

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/54db0d2a/ui/dictionary2.jsp
----------------------------------------------------------------------
diff --git a/ui/dictionary2.jsp b/ui/dictionary2.jsp
index 635a374..1b2089c 100644
--- a/ui/dictionary2.jsp
+++ b/ui/dictionary2.jsp
@@ -256,6 +256,7 @@ under the License.
 'label.vsphere.managed': '<fmt:message key="label.vsphere.managed" />',
 'label.waiting': '<fmt:message key="label.waiting" />',
 'label.warn': '<fmt:message key="label.warn" />',
+'label.warning': '<fmt:message key="label.warning" />',
 'label.wednesday': '<fmt:message key="label.wednesday" />',
 'label.weekly': '<fmt:message key="label.weekly" />',
 'label.welcome.cloud.console': '<fmt:message key="label.welcome.cloud.console" />',

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/54db0d2a/ui/scripts/instances.js
----------------------------------------------------------------------
diff --git a/ui/scripts/instances.js b/ui/scripts/instances.js
index 90ce2b1..d439c66 100644
--- a/ui/scripts/instances.js
+++ b/ui/scripts/instances.js
@@ -675,6 +675,7 @@
                             confirm: function(args) {
                                 return 'message.action.expunge.instance';
                             },
+                            isWarning: true,
                             notification: function(args) {
                                 return 'label.action.expunge.instance';
                             }
@@ -741,6 +742,7 @@
                             confirm: function(args) {
                                 return 'message.reinstall.vm';
                             },
+                            isWarning: true,
                             notification: function(args) {
                                 return 'label.reinstall.vm';
                             },

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/54db0d2a/ui/scripts/ui/dialog.js
----------------------------------------------------------------------
diff --git a/ui/scripts/ui/dialog.js b/ui/scripts/ui/dialog.js
index 610cb1a..e5ac14b 100644
--- a/ui/scripts/ui/dialog.js
+++ b/ui/scripts/ui/dialog.js
@@ -827,7 +827,7 @@
                     _l(args.message)
                 )
             ).dialog({
-                title: _l('label.confirmation'),
+                title: args.isWarning ? _l('label.warning') : _l('label.confirmation'),
                 dialogClass: args.isWarning ? 'confirm warning': 'confirm',
                 closeOnEscape: false,
                 zIndex: 5000,


[35/50] [abbrv] git commit: updated refs/heads/reporter to b26f3fc

Posted by wi...@apache.org.
CLOUDSTACK-8116: Moved ldap data to configurableData section in test_data.py and made related changes in the test case

Signed-off-by: SrikanteswaraRao Talluri <ta...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/0ed40140
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/0ed40140
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/0ed40140

Branch: refs/heads/reporter
Commit: 0ed40140a67737c91a8e2ab15b3d55073c8eedf7
Parents: f420dd5
Author: Gaurav Aradhye <ga...@clogeny.com>
Authored: Tue Dec 23 17:29:31 2014 +0530
Committer: SrikanteswaraRao Talluri <ta...@apache.org>
Committed: Wed Dec 24 11:10:18 2014 +0530

----------------------------------------------------------------------
 test/integration/component/test_ldap.py | 122 ++++++++++++++++-----------
 tools/marvin/marvin/config/test_data.py |  35 ++++----
 2 files changed, 88 insertions(+), 69 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/0ed40140/test/integration/component/test_ldap.py
----------------------------------------------------------------------
diff --git a/test/integration/component/test_ldap.py b/test/integration/component/test_ldap.py
index 5a78957..3464022 100644
--- a/test/integration/component/test_ldap.py
+++ b/test/integration/component/test_ldap.py
@@ -20,22 +20,19 @@
 
 
 #!/usr/bin/env python
-
-import marvin
-from marvin import cloudstackTestCase
-from marvin.cloudstackTestCase import *
-import unittest
-import hashlib
-import random
-from marvin.cloudstackAPI import *
+from marvin.cloudstackTestCase import cloudstackTestCase
+from marvin.cloudstackAPI import (updateConfiguration,
+                                  createAccount,
+                                  deleteAccount,
+                                  addLdapConfiguration,
+                                  deleteLdapConfiguration)
 from marvin.cloudstackAPI import login
-from marvin.lib.utils import *
-from marvin.lib.base import *
-from marvin.lib.common import *
+from marvin.lib.utils import cleanup_resources
 from nose.plugins.attrib import attr
-import urllib
+
 
 class TestLdap(cloudstackTestCase):
+
     """
     This tests attempts to register a LDAP server and authenticate as an LDAP user.
     """
@@ -46,11 +43,8 @@ class TestLdap(cloudstackTestCase):
         testClient = super(TestLdap, cls).getClsTestClient()
         cls.api_client = testClient.getApiClient()
         cls.services = testClient.getParsedTestDataConfig()
-        cls.account = cls.services["ldap_account"]
         cls._cleanup = []
 
-
-
     @classmethod
     def tearDownClass(cls):
         try:
@@ -66,12 +60,18 @@ class TestLdap(cloudstackTestCase):
 
         self.acct = createAccount.createAccountCmd()
         self.acct.accounttype = 0
-        self.acct.firstname = self.services["ldap_account"]["firstname"]
-        self.acct.lastname = self.services["ldap_account"]["lastname"]
-        self.acct.password = self.services["ldap_account"]["password"]
-        self.acct.username = self.services["ldap_account"]["username"]
-        self.acct.email = self.services["ldap_account"]["email"]
-        self.acct.account = self.services["ldap_account"]["username"]
+        self.acct.firstname = self.services[
+            "configurableData"]["ldap_account"]["firstname"]
+        self.acct.lastname = self.services[
+            "configurableData"]["ldap_account"]["lastname"]
+        self.acct.password = self.services[
+            "configurableData"]["ldap_account"]["password"]
+        self.acct.username = self.services[
+            "configurableData"]["ldap_account"]["username"]
+        self.acct.email = self.services[
+            "configurableData"]["ldap_account"]["email"]
+        self.acct.account = self.services[
+            "configurableData"]["ldap_account"]["username"]
         self.acct.domainid = 1
 
         self.acctRes = self.apiClient.createAccount(self.acct)
@@ -84,14 +84,17 @@ class TestLdap(cloudstackTestCase):
             deleteAcct = deleteAccount.deleteAccountCmd()
             deleteAcct.id = self.acctRes.id
 
-            acct_name=self.acctRes.name
+            acct_name = self.acctRes.name
 
             self.apiClient.deleteAccount(deleteAcct)
 
-            self.debug("Deleted the the following account name %s:" %acct_name)
+            self.debug(
+                "Deleted the the following account name %s:" %
+                acct_name)
 
-            if(self.ldapconfRes==1):
-                self._deleteLdapConfiguration(self.services["ldapConfiguration_1"])
+            if(self.ldapconfRes == 1):
+                self._deleteLdapConfiguration(
+                    self.services["configurableData"]["ldap_configuration"])
 
         except Exception as e:
             raise Exception("Warning: Exception during cleanup : %s" % e)
@@ -103,30 +106,33 @@ class TestLdap(cloudstackTestCase):
         This test configures LDAP and attempts to authenticate as a user.
         """
 
-
         self.debug("start test")
 
-        self.ldapconfRes=self._addLdapConfiguration(self.services["ldapConfiguration_1"])
+        self.ldapconfRes = self._addLdapConfiguration(
+            self.services["configurableData"]["ldap_configuration"])
 
-        if(self.ldapconfRes==1):
+        if(self.ldapconfRes == 1):
 
             self.debug("Ldap Configuration was succcessful")
 
-            loginRes = self._checkLogin(self.services["ldapConfiguration_1"]["ldapUsername"],self.services["ldapConfiguration_1"]["ldapPassword"])
+            loginRes = self._checkLogin(
+                self.services["configurableData"]["ldap_configuration"]["ldapUsername"],
+                self.services["configurableData"]["ldap_configuration"]["ldapPassword"])
             self.debug(loginRes)
-            self.assertEquals(loginRes,1,"Ldap Authentication")
+            self.assertEquals(loginRes, 1, "Ldap Authentication")
 
         else:
 
             self.debug("LDAP Configuration failed with exception")
 
-            self.assertEquals(self.ldapconfRes,1,"addLdapConfiguration failed")
-
+            self.assertEquals(
+                self.ldapconfRes,
+                1,
+                "addLdapConfiguration failed")
 
         self.debug("end test")
 
-    def _addLdapConfiguration(self,ldapConfiguration):
-
+    def _addLdapConfiguration(self, ldapConfiguration):
         """
 
         :param ldapConfiguration
@@ -138,27 +144,42 @@ class TestLdap(cloudstackTestCase):
         updateConfigurationCmd = updateConfiguration.updateConfigurationCmd()
         updateConfigurationCmd.name = "ldap.basedn"
         updateConfigurationCmd.value = ldapConfiguration['basedn']
-        updateConfigurationResponse = self.apiClient.updateConfiguration(updateConfigurationCmd)
-        self.debug("updated the parameter %s with value %s"%(updateConfigurationResponse.name, updateConfigurationResponse.value))
+        updateConfigurationResponse = self.apiClient.updateConfiguration(
+            updateConfigurationCmd)
+        self.debug(
+            "updated the parameter %s with value %s" %
+            (updateConfigurationResponse.name,
+             updateConfigurationResponse.value))
 
         updateConfigurationCmd = updateConfiguration.updateConfigurationCmd()
         updateConfigurationCmd.name = "ldap.email.attribute"
         updateConfigurationCmd.value = ldapConfiguration['emailAttribute']
-        updateConfigurationResponse = self.apiClient.updateConfiguration(updateConfigurationCmd)
-        self.debug("updated the parameter %s with value %s"%(updateConfigurationResponse.name, updateConfigurationResponse.value))
+        updateConfigurationResponse = self.apiClient.updateConfiguration(
+            updateConfigurationCmd)
+        self.debug(
+            "updated the parameter %s with value %s" %
+            (updateConfigurationResponse.name,
+             updateConfigurationResponse.value))
 
         updateConfigurationCmd = updateConfiguration.updateConfigurationCmd()
         updateConfigurationCmd.name = "ldap.user.object"
         updateConfigurationCmd.value = ldapConfiguration['userObject']
-        updateConfigurationResponse = self.apiClient.updateConfiguration(updateConfigurationCmd)
-        self.debug("updated the parameter %s with value %s"%(updateConfigurationResponse.name, updateConfigurationResponse.value))
-
+        updateConfigurationResponse = self.apiClient.updateConfiguration(
+            updateConfigurationCmd)
+        self.debug(
+            "updated the parameter %s with value %s" %
+            (updateConfigurationResponse.name,
+             updateConfigurationResponse.value))
 
         updateConfigurationCmd = updateConfiguration.updateConfigurationCmd()
         updateConfigurationCmd.name = "ldap.username.attribute"
         updateConfigurationCmd.value = ldapConfiguration['usernameAttribute']
-        updateConfigurationResponse = self.apiClient.updateConfiguration(updateConfigurationCmd)
-        self.debug("updated the parameter %s with value %s"%(updateConfigurationResponse.name, updateConfigurationResponse.value))
+        updateConfigurationResponse = self.apiClient.updateConfiguration(
+            updateConfigurationCmd)
+        self.debug(
+            "updated the parameter %s with value %s" %
+            (updateConfigurationResponse.name,
+             updateConfigurationResponse.value))
 
         self.debug("start addLdapConfiguration test")
 
@@ -171,12 +192,11 @@ class TestLdap(cloudstackTestCase):
             self.apiClient.addLdapConfiguration(ldapServer)
             self.debug("addLdapConfiguration was successful")
             return 1
-        except Exception, e:
-            self.debug("addLdapConfiguration failed %s" %e)
+        except Exception as e:
+            self.debug("addLdapConfiguration failed %s" % e)
             return 0
 
-    def _deleteLdapConfiguration(self,ldapConfiguration):
-
+    def _deleteLdapConfiguration(self, ldapConfiguration):
         """
 
         :param ldapConfiguration
@@ -190,8 +210,8 @@ class TestLdap(cloudstackTestCase):
             self.apiClient.deleteLdapConfiguration(ldapServer)
             self.debug("deleteLdapConfiguration was successful")
             return 1
-        except Exception, e:
-            self.debug("deleteLdapConfiguration failed %s" %e)
+        except Exception as e:
+            self.debug("deleteLdapConfiguration failed %s" % e)
             return 0
 
     def _checkLogin(self, username, password):
@@ -216,6 +236,6 @@ class TestLdap(cloudstackTestCase):
                 self.debug("login successful")
                 return 1
 
-        except Exception, p:
-            self.debug("login operation failed %s" %p)
+        except Exception as p:
+            self.debug("login operation failed %s" % p)
         self.debug("end of Login")

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/0ed40140/tools/marvin/marvin/config/test_data.py
----------------------------------------------------------------------
diff --git a/tools/marvin/marvin/config/test_data.py b/tools/marvin/marvin/config/test_data.py
index f123dd0..d5ed353 100644
--- a/tools/marvin/marvin/config/test_data.py
+++ b/tools/marvin/marvin/config/test_data.py
@@ -1391,24 +1391,6 @@ test_data = {
                 },
                 "ostype": 'CentOS 5.6 (64-bit)',
         },
-        "ldap_account": {
-            "email": "rmurphy@cloudstack.org",
-            "firstname": "Ryan",
-            "lastname": "Murphy",
-            "username": "rmurphy",
-            "password": "internalcloudstackpassword",
-            },
-        "ldapConfiguration_1": {
-            "basedn": "dc=cloudstack,dc=org",
-            "emailAttribute": "mail",
-            "userObject": "inetOrgPerson",
-            "usernameAttribute": "uid",
-            "hostname": "localhost",
-            "port": "10389",
-            "ldapUsername": "rmurphy",
-            "ldapPassword": "password"
-            },
-
       "test_34_DeployVM_in_SecondSGNetwork": {
           "zone": "advsg",
           "config": "D:\ACS-Repo\setup\dev\\advancedsg.cfg",#Absolute path to cfg file
@@ -1469,6 +1451,23 @@ test_data = {
                  "username": "root",
                  "password": "password",
         },
+       "ldap_account": {
+            "email": "",
+            "firstname": "",
+            "lastname": "",
+            "username": "",
+            "password": "",
+        },
+        "ldap_configuration": {
+            "basedn": "",
+            "emailAttribute": "",
+            "userObject": "",
+            "usernameAttribute": "",
+            "hostname": "",
+            "port": "",
+            "ldapUsername": "",
+            "ldapPassword": ""
+        },
         "systemVmDelay": 120
     }
 }


[11/50] [abbrv] git commit: updated refs/heads/reporter to b26f3fc

Posted by wi...@apache.org.
CLOUDSTACK-8102: UI > Quick Install Wizard > update admin > should encode the parameter value once, instead of twice, before sending it to the API.


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/4cb95055
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/4cb95055
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/4cb95055

Branch: refs/heads/reporter
Commit: 4cb950554847a47169a50eef6af964f6a180fa7f
Parents: 5fea96f
Author: Jessica Wang <je...@apache.org>
Authored: Fri Dec 19 15:41:42 2014 -0800
Committer: Jessica Wang <je...@apache.org>
Committed: Fri Dec 19 15:43:12 2014 -0800

----------------------------------------------------------------------
 ui/scripts/installWizard.js | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/4cb95055/ui/scripts/installWizard.js
----------------------------------------------------------------------
diff --git a/ui/scripts/installWizard.js b/ui/scripts/installWizard.js
index 535f89f..e6d840a 100644
--- a/ui/scripts/installWizard.js
+++ b/ui/scripts/installWizard.js
@@ -35,7 +35,7 @@
                 url: createURL('updateUser'),
                 data: {
                     id: cloudStack.context.users[0].userid,
-                    password: md5Hashed ? $.md5(args.data.password) : todb(args.data.password)
+                    password: md5Hashed ? $.md5(args.data.password) : args.data.password
                 },
                 dataType: 'json',
                 async: true,


[47/50] [abbrv] git commit: updated refs/heads/reporter to b26f3fc

Posted by wi...@apache.org.
CLOUDSTACK-6920 Support listing of LBHealthcheck policy with LBHealthcheck policy ID
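
In short, lbruleid is no longer mandatory for listLBHealthCheckPolicies: when only the healthcheck policy id is supplied, the LB rule is resolved through the new findLBIdByHealtCheckPolicyId() service method. A condensed sketch of the new parameter handling in ListLBHealthCheckPoliciesCmd.execute(), using only the calls that appear in the diff below:

    Long lbRuleId = getLbRuleId();
    Long policyId = getId();
    if (lbRuleId == null) {
        if (policyId == null) {
            // neither lbruleid nor id was passed
            throw new InvalidParameterValueException("Either LB Ruleid or HealthCheckpolicy Id should be specified");
        }
        // resolve the LB rule from the healthcheck policy id
        lbRuleId = _lbService.findLBIdByHealtCheckPolicyId(policyId);
    }
    LoadBalancer lb = _lbService.findById(lbRuleId);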


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/c7b23d0a
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/c7b23d0a
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/c7b23d0a

Branch: refs/heads/reporter
Commit: c7b23d0a10fa6fc55820f298cce658bff0b8125c
Parents: 15b3486
Author: Rajesh Battala <ra...@citrix.com>
Authored: Wed Jun 18 15:42:23 2014 +0530
Committer: Rajesh Battala <ra...@citrix.com>
Committed: Fri Dec 26 21:01:03 2014 +0530

----------------------------------------------------------------------
 .../network/lb/LoadBalancingRulesService.java   |  2 ++
 .../ListLBHealthCheckPoliciesCmd.java           | 21 ++++++++++++++++++--
 .../lb/LoadBalancingRulesManagerImpl.java       | 16 +++++++++++++--
 3 files changed, 35 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/c7b23d0a/api/src/com/cloud/network/lb/LoadBalancingRulesService.java
----------------------------------------------------------------------
diff --git a/api/src/com/cloud/network/lb/LoadBalancingRulesService.java b/api/src/com/cloud/network/lb/LoadBalancingRulesService.java
index 3e11014..50b39d2 100644
--- a/api/src/com/cloud/network/lb/LoadBalancingRulesService.java
+++ b/api/src/com/cloud/network/lb/LoadBalancingRulesService.java
@@ -161,4 +161,6 @@ public interface LoadBalancingRulesService {
     HealthCheckPolicy updateLBHealthCheckPolicy(long id, String customId, Boolean forDisplay);
 
     LoadBalancer findLbByStickinessId(long stickinessPolicyId);
+
+    Long findLBIdByHealtCheckPolicyId(long lbHealthCheckPolicy);
 }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/c7b23d0a/api/src/org/apache/cloudstack/api/command/user/loadbalancer/ListLBHealthCheckPoliciesCmd.java
----------------------------------------------------------------------
diff --git a/api/src/org/apache/cloudstack/api/command/user/loadbalancer/ListLBHealthCheckPoliciesCmd.java b/api/src/org/apache/cloudstack/api/command/user/loadbalancer/ListLBHealthCheckPoliciesCmd.java
index 7f78da64..3f2082a 100644
--- a/api/src/org/apache/cloudstack/api/command/user/loadbalancer/ListLBHealthCheckPoliciesCmd.java
+++ b/api/src/org/apache/cloudstack/api/command/user/loadbalancer/ListLBHealthCheckPoliciesCmd.java
@@ -29,6 +29,8 @@ import org.apache.cloudstack.api.response.LBHealthCheckResponse;
 import org.apache.cloudstack.api.response.ListResponse;
 import org.apache.log4j.Logger;
 
+
+import com.cloud.exception.InvalidParameterValueException;
 import com.cloud.network.rules.HealthCheckPolicy;
 import com.cloud.network.rules.LoadBalancer;
 
@@ -45,13 +47,15 @@ public class ListLBHealthCheckPoliciesCmd extends BaseListCmd {
     @Parameter(name = ApiConstants.LBID,
                type = CommandType.UUID,
                entityType = FirewallRuleResponse.class,
-               required = true,
                description = "the ID of the load balancer rule")
     private Long lbRuleId;
 
     @Parameter(name = ApiConstants.FOR_DISPLAY, type = CommandType.BOOLEAN, description = "list resources by display flag; only ROOT admin is eligible to pass this parameter", since = "4.4", authorized = {RoleType.Admin})
     private Boolean display;
 
+    @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = LBHealthCheckResponse.class, description = "the ID of the healthcheck policy", since = "4.4")
+    private Long id;
+
     // ///////////////////////////////////////////////////
     // ///////////////// Accessors ///////////////////////
     // ///////////////////////////////////////////////////
@@ -59,6 +63,10 @@ public class ListLBHealthCheckPoliciesCmd extends BaseListCmd {
         return lbRuleId;
     }
 
+    public Long getId() {
+        return id;
+    }
+
     public boolean getDisplay() {
         if (display != null) {
             return display;
@@ -78,9 +86,18 @@ public class ListLBHealthCheckPoliciesCmd extends BaseListCmd {
     @Override
     public void execute() {
         List<LBHealthCheckResponse> hcpResponses = new ArrayList<LBHealthCheckResponse>();
-        LoadBalancer lb = _lbService.findById(getLbRuleId());
         ListResponse<LBHealthCheckResponse> response = new ListResponse<LBHealthCheckResponse>();
+        Long lbRuleId = getLbRuleId();
+        Long hId = getId();
+        if(lbRuleId == null) {
+            if(hId != null) {
+                lbRuleId = _lbService.findLBIdByHealtCheckPolicyId(hId);
+            } else {
+                throw new InvalidParameterValueException("Either LB Ruleid or HealthCheckpolicy Id should be specified");
+            }
+        }
 
+        LoadBalancer lb = _lbService.findById(lbRuleId);
         if (lb != null) {
             List<? extends HealthCheckPolicy> healthCheckPolicies = _lbService.searchForLBHealthCheckPolicies(this);
             LBHealthCheckResponse spResponse = _responseGenerator.createLBHealthCheckPolicyResponse(healthCheckPolicies, lb);

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/c7b23d0a/server/src/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java b/server/src/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java
index fbb862e..d7a85b6 100644
--- a/server/src/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java
+++ b/server/src/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java
@@ -2263,15 +2263,18 @@ public class LoadBalancingRulesManagerImpl<Type> extends ManagerBase implements
     public List<LBHealthCheckPolicyVO> searchForLBHealthCheckPolicies(ListLBHealthCheckPoliciesCmd cmd) throws PermissionDeniedException {
         Account caller = CallContext.current().getCallingAccount();
         Long loadBalancerId = cmd.getLbRuleId();
+        Long policyId = cmd.getId();
         boolean forDisplay = cmd.getDisplay();
-
+        if(loadBalancerId == null) {
+            loadBalancerId = findLBIdByHealtCheckPolicyId(policyId);
+        }
         LoadBalancerVO loadBalancer = _lbDao.findById(loadBalancerId);
         if (loadBalancer == null) {
             return null;
         }
 
         _accountMgr.checkAccess(caller, null, true, loadBalancer);
-        List<LBHealthCheckPolicyVO> hcDbpolicies = _lb2healthcheckDao.listByLoadBalancerIdAndDisplayFlag(cmd.getLbRuleId(), forDisplay);
+        List<LBHealthCheckPolicyVO> hcDbpolicies = _lb2healthcheckDao.listByLoadBalancerIdAndDisplayFlag(loadBalancerId, forDisplay);
 
         return hcDbpolicies;
     }
@@ -2569,4 +2572,13 @@ public class LoadBalancingRulesManagerImpl<Type> extends ManagerBase implements
         return _lb2healthcheckDao.findById(id);
     }
 
+    @Override
+    public Long findLBIdByHealtCheckPolicyId(long lbHealthCheckPolicy) {
+        LBHealthCheckPolicyVO policy= _lb2healthcheckDao.findById(lbHealthCheckPolicy);
+        if(policy != null) {
+            return policy.getLoadBalancerId();
+        }
+        return null;
+    }
+
 }


[06/50] [abbrv] git commit: updated refs/heads/reporter to b26f3fc

Posted by wi...@apache.org.
CLOUDSTACK-8084: Fixed test_17_add_nic_different_zone in test_add_remove_network.py

Signed-off-by: SrikanteswaraRao Talluri <ta...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/3090e4a0
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/3090e4a0
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/3090e4a0

Branch: refs/heads/reporter
Commit: 3090e4a0301c4be50d0d46703b2d9fa070c2e91b
Parents: 6966980
Author: Gaurav Aradhye <ga...@clogeny.com>
Authored: Wed Dec 17 17:35:35 2014 +0530
Committer: SrikanteswaraRao Talluri <ta...@apache.org>
Committed: Fri Dec 19 10:20:29 2014 +0530

----------------------------------------------------------------------
 test/integration/component/test_add_remove_network.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/3090e4a0/test/integration/component/test_add_remove_network.py
----------------------------------------------------------------------
diff --git a/test/integration/component/test_add_remove_network.py b/test/integration/component/test_add_remove_network.py
index aacd44b..2f685b5 100644
--- a/test/integration/component/test_add_remove_network.py
+++ b/test/integration/component/test_add_remove_network.py
@@ -1263,6 +1263,7 @@ class TestFailureScenariosAddNetworkToVM(cloudstackTestCase):
         self.debug("Creating isolated network in zone %s which is foreign to VM" %
                     foreignZoneId)
         isolated_network = Network.create(self.apiclient,self.services["isolated_network"],
+                                          self.account.name, self.account.domainid,
                                           networkofferingid=self.isolated_network_offering.id)
         self.debug("Created isolated network %s in zone %s" %
                    (isolated_network.id, foreignZoneId))
@@ -1274,7 +1275,7 @@ class TestFailureScenariosAddNetworkToVM(cloudstackTestCase):
         cmd.networkid = isolated_network.id
 
         with self.assertRaises(Exception) as e:
-            time.sleep(5) 
+            time.sleep(5)
             self.apiclient.addNicToVirtualMachine(cmd)
 	    self.debug("addNicToVirtualMachine API failed with exception: %s" % e.exception)
 


[13/50] [abbrv] git commit: updated refs/heads/reporter to b26f3fc

Posted by wi...@apache.org.
CLOUDSTACK-8101: Volume sync is not working as expected - a management server (MS) restart during a volume upload leaves the volume in a hung state.
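
The fix widens the set of volume records considered during store resync: previously only uploaded volumes with a URL were listed, so rows whose url column is NULL (for example an upload interrupted by an MS restart) were missed and their files on the image store could later be deleted by mistake. A condensed sketch of the corrected selection, using the DAO call from the diff below:

    // consider every volume record on this image store, including rows with a NULL url column
    List<VolumeDataStoreVO> dbVolumes = _volumeStoreDao.listByStoreId(storeId);
    List<VolumeDataStoreVO> toBeDownloaded = new ArrayList<VolumeDataStoreVO>(dbVolumes);
    for (VolumeDataStoreVO volumeStore : dbVolumes) {
        VolumeVO volume = volDao.findById(volumeStore.getVolumeId());
        // ... per-volume reconciliation continues as in the existing method ...
    }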


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/e559b15b
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/e559b15b
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/e559b15b

Branch: refs/heads/reporter
Commit: e559b15b6a166e2eb5f9b044338295fe8c9d219d
Parents: ea63455
Author: Min Chen <mi...@citrix.com>
Authored: Fri Dec 19 16:48:45 2014 -0800
Committer: Min Chen <mi...@citrix.com>
Committed: Fri Dec 19 16:51:41 2014 -0800

----------------------------------------------------------------------
 .../org/apache/cloudstack/storage/volume/VolumeServiceImpl.java  | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/e559b15b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
----------------------------------------------------------------------
diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
index 5d10c7f..c00785e 100644
--- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
+++ b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
@@ -1382,8 +1382,8 @@ public class VolumeServiceImpl implements VolumeService {
                         return;
                     }
 
-                    // we can only resume those uploaded volume with a URL specified
-                    List<VolumeDataStoreVO> dbVolumes = _volumeStoreDao.listUploadedVolumesByStoreId(storeId);
+                    // find all the db volumes including those with NULL url column to avoid accidentally deleting volumes on image store later.
+                    List<VolumeDataStoreVO> dbVolumes = _volumeStoreDao.listByStoreId(storeId);
                     List<VolumeDataStoreVO> toBeDownloaded = new ArrayList<VolumeDataStoreVO>(dbVolumes);
                     for (VolumeDataStoreVO volumeStore : dbVolumes) {
                         VolumeVO volume = volDao.findById(volumeStore.getVolumeId());


[16/50] [abbrv] git commit: updated refs/heads/reporter to b26f3fc

Posted by wi...@apache.org.
CLOUDSTACK-8100: Fixed pep8 issues in test_vpc.py

Signed-off-by: SrikanteswaraRao Talluri <ta...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/f52f968c
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/f52f968c
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/f52f968c

Branch: refs/heads/reporter
Commit: f52f968c83dac27d1e55e09f198b1c741af9878d
Parents: 331e257
Author: Gaurav Aradhye <ga...@clogeny.com>
Authored: Fri Dec 19 18:59:53 2014 +0530
Committer: SrikanteswaraRao Talluri <ta...@apache.org>
Committed: Mon Dec 22 12:55:41 2014 +0530

----------------------------------------------------------------------
 test/integration/component/test_vpc.py | 2585 ++++++++++++++-------------
 1 file changed, 1301 insertions(+), 1284 deletions(-)
----------------------------------------------------------------------



[09/50] [abbrv] git commit: updated refs/heads/reporter to b26f3fc

Posted by wi...@apache.org.
CLOUDSTACK-6666: UI > network > VPC > Router > Public IP Addresses > IP Address detailView > Configuration tab > Port Forwarding > Select VM screen > implement keyword search.


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/8bcde024
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/8bcde024
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/8bcde024

Branch: refs/heads/reporter
Commit: 8bcde024a8fbdf284c36f50131360a387d8be9d2
Parents: 54db0d2
Author: Jessica Wang <je...@apache.org>
Authored: Fri Dec 19 14:26:42 2014 -0800
Committer: Jessica Wang <je...@apache.org>
Committed: Fri Dec 19 14:28:15 2014 -0800

----------------------------------------------------------------------
 ui/scripts/network.js | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/8bcde024/ui/scripts/network.js
----------------------------------------------------------------------
diff --git a/ui/scripts/network.js b/ui/scripts/network.js
index 0a46fa0..44e66b6 100755
--- a/ui/scripts/network.js
+++ b/ui/scripts/network.js
@@ -4120,18 +4120,18 @@
                                                     dataProvider: singleVmSecondaryIPSubselect
                                                 },
                                                 dataProvider: function(args) {
-                                                    var networkid;
-                                                    if ('vpc' in args.context)
+                                                	var data = {};
+                                                    listViewDataProvider(args, data);
+                                                	
+                                                	var networkid;
+                                                    if ('vpc' in args.context) {
                                                         networkid = args.context.multiData.tier;
-                                                    else
+                                                    } else {
                                                         networkid = args.context.ipAddresses[0].associatednetworkid;
-
-                                                    var data = {
-                                                        page: args.page,
-                                                        pageSize: pageSize,
-                                                        listAll: true,
-                                                        networkid: networkid
-                                                    };
+                                                    }                                                    
+                                                    $.extend(data, {
+                                                    	networkid: networkid
+                                                    });    
 
                                                     if (!args.context.projects) {
                                                         $.extend(data, {


[41/50] [abbrv] git commit: updated refs/heads/reporter to b26f3fc

Posted by wi...@apache.org.
CLOUDSTACK-8123. VM re-install fails.
While moving the root disk into the VM folder during restore, look up the folder based on the VM's name in vCenter.
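
The underlying problem is a naming mismatch: the VM folder on the datastore is named after the VM's display name in vCenter, while volume.getVmName() holds the internal CloudStack name, so the folder lookup could fail during re-install. A minimal sketch of the corrected lookup, using only the objects and calls that appear in the diff below:

    VirtualMachineMO restoreVmMo = dcMo.findVm(volume.getVmName());
    if (restoreVmMo != null) {
        // the VM folder in the datastore is named after the VM's name in vCenter
        String vmNameInVcenter = restoreVmMo.getName();
        if (dsMo.folderExists(String.format("[%s]", dsMo.getName()), vmNameInVcenter)) {
            VmwareStorageLayoutHelper.syncVolumeToVmDefaultFolder(dcMo, vmNameInVcenter, dsMo, vmdkFileBaseName);
        }
    }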


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/1c0bf321
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/1c0bf321
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/1c0bf321

Branch: refs/heads/reporter
Commit: 1c0bf321707b55e1502dd3d102ec192b00a83c19
Parents: a5a65c7
Author: Likitha Shetty <li...@citrix.com>
Authored: Fri Dec 5 18:47:38 2014 +0530
Committer: Likitha Shetty <li...@citrix.com>
Committed: Wed Dec 24 14:01:20 2014 +0530

----------------------------------------------------------------------
 .../com/cloud/storage/resource/VmwareStorageProcessor.java  | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/1c0bf321/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java
index ba2255b..247c220 100644
--- a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java
+++ b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java
@@ -487,9 +487,12 @@ public class VmwareStorageProcessor implements StorageProcessor {
                 dsMo.deleteFile(srcFile, dcMo.getMor(), true);
             }
             // restoreVM - move the new ROOT disk into corresponding VM folder
-            String vmInternalCSName = volume.getVmName();
-            if (dsMo.folderExists(String.format("[%s]", dsMo.getName()), vmInternalCSName)) {
-                VmwareStorageLayoutHelper.syncVolumeToVmDefaultFolder(dcMo, vmInternalCSName, dsMo, vmdkFileBaseName);
+            VirtualMachineMO restoreVmMo = dcMo.findVm(volume.getVmName());
+            if (restoreVmMo != null) {
+                String vmNameInVcenter = restoreVmMo.getName(); // VM folder name in datastore will be VM's name in vCenter.
+                if (dsMo.folderExists(String.format("[%s]", dsMo.getName()), vmNameInVcenter)) {
+                    VmwareStorageLayoutHelper.syncVolumeToVmDefaultFolder(dcMo, vmNameInVcenter, dsMo, vmdkFileBaseName);
+                }
             }
 
             VolumeObjectTO newVol = new VolumeObjectTO();


[20/50] [abbrv] git commit: updated refs/heads/reporter to b26f3fc

Posted by wi...@apache.org.
CLOUDSTACK-8109. Extract Template is failing.
Fix the OVA path that is returned once an OVA is packaged using a META file.
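
The fix keeps the return value consistent: whether the OVA already exists or has to be packaged from its .meta file, createOvaForTemplate() now hands back the template's installPath rather than the bare OVA file name produced by the packaging step. A condensed sketch of the corrected flow (it sits inside the method's existing try/catch; names as in the diff below):

    String installFullPath = secondaryMountPoint + "/" + installPath;
    if (installFullPath.endsWith(".ova")) {
        if (new File(installFullPath).exists()) {
            s_logger.debug("OVA file found at: " + installFullPath);
        } else if (new File(installFullPath + ".meta").exists()) {
            // package the OVA next to its meta file
            createOVAFromMetafile(installFullPath + ".meta");
        } else {
            throw new Exception("Unable to find OVA or OVA MetaFile to prepare template.");
        }
        // always return the install path the caller already tracks, not the generated file name
        return installPath;
    }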


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/507d9d33
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/507d9d33
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/507d9d33

Branch: refs/heads/reporter
Commit: 507d9d337d2e80f7f6e5d665708c0687c387c5fc
Parents: 0f224c8
Author: Likitha Shetty <li...@citrix.com>
Authored: Mon Oct 27 16:56:29 2014 +0530
Committer: Sanjay Tripathi <sa...@citrix.com>
Committed: Tue Dec 23 10:44:10 2014 +0530

----------------------------------------------------------------------
 .../manager/VmwareStorageManagerImpl.java       | 40 +++++++++-----------
 1 file changed, 17 insertions(+), 23 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/507d9d33/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java
index 3d6a4fb..86b1edf 100644
--- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java
+++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java
@@ -148,37 +148,32 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
     public String createOvaForTemplate(TemplateObjectTO template) {
         DataStoreTO storeTO = template.getDataStore();
         if (!(storeTO instanceof NfsTO)) {
-            s_logger.debug("can only handle nfs storage, when create ova from volume");
+            s_logger.debug("Can only handle NFS storage, while creating OVA from template");
             return null;
         }
         NfsTO nfsStore = (NfsTO)storeTO;
         String secStorageUrl = nfsStore.getUrl();
         assert (secStorageUrl != null);
         String installPath = template.getPath();
-        String ovafileName = "";
         String secondaryMountPoint = _mountService.getMountPoint(secStorageUrl);
         String installFullPath = secondaryMountPoint + "/" + installPath;
-
-        String templateName = installFullPath;   // should be a file ending .ova;
         try {
-            if (templateName.endsWith(".ova")) {
-                if (new File(templateName).exists()) {
-                    s_logger.debug("OVA files exists. succeed. ");
-                    return installPath;
+            if (installFullPath.endsWith(".ova")) {
+                if (new File(installFullPath).exists()) {
+                    s_logger.debug("OVA file found at: " + installFullPath);
                 } else {
-                    if (new File(templateName + ".meta").exists()) {
-                        ovafileName = getOVAFromMetafile(templateName + ".meta");
-                        s_logger.debug("OVA file in meta file is " + ovafileName);
-                        return ovafileName;
+                    if (new File(installFullPath + ".meta").exists()) {
+                        createOVAFromMetafile(installFullPath + ".meta");
                     } else {
-                        String msg = "Unable to find ova meta or ova file to prepare template (vmware)";
+                        String msg = "Unable to find OVA or OVA MetaFile to prepare template.";
                         s_logger.error(msg);
                         throw new Exception(msg);
                     }
                 }
+                return installPath;
             }
         } catch (Throwable e) {
-            s_logger.debug("Failed to create ova: " + e.toString());
+            s_logger.debug("Failed to create OVA: " + e.toString());
         }
         return null;
     }
@@ -1042,12 +1037,12 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
 
     // here we use a method to return the ovf and vmdk file names; Another way to do it:
     // create a new class, and like TemplateLocation.java and create templateOvfInfo.java to handle it;
-    private String getOVAFromMetafile(String metafileName) throws Exception {
+    private String createOVAFromMetafile(String metafileName) throws Exception {
         File ova_metafile = new File(metafileName);
         Properties props = null;
         FileInputStream strm = null;
         String ovaFileName = "";
-        s_logger.info("getOVAfromMetaFile: " + metafileName);
+        s_logger.info("Creating OVA using MetaFile: " + metafileName);
         try {
             strm = new FileInputStream(ova_metafile);
 
@@ -1088,18 +1083,17 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
             command.execute();
             s_logger.info("Package OVA for template in dir: " + exportDir + "cmd: " + command.toString());
             // to be safe, physically test existence of the target OVA file
-            if ((new File(exportDir + ovaFileName)).exists()) {
-                s_logger.info("ova file is created and ready to extract ");
-                return (ovaFileName);
+            if ((new File(exportDir + File.separator + ovaFileName)).exists()) {
+                s_logger.info("OVA file: " + ovaFileName +" is created and ready to extract.");
+                return ovaFileName;
             } else {
-                String msg = exportDir + File.separator + ovaFileName + ".ova is not created as expected";
+                String msg = exportDir + File.separator + ovaFileName + " is not created as expected";
                 s_logger.error(msg);
                 throw new Exception(msg);
             }
         } catch (Exception e) {
-            s_logger.error("Exception in getOVAFromMetafile", e);
-            return null;
-            // Do something, re-throw the exception
+            s_logger.error("Exception while creating OVA using Metafile", e);
+            throw e;
         } finally {
             if (strm != null) {
                 try {


[49/50] [abbrv] git commit: updated refs/heads/reporter to b26f3fc

Posted by wi...@apache.org.
CLOUDSTACK-8136. [VMware] Create VM snapshot fails if a previous attempt to take the snapshot failed.
While looking for an ongoing VM snapshot task, check the task status to identify if the task is still running.
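
In other words, finding a CreateSnapshot_Task in the recent task list is no longer enough to wait on it, since the task may be a leftover from a failed attempt. A minimal sketch of the added guard, using the vim25 types and calls shown in the diff below:

    TaskInfo info = (TaskInfo)context.getVimClient().getDynamicProperty(taskMor, "info");
    boolean taskFinished = info.getState().equals(TaskInfoState.SUCCESS)
            || info.getState().equals(TaskInfoState.ERROR);
    if (info.getEntityName().equals(cmd.getVmName())
            && info.getName().equalsIgnoreCase("CreateSnapshot_Task")
            && !taskFinished) {
        // only wait if the previously submitted snapshot task is still running
        s_logger.debug("There is already a VM snapshot task running, wait for it");
        context.getVimClient().waitForTask(taskMor);
    }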


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/25a4f0dc
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/25a4f0dc
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/25a4f0dc

Branch: refs/heads/reporter
Commit: 25a4f0dc530951e87fe357313dc983cb75aa8972
Parents: 13bdc1c
Author: Likitha Shetty <li...@citrix.com>
Authored: Tue Dec 30 14:16:00 2014 +0530
Committer: Likitha Shetty <li...@citrix.com>
Committed: Tue Dec 30 14:19:48 2014 +0530

----------------------------------------------------------------------
 .../hypervisor/vmware/manager/VmwareStorageManagerImpl.java   | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/25a4f0dc/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java
index 1981303..3aec7a4 100644
--- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java
+++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java
@@ -37,6 +37,7 @@ import com.vmware.vim25.HostDatastoreBrowserSearchResults;
 import com.vmware.vim25.HostDatastoreBrowserSearchSpec;
 import com.vmware.vim25.ManagedObjectReference;
 import com.vmware.vim25.TaskInfo;
+import com.vmware.vim25.TaskInfoState;
 import com.vmware.vim25.VirtualDisk;
 
 import org.apache.cloudstack.storage.to.TemplateObjectTO;
@@ -1181,8 +1182,10 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
                 TaskInfo info = (TaskInfo)(context.getVimClient().getDynamicProperty(taskMor, "info"));
 
                 if (info.getEntityName().equals(cmd.getVmName()) && info.getName().equalsIgnoreCase("CreateSnapshot_Task")) {
-                    s_logger.debug("There is already a VM snapshot task running, wait for it");
-                    context.getVimClient().waitForTask(taskMor);
+                    if (!(info.getState().equals(TaskInfoState.SUCCESS) || info.getState().equals(TaskInfoState.ERROR))) {
+                        s_logger.debug("There is already a VM snapshot task running, wait for it");
+                        context.getVimClient().waitForTask(taskMor);
+                    }
                 }
             }
 


[45/50] [abbrv] git commit: updated refs/heads/reporter to b26f3fc

Posted by wi...@apache.org.
CLOUDSTACK-8128. VM with Datadisk fails to start when storage is put into maintenance.
While migrating a volume from primary to secondary storage, use a worker VM to perform the operations if the VM associated with the volume is stopped.
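
Previously the copy path attached the volume to its owning VM; that only works while the VM is running, so the code now falls back to a dummy worker VM when the owning VM is stopped as well as when no VM owns the volume. A minimal sketch of the new condition, using the calls shown in the diff below:

    vmMo = hyperHost.findVmOnHyperHost(vmName);
    if (vmMo == null || VmwareResource.getVmState(vmMo) == PowerState.PowerOff) {
        // no running VM owns the volume: create a dummy worker VM to attach the volume to
        DatastoreMO dsMo = new DatastoreMO(hyperHost.getContext(), morDs);
        workerVm = HypervisorHostHelper.createWorkerVM(hyperHost, dsMo, workerVmName);
        // ... the rest of the copy/export proceeds against the worker VM ...
    }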


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/5227ae22
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/5227ae22
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/5227ae22

Branch: refs/heads/reporter
Commit: 5227ae22755cac55407ba590b23d3d490df120af
Parents: ff7997a
Author: Likitha Shetty <li...@citrix.com>
Authored: Wed Dec 17 17:02:47 2014 +0530
Committer: Likitha Shetty <li...@citrix.com>
Committed: Wed Dec 24 16:57:37 2014 +0530

----------------------------------------------------------------------
 .../src/com/cloud/storage/resource/VmwareStorageProcessor.java     | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/5227ae22/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java
index 247c220..e601bb4 100644
--- a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java
+++ b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java
@@ -610,7 +610,7 @@ public class VmwareStorageProcessor implements StorageProcessor {
             }
 
             vmMo = hyperHost.findVmOnHyperHost(vmName);
-            if (vmMo == null) {
+            if (vmMo == null || VmwareResource.getVmState(vmMo) == PowerState.PowerOff) {
                 // create a dummy worker vm for attaching the volume
                 DatastoreMO dsMo = new DatastoreMO(hyperHost.getContext(), morDs);
                 workerVm = HypervisorHostHelper.createWorkerVM(hyperHost, dsMo, workerVmName);


[43/50] [abbrv] git commit: updated refs/heads/reporter to b26f3fc

Posted by wi...@apache.org.
CLOUDSTACK-8126. Cold migration of a VM is not working as expected: if a VM is cold-migrated across clusters, it fails to start.
1. If a VM by the same name exists on a different cluster in the VMware DC, unregister the existing VM and continue with the VM start.
2. If VM start succeeds, delete VM files associated with the unregistered VM.
3. If VM start fails, re-register the unregistered VM.
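
Put together, the start path now handles a stale registration left behind by a cold migration. A high-level sketch of that flow, using only the helper objects and methods that appear in the diff below (deleteUnregisteredVmFiles() is the new private helper added by this commit):

    // before configuring the new VM: unregister a same-named VM found elsewhere in the DC
    VirtualMachineMO existingVmInDc = dcMo.findVm(vmInternalCSName);
    if (existingVmInDc != null) {
        existingVmName = existingVmInDc.getName();
        existingVmFileInfo = existingVmInDc.getFileInfo();
        existingVmFileLayout = existingVmInDc.getFileLayout();
        existingVmInDc.unregisterVm();
    }

    // on successful power-on: remove the files of the unregistered VM
    if (existingVmName != null && existingVmFileLayout != null) {
        deleteUnregisteredVmFiles(existingVmFileLayout, dcMo);
    }

    // on failure: register the old VM back from its .vmx location
    if (existingVmName != null && existingVmFileInfo != null) {
        DatastoreFile fileInDatastore = new DatastoreFile(existingVmFileInfo.getVmPathName());
        DatastoreMO existingVmDsMo = new DatastoreMO(dcMo.getContext(), dcMo.findDatastore(fileInDatastore.getDatastoreName()));
        registerVm(existingVmName, existingVmDsMo);
    }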


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/974b0180
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/974b0180
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/974b0180

Branch: refs/heads/reporter
Commit: 974b0180dd67f19fea921092105161f849891ac5
Parents: 7b32b8a
Author: Likitha Shetty <li...@citrix.com>
Authored: Mon Dec 8 18:59:51 2014 +0530
Committer: Likitha Shetty <li...@citrix.com>
Committed: Wed Dec 24 15:07:08 2014 +0530

----------------------------------------------------------------------
 .../vmware/resource/VmwareResource.java         | 77 +++++++++++++++++++-
 .../hypervisor/vmware/mo/VirtualMachineMO.java  | 36 +++++++++
 2 files changed, 111 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/974b0180/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
index 53cdb99..0dfde45 100644
--- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
+++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
@@ -84,6 +84,9 @@ import com.vmware.vim25.VirtualEthernetCard;
 import com.vmware.vim25.VirtualEthernetCardDistributedVirtualPortBackingInfo;
 import com.vmware.vim25.VirtualEthernetCardNetworkBackingInfo;
 import com.vmware.vim25.VirtualMachineConfigSpec;
+import com.vmware.vim25.VirtualMachineFileInfo;
+import com.vmware.vim25.VirtualMachineFileLayoutEx;
+import com.vmware.vim25.VirtualMachineFileLayoutExFileInfo;
 import com.vmware.vim25.VirtualMachineGuestOsIdentifier;
 import com.vmware.vim25.VirtualMachinePowerState;
 import com.vmware.vim25.VirtualMachineRelocateSpec;
@@ -1326,17 +1329,22 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
         VirtualMachineTO vmSpec = cmd.getVirtualMachine();
         boolean vmAlreadyExistsInVcenter = false;
 
+        String existingVmName = null;
+        VirtualMachineFileInfo existingVmFileInfo = null;
+        VirtualMachineFileLayoutEx existingVmFileLayout = null;
+
         Pair<String, String> names = composeVmNames(vmSpec);
         String vmInternalCSName = names.first();
         String vmNameOnVcenter = names.second();
 
         // Thus, vmInternalCSName always holds i-x-y, the cloudstack generated internal VM name.
         VmwareContext context = getServiceContext();
+        DatacenterMO dcMo = null;
         try {
             VmwareManager mgr = context.getStockObject(VmwareManager.CONTEXT_STOCK_NAME);
 
             VmwareHypervisorHost hyperHost = getHyperHost(context);
-            DatacenterMO dcMo = new DatacenterMO(hyperHost.getContext(), hyperHost.getHyperHostDatacenter());
+            dcMo = new DatacenterMO(hyperHost.getContext(), hyperHost.getHyperHostDatacenter());
 
             // Validate VM name is unique in Datacenter
             VirtualMachineMO vmInVcenter = dcMo.checkIfVmAlreadyExistsInVcenter(vmNameOnVcenter, vmInternalCSName);
@@ -1404,6 +1412,15 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
                         vmMo.tearDownDevices(new Class<?>[] {VirtualEthernetCard.class});
                     vmMo.ensureScsiDeviceController();
                 } else {
+                    // If a VM with the same name is found in a different cluster in the DC, unregister the old VM and configure a new VM (cold-migration).
+                    VirtualMachineMO existingVmInDc = dcMo.findVm(vmInternalCSName);
+                    if (existingVmInDc != null) {
+                        s_logger.debug("Found VM: " + vmInternalCSName + " on a host in a different cluster. Unregistering the exisitng VM.");
+                        existingVmName = existingVmInDc.getName();
+                        existingVmFileInfo = existingVmInDc.getFileInfo();
+                        existingVmFileLayout = existingVmInDc.getFileLayout();
+                        existingVmInDc.unregisterVm();
+                    }
                     Pair<ManagedObjectReference, DatastoreMO> rootDiskDataStoreDetails = null;
                     for (DiskTO vol : disks) {
                         if (vol.getType() == Volume.Type.ROOT) {
@@ -1429,7 +1446,9 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
 
                     assert (vmSpec.getMinSpeed() != null) && (rootDiskDataStoreDetails != null);
 
-                    if (rootDiskDataStoreDetails.second().folderExists(String.format("[%s]", rootDiskDataStoreDetails.second().getName()), vmNameOnVcenter)) {
+                    boolean vmFolderExists = rootDiskDataStoreDetails.second().folderExists(String.format("[%s]", rootDiskDataStoreDetails.second().getName()), vmNameOnVcenter);
+                    String vmxFileFullPath = dsRootVolumeIsOn.searchFileInSubFolders(vmNameOnVcenter + ".vmx", false);
+                    if (vmFolderExists && vmxFileFullPath != null) { // VM can be registered only if .vmx is present.
                         registerVm(vmNameOnVcenter, dsRootVolumeIsOn);
                         vmMo = hyperHost.findVmOnHyperHost(vmInternalCSName);
                         tearDownVm(vmMo);
@@ -1740,6 +1759,11 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
 
             startAnswer.setIqnToPath(iqnToPath);
 
+            // Since VM was successfully powered-on, if there was an existing VM in a different cluster that was unregistered, delete all the files associated with it.
+            if (existingVmName != null && existingVmFileLayout != null) {
+                deleteUnregisteredVmFiles(existingVmFileLayout, dcMo);
+            }
+
             return startAnswer;
         } catch (Throwable e) {
             if (e instanceof RemoteException) {
@@ -1753,6 +1777,20 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
             if(vmAlreadyExistsInVcenter) {
                 startAnswer.setContextParam("stopRetry", "true");
             }
+
+            // Since VM start failed, if there was an existing VM in a different cluster that was unregistered, register it back.
+            if (existingVmName != null && existingVmFileInfo != null) {
+                s_logger.debug("Since VM start failed, registering back an existing VM: " + existingVmName + " that was unregistered");
+                try {
+                    DatastoreFile fileInDatastore = new DatastoreFile(existingVmFileInfo.getVmPathName());
+                    DatastoreMO existingVmDsMo = new DatastoreMO(dcMo.getContext(), dcMo.findDatastore(fileInDatastore.getDatastoreName()));
+                    registerVm(existingVmName, existingVmDsMo);
+                } catch (Exception ex){
+                    String message = "Failed to register an existing VM: " + existingVmName + " due to " + VmwareHelper.getExceptionMessage(ex);
+                    s_logger.warn(message, ex);
+                }
+            }
+
             return startAnswer;
         } finally {
         }
@@ -2201,6 +2239,41 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
         }
     }
 
+    private void deleteUnregisteredVmFiles(VirtualMachineFileLayoutEx vmFileLayout, DatacenterMO dcMo) throws Exception {
+        s_logger.debug("Deleting files associated with an existing VM that was unregistered");
+        DatastoreFile vmFolder = null;
+        try {
+            List<VirtualMachineFileLayoutExFileInfo> fileInfo = vmFileLayout.getFile();
+            for (VirtualMachineFileLayoutExFileInfo file : fileInfo) {
+                DatastoreFile fileInDatastore = new DatastoreFile(file.getName());
+                // In case of linked clones, VM file layout includes the base disk so don't delete all disk files.
+                if (file.getType().startsWith("disk") || file.getType().startsWith("digest"))
+                    continue;
+                else if (file.getType().equals("config"))
+                    vmFolder = new DatastoreFile(fileInDatastore.getDatastoreName(), fileInDatastore.getDir());
+                DatastoreMO dsMo = new DatastoreMO(dcMo.getContext(), dcMo.findDatastore(fileInDatastore.getDatastoreName()));
+                s_logger.debug("Deleting file: " + file.getName());
+                dsMo.deleteFile(file.getName(), dcMo.getMor(), true);
+            }
+            // Delete files that are present in the VM folder - this will take care of the VM disks as well.
+            DatastoreMO vmFolderDsMo = new DatastoreMO(dcMo.getContext(), dcMo.findDatastore(vmFolder.getDatastoreName()));
+            String[] files = vmFolderDsMo.listDirContent(vmFolder.getPath());
+            if (files.length != 0) {
+                for (String file : files) {
+                    String vmDiskFileFullPath = String.format("%s/%s", vmFolder.getPath(), file);
+                    s_logger.debug("Deleting file: " + vmDiskFileFullPath);
+                    vmFolderDsMo.deleteFile(vmDiskFileFullPath, dcMo.getMor(), true);
+                }
+            }
+            // Delete VM folder
+            s_logger.debug("Deleting folder: " + vmFolder.getPath());
+            vmFolderDsMo.deleteFolder(vmFolder.getPath(), dcMo.getMor());
+        } catch (Exception e) {
+            String message = "Failed to delete files associated with an existing VM that was unregistered due to " + VmwareHelper.getExceptionMessage(e);
+            s_logger.warn(message, e);
+        }
+    }
+
     private static VolumeObjectTO getVolumeInSpec(VirtualMachineTO vmSpec, VolumeObjectTO srcVol) {
         for (DiskTO disk : vmSpec.getDisks()) {
             VolumeObjectTO vol = (VolumeObjectTO)disk.getData();

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/974b0180/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java
----------------------------------------------------------------------
diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java
index 286aedd..5f180e1 100644
--- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java
+++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java
@@ -86,6 +86,7 @@ import com.vmware.vim25.VirtualMachineConfigOption;
 import com.vmware.vim25.VirtualMachineConfigSpec;
 import com.vmware.vim25.VirtualMachineConfigSummary;
 import com.vmware.vim25.VirtualMachineFileInfo;
+import com.vmware.vim25.VirtualMachineFileLayoutEx;
 import com.vmware.vim25.VirtualMachineMessage;
 import com.vmware.vim25.VirtualMachineMovePriority;
 import com.vmware.vim25.VirtualMachinePowerState;
@@ -740,6 +741,41 @@ public class VirtualMachineMO extends BaseMO {
         return (VirtualMachineFileInfo)_context.getVimClient().getDynamicProperty(_mor, "config.files");
     }
 
+    public VirtualMachineFileLayoutEx getFileLayout() throws Exception {
+        VirtualMachineFileLayoutEx fileLayout = null;
+        PropertySpec pSpec = new PropertySpec();
+        pSpec.setType("VirtualMachine");
+        pSpec.getPathSet().add("layoutEx");
+
+        ObjectSpec oSpec = new ObjectSpec();
+        oSpec.setObj(_mor);
+        oSpec.setSkip(Boolean.FALSE);
+
+        PropertyFilterSpec pfSpec = new PropertyFilterSpec();
+        pfSpec.getPropSet().add(pSpec);
+        pfSpec.getObjectSet().add(oSpec);
+        List<PropertyFilterSpec> pfSpecArr = new ArrayList<PropertyFilterSpec>();
+        pfSpecArr.add(pfSpec);
+
+        List<ObjectContent> ocs = _context.getService().retrieveProperties(_context.getPropertyCollector(), pfSpecArr);
+
+        if (ocs != null) {
+            for (ObjectContent oc : ocs) {
+                List<DynamicProperty> props = oc.getPropSet();
+                if (props != null) {
+                    for (DynamicProperty prop : props) {
+                        if (prop.getName().equals("layoutEx")) {
+                            fileLayout = (VirtualMachineFileLayoutEx)prop.getVal();
+                            break;
+                        }
+                    }
+                }
+            }
+        }
+
+        return fileLayout;
+    }
+
     @Override
     public ManagedObjectReference getParentMor() throws Exception {
         return (ManagedObjectReference)_context.getVimClient().getDynamicProperty(_mor, "parent");
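
Taken together, the hunks above add a rollback-safe path for starting a VM that is still registered in another cluster: before power-on the resource captures the VM's file info and extended file layout, unregisters it, and afterwards either deletes the leftover files (start succeeded) or re-registers the VM from its .vmx path (start failed). The snippet below is only a condensed sketch of that sequence, not the literal VmwareResource code; unregisterVm() and startVmInTargetCluster() are hypothetical stand-ins for the existing unregister helper and the normal start path, and the dcMo.findVm() lookup is assumed.

    import com.cloud.hypervisor.vmware.mo.DatacenterMO;
    import com.cloud.hypervisor.vmware.mo.DatastoreFile;
    import com.cloud.hypervisor.vmware.mo.DatastoreMO;
    import com.cloud.hypervisor.vmware.mo.VirtualMachineMO;
    import com.vmware.vim25.VirtualMachineFileInfo;
    import com.vmware.vim25.VirtualMachineFileLayoutEx;

    // Sketch only: condensed from the hunks above, with hypothetical helpers.
    void startWithCrossClusterCleanup(String existingVmName, DatacenterMO dcMo) throws Exception {
        VirtualMachineMO existingVmMo = dcMo.findVm(existingVmName);          // lookup assumed
        VirtualMachineFileInfo fileInfo = existingVmMo.getFileInfo();         // "config.files"
        VirtualMachineFileLayoutEx fileLayout = existingVmMo.getFileLayout(); // "layoutEx", added by this commit
        unregisterVm(existingVmMo);                                           // hypothetical: drop the stale registration

        try {
            startVmInTargetCluster(existingVmName);                           // hypothetical: the normal StartCommand path
            // Start succeeded, so the old registration is gone for good; purge its files.
            deleteUnregisteredVmFiles(fileLayout, dcMo);
        } catch (Exception e) {
            // Start failed; re-register the original VM from the datastore path of its .vmx file.
            DatastoreFile vmxFile = new DatastoreFile(fileInfo.getVmPathName());
            DatastoreMO dsMo = new DatastoreMO(dcMo.getContext(), dcMo.findDatastore(vmxFile.getDatastoreName()));
            registerVm(existingVmName, dsMo);
            throw e;
        }
    }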


[03/50] [abbrv] git commit: updated refs/heads/reporter to b26f3fc

Posted by wi...@apache.org.
CLOUDSTACK-8090: Moving test_dedicate_guest_vlan_ranges.py to the maint folder, as these test cases need to be run separately and serially

Signed-off-by: SrikanteswaraRao Talluri <ta...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/95b55841
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/95b55841
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/95b55841

Branch: refs/heads/reporter
Commit: 95b558414f0808d63920a2906f484fd6b15a4713
Parents: 923c65d
Author: Gaurav Aradhye <ga...@clogeny.com>
Authored: Thu Dec 18 15:58:36 2014 +0530
Committer: SrikanteswaraRao Talluri <ta...@apache.org>
Committed: Fri Dec 19 10:13:06 2014 +0530

----------------------------------------------------------------------
 .../maint/test_dedicate_guest_vlan_ranges.py    | 1241 ++++++++++++++++++
 .../test_dedicate_guest_vlan_ranges.py          | 1241 ------------------
 2 files changed, 1241 insertions(+), 1241 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/95b55841/test/integration/component/maint/test_dedicate_guest_vlan_ranges.py
----------------------------------------------------------------------
diff --git a/test/integration/component/maint/test_dedicate_guest_vlan_ranges.py b/test/integration/component/maint/test_dedicate_guest_vlan_ranges.py
new file mode 100644
index 0000000..9a0e7f0
--- /dev/null
+++ b/test/integration/component/maint/test_dedicate_guest_vlan_ranges.py
@@ -0,0 +1,1241 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+""" P1 tests for Dedicating guest VLAN ranges
+
+    Test Plan: https://cwiki.apache.org/confluence/display/CLOUDSTACK/Dedicated+Resources+-+Public+IP+Addresses+and+VLANs+per+Tenant+Test+Plan
+
+    Issue Link: https://issues.apache.org/jira/browse/CLOUDSTACK-2251
+
+    Feature Specifications: https://cwiki.apache.org/confluence/display/CLOUDSTACK/FS-+Dedicate+Guest+VLANs+per+tenant
+"""
+#Import Local Modules
+from nose.plugins.attrib import attr
+from marvin.cloudstackTestCase import cloudstackTestCase, unittest
+from marvin.lib.utils import (validateList,
+                              cleanup_resources,
+                              random_gen,
+			                  xsplit)
+from marvin.lib.base import (Account,
+                             Domain,
+                             PhysicalNetwork,
+                             NetworkOffering,
+                             Network,
+                             ServiceOffering,
+                             Project)
+from marvin.lib.common import (get_domain,
+                               get_zone,
+                               get_template,
+                               setNonContiguousVlanIds,
+                               isNetworkDeleted)
+from marvin.codes import PASS
+
+def LimitVlanRange(self, vlanrange, range=2):
+    """Limits the length of vlan range"""
+    vlan_endpoints = str(vlanrange).split("-")
+    vlan_startid = int(vlan_endpoints[1])
+    vlan_endid = vlan_startid + (range-1)
+    return str(vlan_startid) + "-" + str(vlan_endid)
+
+class TestDedicateGuestVLANRange(cloudstackTestCase):
+
+    @classmethod
+    def setUpClass(cls):
+        cls.testClient = super(TestDedicateGuestVLANRange, cls).getClsTestClient()
+        cls.apiclient = cls.testClient.getApiClient()
+        cls.testdata =  cls.testClient.getParsedTestDataConfig()
+        # Get Zone, Domain
+        cls.domain = get_domain(cls.apiclient)
+        cls.zone = get_zone(cls.apiclient)
+        cls.testdata["isolated_network"]["zoneid"] = cls.zone.id
+        cls.testdata['mode'] = cls.zone.networktype
+        template = get_template(
+            cls.apiclient,
+            cls.zone.id,
+            cls.testdata["ostype"]
+            )
+        cls._cleanup = []
+
+        try:
+            cls.isolated_network_offering = NetworkOffering.create(
+                          cls.apiclient,
+                          cls.testdata["nw_off_isolated_persistent"])
+            cls._cleanup.append(cls.isolated_network_offering)
+            cls.isolated_network_offering.update(cls.apiclient, state='Enabled')
+
+            cls.testdata["nw_off_isolated_persistent"]["specifyVlan"] = True
+            cls.isolated_network_offering_vlan = NetworkOffering.create(
+                          cls.apiclient,
+                          cls.testdata["nw_off_isolated_persistent"])
+            cls._cleanup.append(cls.isolated_network_offering_vlan)
+            cls.isolated_network_offering_vlan.update(cls.apiclient, state='Enabled')
+
+            cls.service_offering = ServiceOffering.create(
+                                                          cls.apiclient,
+                                                          cls.testdata["service_offering"])
+            cls._cleanup.append(cls.service_offering)
+
+            cls.testdata["small"]["zoneid"] = cls.zone.id
+            cls.testdata["small"]["template"] = template.id
+        except Exception as e:
+            cls.tearDownClass()
+            raise unittest.SkipTest(e)
+        return
+
+    @classmethod
+    def tearDownClass(cls):
+        try:
+            # Cleanup resources used
+            cleanup_resources(cls.apiclient, cls._cleanup)
+        except Exception as e:
+            raise Exception("Warning: Exception during cleanup : %s" % e)
+        return
+
+    def setUp(self):
+        self.apiclient = self.testClient.getApiClient()
+        self.dbclient = self.testClient.getDbConnection()
+        self.cleanup = []
+        self.physical_network, self.free_vlan = setNonContiguousVlanIds(self.apiclient,
+                                                                            self.zone.id)
+        return
+
+    def tearDown(self):
+        try:
+            # Clean up
+            cleanup_resources(self.apiclient, self.cleanup)
+        except Exception as e:
+            raise Exception("Warning: Exception during cleanup : %s" % e)
+        finally:
+            self.physical_network.update(self.apiclient,
+                        id=self.physical_network.id,
+                        vlan=self.physical_network.vlan)
+        return
+
+    @attr(tags = ["advanced", "selfservice"], required_hardware="false")
+    def test_01_dedicate_guest_vlan_range_root_domain(self):
+        """Dedicate guest vlan range to account in root domain
+
+        # Validate the following:
+        # 1. Create two accounts under root domain
+        # 2. Dedicate a new vlan range to account 1
+        # 3. Verify that the new vlan range is dedicated to account 1
+             by listing the dedicated range and checking the account name
+        # 4. Try to create a guest network in account 2 using a vlan from the dedicated range
+        # 5. The operation should fail
+        # 6. Create a guest network in account 1
+        # 7. Verify that the vlan for the guest network is acquired from the dedicated range
+        # 8. Delete the guest network in account 1
+        # 9. Verify that the network is deleted
+        # 10.Verify that the vlan is still dedicated to account 1 after deleting the network
+        # 11.Release the vlan range back to the system
+        # 12.Verify that the list of dedicated vlans doesn't contain the vlan
+        """
+        self.account1 = Account.create(
+                            self.apiclient,
+                            self.testdata["account"],
+                            domainid=self.domain.id
+                            )
+        self.cleanup.append(self.account1)
+
+        self.account2 = Account.create(
+                            self.apiclient,
+                            self.testdata["account"],
+                            domainid=self.domain.id
+                            )
+        self.cleanup.append(self.account2)
+
+        new_vlan = self.physical_network.vlan + "," + self.free_vlan["partial_range"][0]
+        self.physical_network.update(self.apiclient,
+                id=self.physical_network.id, vlan=new_vlan)
+
+        # Dedicating guest vlan range
+        dedicate_guest_vlan_range_response = PhysicalNetwork.dedicate(
+                                                self.apiclient,
+                                                self.free_vlan["partial_range"][0],
+                                                physicalnetworkid=self.physical_network.id,
+                                                account=self.account1.name,
+                                                domainid=self.account1.domainid
+                                            )
+        list_dedicated_guest_vlan_range_response = PhysicalNetwork.listDedicated(
+                                                self.apiclient,
+                                                id=dedicate_guest_vlan_range_response.id
+                                        )
+        dedicated_guest_vlan_response = list_dedicated_guest_vlan_range_response[0]
+        self.assertEqual(
+                            dedicated_guest_vlan_response.account,
+                            self.account1.name,
+                            "Check account name is in listDedicatedGuestVlanRanges as the account the range is dedicated to"
+                        )
+
+        dedicatedvlans = str(self.free_vlan["partial_range"][0]).split("-")
+
+        with self.assertRaises(Exception):
+            isolated_network1 = Network.create(
+                                   self.apiclient,
+                                   self.testdata["isolated_network"],
+                                   self.account2.name,
+                                   self.account2.domainid,
+                                   networkofferingid=self.isolated_network_offering_vlan.id,
+                                   vlan=int(dedicatedvlans[0]))
+            isolated_network1.delete(self.apiclient)
+
+        isolated_network2 = Network.create(
+                                   self.apiclient,
+                                   self.testdata["isolated_network"],
+                                   self.account1.name,
+                                   self.account1.domainid,
+                                   networkofferingid=self.isolated_network_offering.id)
+
+        networks = Network.list(self.apiclient, id=isolated_network2.id)
+        self.assertEqual(validateList(networks)[0], PASS, "networks list validation failed")
+
+        self.assertTrue(int(dedicatedvlans[0]) <= int(networks[0].vlan) <= int(dedicatedvlans[1]),
+                        "Vlan of the network should be from the dedicated range")
+
+        isolated_network2.delete(self.apiclient)
+        self.assertTrue(isNetworkDeleted(self.apiclient, networkid=isolated_network2.id),
+                        "Network not deleted in timeout period")
+
+        # List after deleting all networks, it should still be dedicated to the account
+        list_dedicated_guest_vlan_range_response = PhysicalNetwork.listDedicated(
+                                                self.apiclient,
+                                                id=dedicate_guest_vlan_range_response.id
+                                        )
+        dedicated_guest_vlan_response = list_dedicated_guest_vlan_range_response[0]
+        self.assertEqual(
+                            dedicated_guest_vlan_response.account,
+                            self.account1.name,
+                            "Check account name is in listDedicatedGuestVlanRanges as the account the range is dedicated to"
+                        )
+
+        self.debug("Releasing guest vlan range");
+        dedicate_guest_vlan_range_response.release(self.apiclient)
+        list_dedicated_guest_vlan_range_response = PhysicalNetwork.listDedicated(self.apiclient)
+        self.assertEqual(
+                        list_dedicated_guest_vlan_range_response,
+                        None,
+                        "Check vlan range is not available in listDedicatedGuestVlanRanges"
+
+                        )
+        return
+
+    @attr(tags = ["advanced", "selfservice"], required_hardware="false")
+    def test_02_dedicate_guest_vlan_range_user_domain(self):
+        """Dedicate guest vlan range to account in user domain
+
+        # Validate the following:
+        # 1. Create two accounts under user domain
+        # 2. Dedicate a new vlan range to account 1
+        # 3. Verify that the new vlan range is dedicated to account 1
+             by listing the dedicated range and checking the account name
+        # 4. Try to create a guest network in account 2 using a vlan from the dedicated range
+        # 5. The operation should fail
+        # 6. Create a guest network in account 1
+        # 7. Verify that the vlan for the guest network is acquired from the dedicated range
+        # 8. Delete the guest network in account 1
+        # 9. Verify that the network is deleted
+        # 10.Verify that the vlan is still dedicated to account 1 after deleting the network
+        # 11.Release the vlan range back to the system
+        # 12.Verify that the list of dedicated vlans doesn't contain the vlan
+        """
+        self.user_domain1 = Domain.create(
+                                self.apiclient,
+                                services=self.testdata["domain"],
+                                parentdomainid=self.domain.id)
+        self.cleanup.append(self.user_domain1)
+
+        #Create Account
+        self.account1 = Account.create(
+                            self.apiclient,
+                            self.testdata["account"],
+                            domainid=self.user_domain1.id
+                            )
+        self.cleanup.insert(-1, self.account1)
+
+        #Create Account
+        self.account2 = Account.create(
+                            self.apiclient,
+                            self.testdata["account"],
+                            domainid=self.user_domain1.id
+                            )
+        self.cleanup.insert(-1, self.account2)
+
+        new_vlan = self.physical_network.vlan + "," + self.free_vlan["partial_range"][0]
+        self.physical_network.update(self.apiclient,
+                id=self.physical_network.id, vlan=new_vlan)
+
+        # Dedicating guest vlan range
+        dedicate_guest_vlan_range_response = PhysicalNetwork.dedicate(
+                                                self.apiclient,
+                                                self.free_vlan["partial_range"][0],
+                                                physicalnetworkid=self.physical_network.id,
+                                                account=self.account1.name,
+                                                domainid=self.account1.domainid
+                                            )
+        list_dedicated_guest_vlan_range_response = PhysicalNetwork.listDedicated(
+                                                self.apiclient,
+                                                id=dedicate_guest_vlan_range_response.id
+                                        )
+        dedicated_guest_vlan_response = list_dedicated_guest_vlan_range_response[0]
+        self.assertEqual(
+                            dedicated_guest_vlan_response.account,
+                            self.account1.name,
+                            "Check account name is in listDedicatedGuestVlanRanges as the account the range is dedicated to"
+                        )
+
+        dedicatedvlans = str(self.free_vlan["partial_range"][0]).split("-")
+
+        with self.assertRaises(Exception):
+            isolated_network1 = Network.create(
+                                   self.apiclient,
+                                   self.testdata["isolated_network"],
+                                   self.account2.name,
+                                   self.account2.domainid,
+                                   networkofferingid=self.isolated_network_offering_vlan.id,
+                                   vlan=int(dedicatedvlans[0]))
+            isolated_network1.delete(self.apiclient)
+
+        isolated_network2 = Network.create(
+                                   self.apiclient,
+                                   self.testdata["isolated_network"],
+                                   self.account1.name,
+                                   self.account1.domainid,
+                                   networkofferingid=self.isolated_network_offering.id)
+
+        networks = Network.list(self.apiclient, id=isolated_network2.id, listall=True)
+        self.assertEqual(validateList(networks)[0], PASS, "networks list validation failed")
+
+        self.assertTrue(int(dedicatedvlans[0]) <= int(networks[0].vlan) <= int(dedicatedvlans[1]),
+                        "Vlan of the network should be from the dedicated range")
+
+        isolated_network2.delete(self.apiclient)
+        self.assertTrue(isNetworkDeleted(self.apiclient, networkid=isolated_network2.id),
+                        "Network not deleted in timeout period")
+
+        self.debug("Releasing guest vlan range");
+        dedicate_guest_vlan_range_response.release(self.apiclient)
+        list_dedicated_guest_vlan_range_response = PhysicalNetwork.listDedicated(self.apiclient)
+        self.assertEqual(
+                        list_dedicated_guest_vlan_range_response,
+                        None,
+                        "Check vlan range is not available in listDedicatedGuestVlanRanges"
+
+                        )
+        return
+
+    @attr(tags = ["advanced", "selfservice"], required_hardware="false")
+    def test_03_multiple_guest_netwoks(self):
+        """Dedicate multiple guest networks in account with dedicated vlan range
+
+        # Validate the following:
+        # 1. Create account under user domain
+        # 2. Dedicate a new vlan range of size 2 to the account
+        # 3. Verify that the new vlan range is dedicated to account
+             by listing the dedicated range and checking the account name
+        # 4. Create a guest network in the account
+        # 5. Verify that the vlan of the network is from dedicated range
+        # 6. Repeat steps 4 and 5 for network 2
+        # 7. Now create 3rd guest network in the account
+        # 8. Verify that the vlan of the network is not from the dedicated range, as
+             all the vlans in dedicated range are now exhausted
+        """
+        self.user_domain = Domain.create(
+                                self.apiclient,
+                                services=self.testdata["domain"],
+                                parentdomainid=self.domain.id)
+        self.cleanup.append(self.user_domain)
+
+        #Create Account
+        self.account = Account.create(
+                            self.apiclient,
+                            self.testdata["account"],
+                            domainid=self.user_domain.id
+                            )
+        self.cleanup.insert(-1, self.account)
+
+        self.free_vlan["partial_range"][0] = LimitVlanRange(self, self.free_vlan["partial_range"][0], range=2)
+        vlan_startid = int(str(self.free_vlan["partial_range"][0]).split("-")[0])
+        vlan_endid = vlan_startid + 1
+
+        new_vlan = self.physical_network.vlan + "," + self.free_vlan["partial_range"][0]
+        self.physical_network.update(self.apiclient,
+                id=self.physical_network.id, vlan=new_vlan)
+
+        # Dedicating guest vlan range
+        dedicate_guest_vlan_range_response = PhysicalNetwork.dedicate(
+                                                self.apiclient,
+                                                self.free_vlan["partial_range"][0],
+                                                physicalnetworkid=self.physical_network.id,
+                                                account=self.account.name,
+                                                domainid=self.account.domainid
+                                            )
+        list_dedicated_guest_vlan_range_response = PhysicalNetwork.listDedicated(
+                                                self.apiclient,
+                                                id=dedicate_guest_vlan_range_response.id
+                                        )
+        dedicated_guest_vlan_response = list_dedicated_guest_vlan_range_response[0]
+        self.assertEqual(
+                            dedicated_guest_vlan_response.account,
+                            self.account.name,
+                            "Check account name is in listDedicatedGuestVlanRanges as the account the range is dedicated to"
+                        )
+
+        isolated_network1 = Network.create(
+                                   self.apiclient,
+                                   self.testdata["isolated_network"],
+                                   self.account.name,
+                                   self.account.domainid,
+                                   networkofferingid=self.isolated_network_offering.id)
+
+        networks = Network.list(self.apiclient, id=isolated_network1.id, listall=True)
+        self.assertEqual(validateList(networks)[0], PASS, "networks list validation failed")
+
+        self.assertTrue(vlan_startid <= int(networks[0].vlan) <= vlan_endid,
+                        "Vlan of the network should be from the dedicated range")
+
+        isolated_network2 = Network.create(
+                                   self.apiclient,
+                                   self.testdata["isolated_network"],
+                                   self.account.name,
+                                   self.account.domainid,
+                                   networkofferingid=self.isolated_network_offering.id)
+
+        networks = Network.list(self.apiclient, id=isolated_network2.id, listall=True)
+        self.assertEqual(validateList(networks)[0], PASS, "networks list validation failed")
+
+        self.assertTrue(vlan_startid <= int(networks[0].vlan) <= vlan_endid,
+                        "Vlan of the network should be from the dedicated range")
+
+        isolated_network3 = Network.create(
+                                   self.apiclient,
+                                   self.testdata["isolated_network"],
+                                   self.account.name,
+                                   self.account.domainid,
+                                   networkofferingid=self.isolated_network_offering.id)
+
+        networks = Network.list(self.apiclient, id=isolated_network3.id, listall=True)
+        self.assertEqual(validateList(networks)[0], PASS, "networks list validation failed")
+
+        self.assertFalse(vlan_startid <= int(networks[0].vlan) <= vlan_endid,
+                        "Vlan of the network should not be from the dedicated range")
+        return
+
+
+    @attr(tags = ["invalid"])
+    def test_04_dedicate_guest_vlan_in_project(self):
+        """Dedicate guest vlan range project owner account and test guest network vlan in project
+
+        # Validate the following:
+        # 1. Create account under user domain
+        # 2. Create a project with this account
+        # 3. Dedicate a new vlan range to the account
+        # 4. Verify that the new vlan range is dedicated to account
+             by listing the dedicated range and checking the account name
+        # 5. Create a guest network in the project
+        # 6. Verify that the vlan of the network is from dedicated range
+        # 7. Repeat steps 5 and 6 for network 2
+        # 8. Now create a 3rd guest network in the project
+        # 9. Verify that the vlan of the network is not from the dedicated range, as
+             all the vlans in dedicated range are now exhausted
+        """
+        user_domain = Domain.create(
+                                self.apiclient,
+                                services=self.testdata["domain"],
+                                parentdomainid=self.domain.id)
+        self.cleanup.append(user_domain)
+        #Create Account
+        self.account = Account.create(
+                            self.apiclient,
+                            self.testdata["account"],
+                            domainid=user_domain.id
+                            )
+        self.cleanup.insert(-1, self.account)
+        # Create project as a domain admin
+        project = Project.create(self.apiclient,
+                                 self.testdata["project"],
+                                 account=self.account.name,
+                                 domainid=self.account.domainid)
+        self.cleanup.insert(-2, project)
+
+        self.free_vlan["partial_range"][0] = LimitVlanRange(self, self.free_vlan["partial_range"][0], range=2)
+        vlan_startid = int(str(self.free_vlan["partial_range"][0]).split("-")[0])
+        vlan_endid = vlan_startid + 1
+
+        new_vlan = self.physical_network.vlan + "," + self.free_vlan["partial_range"][0]
+        self.physical_network.update(self.apiclient,
+                id=self.physical_network.id, vlan=new_vlan)
+
+        # Dedicating guest vlan range
+        dedicate_guest_vlan_range_response = PhysicalNetwork.dedicate(
+                                                self.apiclient,
+                                                self.free_vlan["partial_range"][0],
+                                                physicalnetworkid=self.physical_network.id,
+                                                account=self.account.name,
+                                                domainid=self.account.domainid
+                                                )
+        list_dedicated_guest_vlan_range_response = PhysicalNetwork.listDedicated(
+                                                self.apiclient,
+                                                id=dedicate_guest_vlan_range_response.id
+                                        )
+        dedicated_guest_vlan_response = list_dedicated_guest_vlan_range_response[0]
+        self.assertEqual(
+                            dedicated_guest_vlan_response.account,
+                            self.account.name,
+                            "Check account name is in listDedicatedGuestVlanRanges as the account the range is dedicated to"
+                        )
+
+        isolated_network1 = Network.create(
+                                   self.apiclient,
+                                   self.testdata["isolated_network"],
+                                   projectid=project.id,
+                                   networkofferingid=self.isolated_network_offering.id)
+
+        networks = Network.list(self.apiclient, id=isolated_network1.id, projectid=project.id, listall=True)
+        self.assertEqual(validateList(networks)[0], PASS, "networks list validation failed")
+
+        self.assertTrue(vlan_startid <= int(networks[0].vlan) <= vlan_endid,
+                        "Vlan of the network should be from the dedicated range")
+
+        isolated_network2 = Network.create(
+                                   self.apiclient,
+                                   self.testdata["isolated_network"],
+                                   projectid=project.id,
+                                   networkofferingid=self.isolated_network_offering.id)
+
+        networks = Network.list(self.apiclient, id=isolated_network2.id, projectid=project.id, listall=True)
+        self.assertEqual(validateList(networks)[0], PASS, "networks list validation failed")
+
+        self.assertTrue(vlan_startid <= int(networks[0].vlan) <= vlan_endid,
+                        "Vlan of the network should be from the dedicated range")
+
+        isolated_network3 = Network.create(
+                                   self.apiclient,
+                                   self.testdata["isolated_network"],
+                                   projectid=project.id,
+                                   networkofferingid=self.isolated_network_offering.id)
+
+        networks = Network.list(self.apiclient, id=isolated_network3.id, projectid=project.id, listall=True)
+        self.assertEqual(validateList(networks)[0], PASS, "networks list validation failed")
+
+        self.assertFalse(vlan_startid <= int(networks[0].vlan) <= vlan_endid,
+                        "Vlan of the network should be from the dedicated range")
+        return
+
+    @attr(tags = ["advanced", "selfservice"], required_hardware="false")
+    def test_05_dedicate_range_different_accounts(self):
+        """Dedicate two different vlan ranges to two different accounts
+
+        # Validate the following:
+        # 1. Create two accounts in root domain
+        # 2. Update the physical network with two different vlan ranges
+        # 3. Dedicate first vlan range to the account 1
+        # 4. Dedicate 2nd vlan range to account 2
+        # 5. Both the operations should be successful
+        """
+        self.account1 = Account.create(
+                            self.apiclient,
+                            self.testdata["account"],
+                            domainid=self.domain.id
+                            )
+        self.cleanup.append(self.account1)
+
+        self.account2 = Account.create(
+                            self.apiclient,
+                            self.testdata["account"],
+                            domainid=self.domain.id
+                            )
+        self.cleanup.append(self.account2)
+
+        new_vlan = self.physical_network.vlan + "," + self.free_vlan["partial_range"][0] + ","+\
+                   self.free_vlan["partial_range"][1]
+        self.physical_network.update(self.apiclient,
+                id=self.physical_network.id, vlan=new_vlan)
+
+        # Dedicating guest vlan range
+        dedicate_guest_vlan_range_response = PhysicalNetwork.dedicate(
+                                                 self.apiclient,
+                                                 self.free_vlan["partial_range"][0],
+                                                 physicalnetworkid=self.physical_network.id,
+                                                 account=self.account1.name,
+                                                 domainid=self.account1.domainid
+                                                 )
+        list_dedicated_guest_vlan_range_response = PhysicalNetwork.listDedicated(
+                                                       self.apiclient,
+                                                       id=dedicate_guest_vlan_range_response.id
+                                                       )
+        dedicated_guest_vlan_response = list_dedicated_guest_vlan_range_response[0]
+        self.assertEqual(
+                            dedicated_guest_vlan_response.account,
+                            self.account1.name,
+                            "Check account name is in listDedicatedGuestVlanRanges as the account the range is dedicated to"
+                        )
+
+        # Dedicating guest vlan range
+        dedicate_guest_vlan_range_response = PhysicalNetwork.dedicate(
+                                                 self.apiclient,
+                                                 self.free_vlan["partial_range"][1],
+                                                 physicalnetworkid=self.physical_network.id,
+                                                 account=self.account2.name,
+                                                 domainid=self.account2.domainid
+                                                 )
+
+        list_dedicated_guest_vlan_range_response = PhysicalNetwork.listDedicated(
+                                                       self.apiclient,
+                                                       id=dedicate_guest_vlan_range_response.id
+                                                       )
+        dedicated_guest_vlan_response = list_dedicated_guest_vlan_range_response[0]
+        self.assertEqual(
+                         dedicated_guest_vlan_response.account,
+                         self.account2.name,
+                         "Check account name is in listDedicatedGuestVlanRanges as the account the range is dedicated to"
+                         )
+        return
+
+    @attr(tags = ["advanced", "selfservice"], required_hardware="false")
+    def test_07_extend_vlan_range(self):
+        """Dedicate vlan range to an account when some vlan in range are already acquired by same account
+
+        # Validate the following:
+        # 1. Create account under root domain
+        # 2. Add a new vlan range to the physical network
+        # 3. Create a guest network in account using the vlan id from the newly added range
+        # 4. Try to dedicate the vlan range to account
+        # 5. Operation should succeed
+        """
+        self.account = Account.create(
+                            self.apiclient,
+                            self.testdata["account"],
+                            domainid=self.domain.id
+                            )
+        self.cleanup.append(self.account)
+
+        vlans = str(self.free_vlan["partial_range"][0]).split("-")
+        startid = int(vlans[0])
+        endid = int(vlans[1])
+
+        vlan_range1 = str(startid) + "-" + str(endid)
+        vlan_range2 = str(endid+1) + "-" + str(endid+2)
+        full_range = str(startid) + "-" + str(endid+2)
+
+        new_vlan = self.physical_network.vlan + "," + full_range
+        self.physical_network.update(self.apiclient,
+                id=self.physical_network.id, vlan=new_vlan)
+
+        # Dedicating first range
+        PhysicalNetwork.dedicate(
+                                 self.apiclient,
+                                 vlan_range1,
+                                 physicalnetworkid=self.physical_network.id,
+                                 account=self.account.name,
+                                 domainid=self.account.domainid
+                                 )
+
+        # Dedicating second range
+        PhysicalNetwork.dedicate(
+                                 self.apiclient,
+                                 vlan_range2,
+                                 physicalnetworkid=self.physical_network.id,
+                                 account=self.account.name,
+                                 domainid=self.account.domainid
+                                 )
+
+        dedicated_ranges = PhysicalNetwork.listDedicated(
+                                                self.apiclient,
+                                                account=self.account.name,
+                                                domainid=self.account.domainid,
+                                                listall=True
+                                                )
+        self.assertEqual(str(dedicated_ranges[0].guestvlanrange), full_range, "Dedicated vlan\
+                         range not matching with expected extended range")
+
+        return
+
+class TestFailureScenarios(cloudstackTestCase):
+
+    @classmethod
+    def setUpClass(cls):
+        cls.testClient = super(TestFailureScenarios, cls).getClsTestClient()
+        cls.apiclient = cls.testClient.getApiClient()
+        cls.testdata =  cls.testClient.getParsedTestDataConfig()
+        # Get Zone, Domain
+        cls.domain = get_domain(cls.apiclient)
+        cls.zone = get_zone(cls.apiclient)
+        cls.testdata["isolated_network"]["zoneid"] = cls.zone.id
+        cls.testdata['mode'] = cls.zone.networktype
+        template = get_template(
+            cls.apiclient,
+            cls.zone.id,
+            cls.testdata["ostype"]
+            )
+        cls._cleanup = []
+
+        try:
+            cls.isolated_network_offering = NetworkOffering.create(
+                          cls.apiclient,
+                          cls.testdata["nw_off_isolated_persistent"])
+            cls._cleanup.append(cls.isolated_network_offering)
+            cls.isolated_network_offering.update(cls.apiclient, state='Enabled')
+
+            cls.testdata["nw_off_isolated_persistent"]["specifyVlan"] = True
+            cls.isolated_network_offering_vlan = NetworkOffering.create(
+                          cls.apiclient,
+                          cls.testdata["nw_off_isolated_persistent"])
+            cls._cleanup.append(cls.isolated_network_offering_vlan)
+            cls.isolated_network_offering_vlan.update(cls.apiclient, state='Enabled')
+
+            cls.service_offering = ServiceOffering.create(
+                                                          cls.apiclient,
+                                                          cls.testdata["service_offering"])
+            cls._cleanup.append(cls.service_offering)
+
+            cls.testdata["small"]["zoneid"] = cls.zone.id
+            cls.testdata["small"]["template"] = template.id
+        except Exception as e:
+            cls.tearDownClass()
+            raise unittest.SkipTest(e)
+        return
+
+    @classmethod
+    def tearDownClass(cls):
+        try:
+            # Cleanup resources used
+            cleanup_resources(cls.apiclient, cls._cleanup)
+        except Exception as e:
+            raise Exception("Warning: Exception during cleanup : %s" % e)
+        return
+
+    def setUp(self):
+        self.apiclient = self.testClient.getApiClient()
+        self.dbclient = self.testClient.getDbConnection()
+        self.cleanup = []
+        self.physical_network, self.free_vlan = setNonContiguousVlanIds(self.apiclient,
+                                                                            self.zone.id)
+        return
+
+    def tearDown(self):
+        try:
+            # Clean up
+            cleanup_resources(self.apiclient, self.cleanup)
+        except Exception as e:
+            raise Exception("Warning: Exception during cleanup : %s" % e)
+        finally:
+            self.physical_network.update(self.apiclient,
+                        id=self.physical_network.id,
+                        vlan=self.physical_network.vlan)
+        return
+
+    @attr(tags = ["advanced", "selfservice"], required_hardware="false")
+    def test_01_dedicate_wrong_vlan_range(self):
+        """Dedicate invalid vlan range to account
+
+        # Validate the following:
+        # 1. Create an account in root domain
+        # 2. Try to update physical network with invalid range (5000-5001)
+             and dedicate it to account
+        # 3. The operation should fail
+        """
+        self.account = Account.create(
+                            self.apiclient,
+                            self.testdata["account"],
+                            domainid=self.domain.id
+                            )
+        self.cleanup.append(self.account)
+
+        vlan_range = "5000-5001"
+
+        new_vlan = self.physical_network.vlan + "," + vlan_range
+
+        with self.assertRaises(Exception):
+            self.physical_network.update(self.apiclient,
+                                         id=self.physical_network.id,
+                                         vlan=new_vlan)
+
+            # Dedicating guest vlan range
+            PhysicalNetwork.dedicate(
+                                     self.apiclient,
+                                     vlan_range,
+                                     physicalnetworkid=self.physical_network.id,
+                                     account=self.account.name,
+                                     domainid=self.account.domainid
+                                    )
+        return
+
+    @attr(tags = ["advanced", "selfservice"], required_hardware="false")
+    def test_02_dedicate_vlan_range_invalid_account(self):
+        """Dedicate a guest vlan range to invalid account
+
+        # Validate the following:
+        # 1. Create an account in root domain
+        # 2. Update physical network with new guest vlan range
+        # 3. Try to dedicate it to invalid account
+        # 4. The operation should fail
+        """
+        self.account = Account.create(
+                            self.apiclient,
+                            self.testdata["account"],
+                            domainid=self.domain.id
+                            )
+        self.cleanup.append(self.account)
+
+        new_vlan = self.physical_network.vlan + "," + self.free_vlan["partial_range"][0]
+        self.physical_network.update(self.apiclient,
+                                     id=self.physical_network.id,
+                                     vlan=new_vlan)
+
+        with self.assertRaises(Exception):
+            # Dedicating guest vlan range
+            PhysicalNetwork.dedicate(
+                                     self.apiclient,
+                                     self.free_vlan["partial_range"][0],
+                                     physicalnetworkid=self.physical_network.id,
+                                     account=self.account.name+random_gen(),
+                                     domainid=self.account.domainid
+                                    )
+        return
+
+    @attr(tags = ["advanced", "selfservice"], required_hardware="false")
+    def test_03_dedicate_already_dedicated_range(self):
+        """Dedicate a guest vlan range which is already dedicated
+
+        # Validate the following:
+        # 1. Create two accounts in root domain
+        # 2. Update physical network with new guest vlan range
+        # 3. Dedicate the vlan range to account 1
+        # 4. Try to dedicate the same range to account 2, operation should fail
+        """
+        self.account1 = Account.create(
+                            self.apiclient,
+                            self.testdata["account"],
+                            domainid=self.domain.id
+                            )
+        self.cleanup.append(self.account1)
+
+        self.account2 = Account.create(
+                            self.apiclient,
+                            self.testdata["account"],
+                            domainid=self.domain.id
+                            )
+        self.cleanup.append(self.account2)
+
+        new_vlan = self.physical_network.vlan + "," + self.free_vlan["partial_range"][0]
+        self.physical_network.update(self.apiclient,
+                                     id=self.physical_network.id,
+                                     vlan=new_vlan)
+
+        # Dedicating guest vlan range
+        PhysicalNetwork.dedicate(
+                                     self.apiclient,
+                                     self.free_vlan["partial_range"][0],
+                                     physicalnetworkid=self.physical_network.id,
+                                     account=self.account1.name,
+                                     domainid=self.account1.domainid
+                                    )
+
+        with self.assertRaises(Exception):
+            # Dedicating guest vlan range
+            PhysicalNetwork.dedicate(
+                                     self.apiclient,
+                                     self.free_vlan["partial_range"][0],
+                                     physicalnetworkid=self.physical_network.id,
+                                     account=self.account2.name,
+                                     domainid=self.account2.domainid
+                                    )
+        return
+
+class TestDeleteVlanRange(cloudstackTestCase):
+
+    @classmethod
+    def setUpClass(cls):
+        cls.testClient = super(TestDeleteVlanRange, cls).getClsTestClient()
+        cls.apiclient = cls.testClient.getApiClient()
+        cls.testdata =  cls.testClient.getParsedTestDataConfig()
+        # Get Zone, Domain
+        cls.domain = get_domain(cls.apiclient)
+        cls.zone = get_zone(cls.apiclient)
+        cls.testdata["isolated_network"]["zoneid"] = cls.zone.id
+        cls.testdata['mode'] = cls.zone.networktype
+        template = get_template(
+            cls.apiclient,
+            cls.zone.id,
+            cls.testdata["ostype"]
+            )
+        cls._cleanup = []
+
+        try:
+            cls.isolated_persistent_network_offering = NetworkOffering.create(
+                          cls.apiclient,
+                          cls.testdata["nw_off_isolated_persistent"])
+            cls._cleanup.append(cls.isolated_persistent_network_offering)
+            cls.isolated_persistent_network_offering.update(cls.apiclient, state='Enabled')
+
+            cls.isolated_network_offering = NetworkOffering.create(
+                          cls.apiclient,
+                          cls.testdata["isolated_network_offering"])
+            cls._cleanup.append(cls.isolated_network_offering)
+            cls.isolated_network_offering.update(cls.apiclient, state='Enabled')
+
+            cls.testdata["nw_off_isolated_persistent"]["specifyvlan"] = True
+            cls.isolated_network_offering_vlan = NetworkOffering.create(
+                          cls.apiclient,
+                          cls.testdata["nw_off_isolated_persistent"])
+            cls._cleanup.append(cls.isolated_network_offering_vlan)
+            cls.isolated_network_offering_vlan.update(cls.apiclient, state='Enabled')
+
+            cls.service_offering = ServiceOffering.create(
+                                                          cls.apiclient,
+                                                          cls.testdata["service_offering"])
+            cls._cleanup.append(cls.service_offering)
+
+            cls.testdata["small"]["zoneid"] = cls.zone.id
+            cls.testdata["small"]["template"] = template.id
+        except Exception as e:
+            cls.tearDownClass()
+            raise unittest.SkipTest(e)
+        return
+
+    @classmethod
+    def tearDownClass(cls):
+        try:
+            # Cleanup resources used
+            cleanup_resources(cls.apiclient, cls._cleanup)
+        except Exception as e:
+            raise Exception("Warning: Exception during cleanup : %s" % e)
+        return
+
+    def setUp(self):
+        self.apiclient = self.testClient.getApiClient()
+        self.dbclient = self.testClient.getDbConnection()
+        self.cleanup = []
+        self.physical_network, self.free_vlan = setNonContiguousVlanIds(self.apiclient,
+                                                                            self.zone.id)
+        return
+
+    def tearDown(self):
+        try:
+            # Clean up
+            cleanup_resources(self.apiclient, self.cleanup)
+        except Exception as e:
+            raise Exception("Warning: Exception during cleanup : %s" % e)
+        finally:
+            self.physical_network.update(self.apiclient,
+                        id=self.physical_network.id,
+                        vlan=self.physical_network.vlan)
+        return
+
+    @attr(tags = ["advanced", "selfservice"], required_hardware="false")
+    def test_01_delete_dedicated_vlan_range(self):
+        """Try to delete a dedicated vlan range which is not in use
+
+        # Validate the following:
+        # 1. Create an account in the root domain
+        # 2. Update the physical network with a new vlan range
+        # 3. Dedicate this vlan range to the account
+        # 4. Verify that the vlan range is dedicated to the account by listing it
+             and verifying the account name
+        # 5. Try to delete the vlan range by updating physical network vlan, operation should fail
+        # 6. Release the dedicated range and then delete the vlan range
+        # 7. The operation should succeed
+        """
+        self.account = Account.create(
+                            self.apiclient,
+                            self.testdata["account"],
+                            domainid=self.domain.id
+                            )
+        self.cleanup.append(self.account)
+        new_vlan = self.free_vlan["partial_range"][0]
+        extended_vlan = self.physical_network.vlan + "," + new_vlan
+
+        self.physical_network.update(self.apiclient,
+                                         id=self.physical_network.id,
+                                         vlan=extended_vlan)
+
+        # Dedicating guest vlan range
+        dedicate_guest_vlan_range_response = PhysicalNetwork.dedicate(
+                                                self.apiclient,
+                                                self.free_vlan["partial_range"][0],
+                                                physicalnetworkid=self.physical_network.id,
+                                                account=self.account.name,
+                                                domainid=self.account.domainid
+                                                )
+        list_dedicated_guest_vlan_range_response = PhysicalNetwork.listDedicated(
+                                                self.apiclient,
+                                                id=dedicate_guest_vlan_range_response.id
+                                        )
+        dedicated_guest_vlan_response = list_dedicated_guest_vlan_range_response[0]
+        self.assertEqual(
+                            dedicated_guest_vlan_response.account,
+                            self.account.name,
+                            "Check account name is in listDedicatedGuestVlanRanges as the account the range is dedicated to"
+                        )
+
+        with self.assertRaises(Exception):
+            # Deleting the dedicated vlan range
+            self.physical_network.update(self.apiclient,
+                        id=self.physical_network.id,
+                        vlan=self.physical_network.vlan)
+
+        dedicate_guest_vlan_range_response.release(self.apiclient)
+        self.physical_network.update(self.apiclient,
+                        id=self.physical_network.id,
+                        vlan=self.physical_network.vlan)
+        physical_networks = PhysicalNetwork.list(self.apiclient, id=self.physical_network.id, listall=True)
+        self.assertEqual(validateList(physical_networks)[0], PASS, "Physical networks list validation failed")
+        vlans = xsplit(physical_networks[0].vlan, [','])
+        self.assertFalse(new_vlan in vlans, "newly added vlan is not deleted from physical network")
+
+    @attr(tags = ["advanced", "selfservice"], required_hardware="false")
+    def test_02_delete_dedicated_vlan_range_vlan_in_use(self):
+        """Try to delete a dedicated vlan rang which is in use
+
+        # Validate the following:
+        # 1. Creat an account in the root domain
+        # 2. update the physical network with a new vlan range
+        # 3. Dedicated this vlan range to the account
+        # 4. Verify that the vlan range is dedicated to the account by listing it
+             and verifying the account name
+        # 5. Create a guest network in the account and verify that the vlan of network
+             is from the dedicated range
+        # 6. Try to delete the vlan range by updating physical network vlan
+        # 7. The operation should fail
+        """
+        self.account = Account.create(
+                            self.apiclient,
+                            self.testdata["account"],
+                            domainid=self.domain.id
+                            )
+        self.cleanup.append(self.account)
+
+        new_vlan = self.physical_network.vlan + "," + self.free_vlan["partial_range"][0]
+
+        self.physical_network.update(self.apiclient,
+                                         id=self.physical_network.id,
+                                         vlan=new_vlan)
+
+        # Dedicating guest vlan range
+        dedicate_guest_vlan_range_response = PhysicalNetwork.dedicate(
+                                                self.apiclient,
+                                                self.free_vlan["partial_range"][0],
+                                                physicalnetworkid=self.physical_network.id,
+                                                account=self.account.name,
+                                                domainid=self.account.domainid
+                                                )
+        list_dedicated_guest_vlan_range_response = PhysicalNetwork.listDedicated(
+                                                self.apiclient,
+                                                id=dedicate_guest_vlan_range_response.id
+                                        )
+        dedicated_guest_vlan_response = list_dedicated_guest_vlan_range_response[0]
+        self.assertEqual(
+                            dedicated_guest_vlan_response.account,
+                            self.account.name,
+                            "Check account name is in listDedicatedGuestVlanRanges as the account the range is dedicated to"
+                        )
+
+        Network.create(
+                                   self.apiclient,
+                                   self.testdata["isolated_network"],
+                                   self.account.name,
+                                   self.account.domainid,
+                                   networkofferingid=self.isolated_persistent_network_offering.id)
+
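+        # The guest network consumes a vlan from the dedicated range, so removing the
+        # range from the physical network is expected to fail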
+        with self.assertRaises(Exception):
+            # Deleting the dedicated vlan range
+            self.physical_network.update(self.apiclient,
+                        id=self.physical_network.id,
+                        vlan=self.physical_network.vlan)
+        return
+
+    @attr(tags = ["advanced", "selfservice"], required_hardware="false")
+    def test_03_delete_account(self):
+        """Try to delete a dedicated vlan rang which is in use
+
+        # Validate the following:
+        # 1. Create an account in the root domain
+        # 2. Update the physical network with a new vlan range
+        # 3. Dedicate this vlan range to the account
+        # 4. Verify that the vlan range is dedicated to the account by listing it
+             and verifying the account name
+        # 5. Create a guest network in the account which consumes vlan from dedicated range
+        # 6. Delete the account
+        # 7. Verify that the vlan of the physical network remains the same
+        """
+        self.account = Account.create(
+                            self.apiclient,
+                            self.testdata["account"],
+                            domainid=self.domain.id
+                            )
+        self.cleanup.append(self.account)
+
+        new_vlan = self.physical_network.vlan + "," + self.free_vlan["partial_range"][0]
+
+        self.physical_network.update(self.apiclient,
+                                         id=self.physical_network.id,
+                                         vlan=new_vlan)
+
+        # Dedicating guest vlan range
+        dedicate_guest_vlan_range_response = PhysicalNetwork.dedicate(
+                                                self.apiclient,
+                                                self.free_vlan["partial_range"][0],
+                                                physicalnetworkid=self.physical_network.id,
+                                                account=self.account.name,
+                                                domainid=self.account.domainid
+                                                )
+        list_dedicated_guest_vlan_range_response = PhysicalNetwork.listDedicated(
+                                                self.apiclient,
+                                                id=dedicate_guest_vlan_range_response.id
+                                        )
+        dedicated_guest_vlan_response = list_dedicated_guest_vlan_range_response[0]
+        self.assertEqual(
+                            dedicated_guest_vlan_response.account,
+                            self.account.name,
+                            "Check account name is in listDedicatedGuestVlanRanges as the account the range is dedicated to"
+                        )
+
+        Network.create(
+                                   self.apiclient,
+                                   self.testdata["isolated_network"],
+                                   self.account.name,
+                                   self.account.domainid,
+                                   networkofferingid=self.isolated_persistent_network_offering.id)
+
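+        # Delete the account and drop it from cleanup so tearDown does not delete it twice;
+        # the vlan range added to the physical network should remain unchanged afterwards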
+        self.account.delete(self.apiclient)
+        self.cleanup.remove(self.account)
+
+        physical_networks = PhysicalNetwork.list(self.apiclient, id=self.physical_network.id, listall=True)
+        self.assertEqual(validateList(physical_networks)[0], PASS, "Physical networks list validation failed")
+        self.assertEqual(physical_networks[0].vlan, new_vlan,
+                         "The vlan of the physical network should remain the same after deleting the account")
+        return
+
+    @attr(tags = ["advanced", "selfservice"], required_hardware="false")
+    def test_04_release_range_no_vlan_in_use(self):
+        """Release a dedicated vlan range when no vlan id is in use
+
+        # Validate the following:
+        # 1. Create account in root domain
+        # 2. Dedicate a new vlan range to account
+        # 3. Verify that the new vlan range is dedicated to account
+             by listing the dedicated range and checking the account name
+        # 4. Release the range
+        # 5. Verify the range is released back to system by listing dedicated ranges (list should be empty)
+        """
+        self.account1 = Account.create(
+                            self.apiclient,
+                            self.testdata["account"],
+                            domainid=self.domain.id
+                            )
+        self.cleanup.append(self.account1)
+
+        new_vlan = self.physical_network.vlan + "," + self.free_vlan["partial_range"][0]
+        self.physical_network.update(self.apiclient,
+                id=self.physical_network.id, vlan=new_vlan)
+
+        # Dedicating guest vlan range
+        dedicate_guest_vlan_range_response = PhysicalNetwork.dedicate(
+                                                self.apiclient,
+                                                self.free_vlan["partial_range"][0],
+                                                physicalnetworkid=self.physical_network.id,
+                                                account=self.account1.name,
+                                                domainid=self.account1.domainid
+                                            )
+        list_dedicated_guest_vlan_range_response = PhysicalNetwork.listDedicated(
+                                                self.apiclient,
+                                                id=dedicate_guest_vlan_range_response.id
+                                        )
+        dedicated_guest_vlan_response = list_dedicated_guest_vlan_range_response[0]
+        self.assertEqual(
+                            dedicated_guest_vlan_response.account,
+                            self.account1.name,
+                            "Check account name is in listDedicatedGuestVlanRanges as the account the range is dedicated to"
+                        )
+
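+        # No vlan from the dedicated range is in use, so after releasing it the
+        # listDedicatedGuestVlanRanges call should return no entries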
+        self.debug("Releasing guest vlan range")
+        dedicate_guest_vlan_range_response.release(self.apiclient)
+        list_dedicated_guest_vlan_range_response = PhysicalNetwork.listDedicated(self.apiclient)
+        self.assertEqual(
+                        list_dedicated_guest_vlan_range_response,
+                        None,
+                        "Check vlan range is not available in listDedicatedGuestVlanRanges"
+                        )
+        return
+
+    @attr(tags = ["advanced", "selfservice"], required_hardware="false")
+    def test_05_release_range_vlan_in_use(self):
+        """Release a dedicated vlan range when no vlan id is in use
+
+        # Validate the following:
+        # 1. Create account in root domain
+        # 2. Dedicate a new vlan range to account
+        # 3. Verify that the new vlan range is dedicated to account
+             by listing the dedicated range and checking the account name
+        # 4. Create a guest network in the account and verify that its vlan is from the dedicated range
+        # 5. Release the range
+        # 6. The operation should succeed, as all vlans which are not in use should be released
+        """
+        self.account1 = Account.create(
+                            self.apiclient,
+                            self.testdata["account"],
+                            domainid=self.domain.id
+                            )
+        self.cleanup.append(self.account1)
+
+        new_vlan = self.physical_network.vlan + "," + self.free_vlan["partial_range"][0]
+        self.physical_network.update(self.apiclient,
+                id=self.physical_network.id, vlan=new_vlan)
+
+        # Dedicating guest vlan range
+        dedicate_guest_vlan_range_response = PhysicalNetwork.dedicate(
+                                                self.apiclient,
+                                                self.free_vlan["partial_range"][0],
+                                                physicalnetworkid=self.physical_network.id,
+                                                account=self.account1.name,
+                                                domainid=self.account1.domainid
+                                            )
+        list_dedicated_guest_vlan_range_response = PhysicalNetwork.listDedicated(
+                                                self.apiclient,
+                                                id=dedicate_guest_vlan_range_response.id
+                                                )
+        dedicated_guest_vlan_response = list_dedicated_guest_vlan_range_response[0]
+        self.assertEqual(
+                            dedicated_guest_vlan_response.account,
+                            self.account1.name,
+                            "Check account name is in listDedicatedGuestVlanRanges as the account the range is dedicated to"
+                        )
+
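+        # The dedicated partial range is of the form "startvlan-endvlan"; split it so the
+        # guest network's vlan can be checked against the range boundaries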
+        dedicatedvlans = str(self.free_vlan["partial_range"][0]).split("-")
+
+        isolated_network = Network.create(
+                                   self.apiclient,
+                                   self.testdata["isolated_network"],
+                                   self.account1.name,
+                                   self.account1.domainid,
+                                   networkofferingid=self.isolated_persistent_network_offering.id)
+
+        networks = Network.list(self.apiclient, id=isolated_network.id)
+        self.assertEqual(validateList(networks)[0], PASS, "networks list validation failed")
+
+        self.assertTrue(int(dedicatedvlans[0]) <= int(networks[0].vlan) <= int(dedicatedvlans[1]),
+                        "Vlan of the network should be from the dedicated range")
+
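+        # Releasing should succeed even though a vlan from the range is in use by the
+        # guest network, and the range should no longer appear in listDedicatedGuestVlanRanges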
+        self.debug("Releasing guest vlan range")
+        dedicate_guest_vlan_range_response.release(self.apiclient)
+        list_dedicated_guest_vlan_range_response = PhysicalNetwork.listDedicated(self.apiclient)
+        self.assertEqual(
+                        list_dedicated_guest_vlan_range_response,
+                        None,
+                        "Check vlan range is not available in listDedicatedGuestVlanRanges"
+                        )
+        return