Posted to commits@cloudstack.apache.org by sa...@apache.org on 2016/02/11 11:49:19 UTC

[1/3] git commit: updated refs/heads/master to cc8508d

Repository: cloudstack
Updated Branches:
  refs/heads/master d3dc053b7 -> cc8508d80


CLOUDSTACK-8717: Failed to start instance after restoring the running instance


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/af28ded9
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/af28ded9
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/af28ded9

Branch: refs/heads/master
Commit: af28ded904045734d83cac4c4fe711e5e1315a5b
Parents: 7b47c49
Author: Priti Sarap <pr...@clogeny.com>
Authored: Fri Aug 7 17:57:54 2015 +0530
Committer: Priti Sarap <pr...@clogeny.com>
Committed: Mon Aug 10 11:03:14 2015 +0530

----------------------------------------------------------------------
 .../testpaths/testpath_restore_vm.py            | 201 +++++++++++++++++++
 1 file changed, 201 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/af28ded9/test/integration/testpaths/testpath_restore_vm.py
----------------------------------------------------------------------
diff --git a/test/integration/testpaths/testpath_restore_vm.py b/test/integration/testpaths/testpath_restore_vm.py
new file mode 100644
index 0000000..7fd1610
--- /dev/null
+++ b/test/integration/testpaths/testpath_restore_vm.py
@@ -0,0 +1,201 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+"""
+Test restore of a running VM on VMware with one cluster having two primary storages
+"""
+
+
+from nose.plugins.attrib import attr
+from marvin.cloudstackTestCase import cloudstackTestCase
+from marvin.lib.utils import cleanup_resources
+from marvin.lib.base import (Account,
+                             ServiceOffering,
+                             VirtualMachine,
+                             StoragePool
+                             )
+from marvin.lib.common import (get_domain,
+                               get_zone,
+                               get_template,
+                               list_volumes,
+                               list_virtual_machines
+                               )
+
+from marvin.codes import CLUSTERTAG1, ROOT
+import time
+
+
+class TestRestoreVM(cloudstackTestCase):
+
+    @classmethod
+    def setUpClass(cls):
+        testClient = super(TestRestoreVM, cls).getClsTestClient()
+        cls.apiclient = testClient.getApiClient()
+        cls.testdata = testClient.getParsedTestDataConfig()
+        cls.hypervisor = cls.testClient.getHypervisorInfo()
+
+        # Get Zone, Domain and templates
+        cls.domain = get_domain(cls.apiclient)
+        cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
+
+        cls.template = get_template(
+            cls.apiclient,
+            cls.zone.id,
+            cls.testdata["ostype"])
+
+        cls._cleanup = []
+
+        try:
+            cls.skiptest = False
+            if cls.hypervisor.lower() not in ["vmware"]:
+                cls.skiptest = True
+                return
+
+            # Create an account
+            cls.account = Account.create(
+                cls.apiclient,
+                cls.testdata["account"],
+                domainid=cls.domain.id
+            )
+            cls._cleanup.append(cls.account)
+            # Create user api client of the account
+            cls.userapiclient = testClient.getUserApiClient(
+                UserName=cls.account.name,
+                DomainName=cls.account.domain
+            )
+            # Create Service offering
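+            # The CLUSTERTAG1 storage tag restricts this offering's VMs to
+            # cluster wide pools carrying the same tag.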
+            cls.service_offering_cwps = ServiceOffering.create(
+                cls.apiclient,
+                cls.testdata["service_offering"],
+                tags=CLUSTERTAG1
+            )
+            cls._cleanup.append(cls.service_offering_cwps)
+        except Exception as e:
+            cls.tearDownClass()
+            raise e
+        return
+
+    @classmethod
+    def tearDownClass(cls):
+        try:
+            cleanup_resources(cls.apiclient, cls._cleanup)
+        except Exception as e:
+            raise Exception("Warning: Exception during cleanup: %s" % e)
+
+    def setUp(self):
+
+        self.cleanup = []
+        if self.skiptest:
+            self.skipTest("This test is to be checked on VMWare only \
+                    Hence, skip for %s" % self.hypervisor)
+
+        self.apiclient = self.testClient.getApiClient()
+        self.dbclient = self.testClient.getDbConnection()
+
+    def tearDown(self):
+        try:
+            cleanup_resources(self.apiclient, self.cleanup)
+        except Exception as e:
+            raise Exception("Warning: Exception during cleanup: %s" % e)
+        return
+
+    @attr(tags=["advanced", "basic"], required_hardware="true")
+    def test_01_recover_VM(self):
+        """ Test Restore VM on VMWare
+            1. Deploy a VM without datadisk
+            2. Restore the VM
+            3. Verify that VM comes up in Running state
+        """
+        try:
+            self.pools = StoragePool.list(
+                self.apiclient,
+                zoneid=self.zone.id,
+                scope="CLUSTER")
+
+            if len(self.pools) < 2:
+                self.skipTest("There must be at least two cluster wide\
+                storage pools available in the setup")
+
+        except Exception as e:
+            self.skipTest(e)
+
+        # Adding tags to Storage Pools
+        cluster_no = 1
+        self.debug("Storage Pools: %s" % self.pools)
+        for storagePool in self.pools:
+            if storagePool.scope == "CLUSTER":
+                StoragePool.update(
+                    self.apiclient,
+                    id=storagePool.id,
+                    tags=[CLUSTERTAG1[:-1] + repr(cluster_no)])
+                cluster_no += 1
+
+        self.vm = VirtualMachine.create(
+            self.apiclient,
+            self.testdata["small"],
+            accountid=self.account.name,
+            templateid=self.template.id,
+            domainid=self.account.domainid,
+            serviceofferingid=self.service_offering_cwps.id,
+            zoneid=self.zone.id,
+        )
+        # Step 2
+
+        volumes_root_list = list_volumes(
+            self.apiclient,
+            virtualmachineid=self.vm.id,
+            type=ROOT,
+            listall=True
+        )
+
+        root_volume = volumes_root_list[0]
+
+        # Restore the VM until its ROOT disk is recreated on another primary storage
+        while True:
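+            # Each restore recreates the ROOT volume, so the allocator may
+            # place it on a different pool; stop once it leaves pools[0].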
+            self.vm.restore(self.apiclient)
+            volumes_root_list = list_volumes(
+                self.apiclient,
+                virtualmachineid=self.vm.id,
+                type=ROOT,
+                listall=True
+            )
+
+            root_volume = volumes_root_list[0]
+
+            if root_volume.storage != self.pools[0].name:
+                break
+
+        # Step 3
+        vm_list = list_virtual_machines(
+            self.apiclient,
+            id=self.vm.id)
+
+        state = vm_list[0].state
+        i = 0
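+        # Poll every 10 seconds, for up to 10 attempts, until the VM
+        # reaches the Running state.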
+        while state != "Running":
+            vm_list = list_virtual_machines(
+                self.apiclient,
+                id=self.vm.id)
+
+            time.sleep(10)
+            i += 1
+            state = vm_list[0].state
+            if i >= 10:
+                self.fail("VM did not reach Running state after restore")
+
+        return
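
For reference, the VirtualMachine.restore() call exercised above wraps the
restoreVirtualMachine API, which recreates the VM's ROOT disk from the
template. A minimal sketch of the raw Marvin call, assuming a connected
apiclient and a hypothetical vm_id of a running instance:

    from marvin.cloudstackAPI import restoreVirtualMachine

    # Build and fire the raw command; the response carries the restored VM.
    cmd = restoreVirtualMachine.restoreVirtualMachineCmd()
    cmd.virtualmachineid = vm_id  # hypothetical: id of a running instance
    restored = apiclient.restoreVirtualMachine(cmd)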


[2/3] git commit: updated refs/heads/master to cc8508d

Posted by sa...@apache.org.
CLOUDSTACK-8717: Failed to start instance after restoring the running instance
	-Modified code to add tag to only one cluster wide SP
	-Added a validateList check on the listed storage pools (see the sketch below)
	-Added code to clear the tag in tearDown
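
For context, validateList from marvin.lib.utils is the usual Marvin guard
against None or empty list responses. A minimal sketch of the pattern the
diff below adds, assuming validateList returns a list whose first element
is PASS (from marvin.codes) only for a usable, non-empty input:

    from marvin.codes import PASS
    from marvin.lib.utils import validateList

    status = validateList(pools)  # pools: result of StoragePool.list(...)
    assert status[0] == PASS, "Failed to list cluster wide storage pools"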


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/4bbf151e
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/4bbf151e
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/4bbf151e

Branch: refs/heads/master
Commit: 4bbf151eb7d39d0ab535a3a7c45d95af71cbd689
Parents: af28ded
Author: Priti Sarap <pr...@clogeny.com>
Authored: Wed Aug 12 12:50:56 2015 +0530
Committer: Priti Sarap <pr...@clogeny.com>
Committed: Wed Aug 12 12:56:12 2015 +0530

----------------------------------------------------------------------
 .../testpaths/testpath_restore_vm.py            | 30 +++++++++++++-------
 1 file changed, 20 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/4bbf151e/test/integration/testpaths/testpath_restore_vm.py
----------------------------------------------------------------------
diff --git a/test/integration/testpaths/testpath_restore_vm.py b/test/integration/testpaths/testpath_restore_vm.py
index 7fd1610..d73499a 100644
--- a/test/integration/testpaths/testpath_restore_vm.py
+++ b/test/integration/testpaths/testpath_restore_vm.py
@@ -22,7 +22,7 @@ Test restore of a running VM on VMware with one cluster having two primary storages
 
 from nose.plugins.attrib import attr
 from marvin.cloudstackTestCase import cloudstackTestCase
-from marvin.lib.utils import cleanup_resources
+from marvin.lib.utils import cleanup_resources, validateList
 from marvin.lib.base import (Account,
                              ServiceOffering,
                              VirtualMachine,
@@ -35,7 +35,7 @@ from marvin.lib.common import (get_domain,
                                list_virtual_machines
                                )
 
-from marvin.codes import CLUSTERTAG1, ROOT
+from marvin.codes import CLUSTERTAG1, ROOT, PASS
 import time
 
 
@@ -108,6 +108,12 @@ class TestRestoreVM(cloudstackTestCase):
 
     def tearDown(self):
         try:
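+            # Remove the storage tag added during the test so reruns start
+            # from untagged pools.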
+            if getattr(self, "pools", None):
+                StoragePool.update(
+                    self.apiclient,
+                    id=self.pools[0].id,
+                    tags="")
+
             cleanup_resources(self.apiclient, self.cleanup)
         except Exception as e:
             raise Exception("Warning: Exception during cleanup : %s" % e)
@@ -126,6 +132,14 @@ class TestRestoreVM(cloudstackTestCase):
                 zoneid=self.zone.id,
                 scope="CLUSTER")
 
+            status = validateList(self.pools)
+
+            # The listing must return a valid, non-empty list
+            self.assertEqual(
+                status[0],
+                PASS,
+                "Check: Failed to list cluster wide storage pools")
+
             if len(self.pools) < 2:
                 self.skipTest("There must be at atleast two cluster wide\
                 storage pools available in the setup")
@@ -135,14 +149,10 @@ class TestRestoreVM(cloudstackTestCase):
 
         # Adding tags to Storage Pools
         cluster_no = 1
-        self.debug("Storage Pools: %s" % self.pools)
-        for storagePool in self.pools:
-            if storagePool.scope == "CLUSTER":
-                StoragePool.update(
-                    self.apiclient,
-                    id=storagePool.id,
-                    tags=[CLUSTERTAG1[:-1] + repr(cluster_no)])
-                cluster_no += 1
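+        # Tag only the first cluster wide pool; the trailing character of
+        # CLUSTERTAG1 is replaced with the cluster index. tearDown clears
+        # this tag again.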
+        StoragePool.update(
+            self.apiclient,
+            id=self.pools[0].id,
+            tags=[CLUSTERTAG1[:-1] + repr(cluster_no)])
 
         self.vm = VirtualMachine.create(
             self.apiclient,


[3/3] git commit: updated refs/heads/master to cc8508d

Posted by sa...@apache.org.
Merge pull request #667 from pritisarap12/CLOUDSTACK-8717-Failed-to-start-instance-after-restoring-the-running-instance

CLOUDSTACK-8717: Failed to start instance after restoring the running instance

On a setup with two cluster wide primary storages, verify restoring a running instance (while restoring an instance, the ROOT disk may get created on another primary storage).

* pr/667:
  CLOUDSTACK-8717: Failed to start instance after restoring the running instance 	-Modified code to add tag to only one cluster wide SP 	-Added validateList check 	-Added code to clear the tag in tearDown
  CLOUDSTACK-8717: Failed to start instance after restoring the running instance

Signed-off-by: sanjeev <sa...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/cc8508d8
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/cc8508d8
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/cc8508d8

Branch: refs/heads/master
Commit: cc8508d808b5c68bb3133e8d6c966c0df1bb9532
Parents: d3dc053 4bbf151
Author: sanjeev <sa...@apache.org>
Authored: Thu Feb 11 16:18:47 2016 +0530
Committer: sanjeev <sa...@apache.org>
Committed: Thu Feb 11 16:18:50 2016 +0530

----------------------------------------------------------------------
 .../testpaths/testpath_restore_vm.py            | 211 +++++++++++++++++++
 1 file changed, 211 insertions(+)
----------------------------------------------------------------------