Posted to commits@cloudstack.apache.org by ek...@apache.org on 2015/06/08 11:45:22 UTC

[01/50] [abbrv] git commit: updated refs/heads/feature/vpc-ipv6 to 6140db5

Repository: cloudstack
Updated Branches:
  refs/heads/feature/vpc-ipv6 8c929d58c -> 6140db50b


CLOUDSTACK-8527: Skipping VPC tests on HyperV

Signed-off-by: Gaurav Aradhye <ga...@clogeny.com>
This closes #326


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/900b656d
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/900b656d
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/900b656d

Branch: refs/heads/feature/vpc-ipv6
Commit: 900b656d1295b94a4664018dd018e0c2d85d7b44
Parents: b267c5f
Author: Gaurav Aradhye <ga...@clogeny.com>
Authored: Fri May 29 15:13:28 2015 +0530
Committer: Gaurav Aradhye <ga...@clogeny.com>
Committed: Fri May 29 15:15:52 2015 +0530

----------------------------------------------------------------------
 test/integration/component/maint/test_dedicate_public_ip_range.py | 3 +++
 1 file changed, 3 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/900b656d/test/integration/component/maint/test_dedicate_public_ip_range.py
----------------------------------------------------------------------
diff --git a/test/integration/component/maint/test_dedicate_public_ip_range.py b/test/integration/component/maint/test_dedicate_public_ip_range.py
index 10b4a33..fe6b5d5 100644
--- a/test/integration/component/maint/test_dedicate_public_ip_range.py
+++ b/test/integration/component/maint/test_dedicate_public_ip_range.py
@@ -57,6 +57,7 @@ class TestDedicatePublicIPRange(cloudstackTestCase):
             cls).getClsTestClient()
         cls.apiclient = cls.testClient.getApiClient()
         cls.testdata = cls.testClient.getParsedTestDataConfig()
+        cls.hypervisor = cls.testClient.getHypervisorInfo()
         # Get Zone, Domain
         cls.domain = get_domain(cls.apiclient)
         cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
@@ -549,6 +550,8 @@ class TestDedicatePublicIPRange(cloudstackTestCase):
         # 9. Repeat step 6, this time the IP should not be from
              dedicated range, it should be from global pool
         """
+        if self.hypervisor.lower() in ["hyperv"]:
+            self.skipTest("Skipping test as VPC is not supported on HyperV")
         user_domain = Domain.create(
             self.apiclient,
             services=self.testdata["domain"],


[04/50] [abbrv] git commit: updated refs/heads/feature/vpc-ipv6 to 6140db5

Posted by ek...@apache.org.
framework: don't use raw SQL statements to save certificate in KeystoreDaoImpl

Signed-off-by: Rohit Yadav <ro...@shapeblue.com>
(cherry picked from commit fb88a11f8228a3ff4798333a46c5c72b6b5ad88c)
Signed-off-by: Rohit Yadav <ro...@shapeblue.com>


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/ab3b3c7f
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/ab3b3c7f
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/ab3b3c7f

Branch: refs/heads/feature/vpc-ipv6
Commit: ab3b3c7fa1f3887a46b79437b0724116149dd96e
Parents: ca3ac68
Author: Rohit Yadav <ro...@shapeblue.com>
Authored: Fri May 29 18:32:40 2015 +0200
Committer: Rohit Yadav <ro...@shapeblue.com>
Committed: Fri May 29 18:33:30 2015 +0200

----------------------------------------------------------------------
 .../security/keystore/KeystoreDaoImpl.java      | 63 ++++++++------------
 1 file changed, 25 insertions(+), 38 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/ab3b3c7f/framework/security/src/org/apache/cloudstack/framework/security/keystore/KeystoreDaoImpl.java
----------------------------------------------------------------------
diff --git a/framework/security/src/org/apache/cloudstack/framework/security/keystore/KeystoreDaoImpl.java b/framework/security/src/org/apache/cloudstack/framework/security/keystore/KeystoreDaoImpl.java
index 8a8754d..0ec3c72 100644
--- a/framework/security/src/org/apache/cloudstack/framework/security/keystore/KeystoreDaoImpl.java
+++ b/framework/security/src/org/apache/cloudstack/framework/security/keystore/KeystoreDaoImpl.java
@@ -16,23 +16,17 @@
 // under the License.
 package org.apache.cloudstack.framework.security.keystore;
 
-import java.sql.PreparedStatement;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.List;
-
-import javax.ejb.Local;
-
-import org.springframework.stereotype.Component;
-
-import com.cloud.utils.crypt.DBEncryptionUtil;
 import com.cloud.utils.db.DB;
 import com.cloud.utils.db.GenericDaoBase;
 import com.cloud.utils.db.SearchBuilder;
 import com.cloud.utils.db.SearchCriteria;
 import com.cloud.utils.db.SearchCriteria.Op;
-import com.cloud.utils.db.TransactionLegacy;
-import com.cloud.utils.exception.CloudRuntimeException;
+import org.springframework.stereotype.Component;
+
+import javax.ejb.Local;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
 
 @Component
 @Local(value = {KeystoreDao.class})
@@ -96,26 +90,19 @@ public class KeystoreDaoImpl extends GenericDaoBase<KeystoreVO, Long> implements
     @Override
     @DB
     public void save(String name, String certificate, String key, String domainSuffix) {
-        TransactionLegacy txn = TransactionLegacy.currentTxn();
-        try {
-            txn.start();
-
-            String sql =
-                "INSERT INTO keystore (`name`, `certificate`, `key`, `domain_suffix`) VALUES (?, ?, ?, ?) ON DUPLICATE KEY UPDATE `certificate`=?, `key`=?, `domain_suffix`=?";
-            PreparedStatement pstmt = txn.prepareAutoCloseStatement(sql);
-            pstmt.setString(1, name);
-            pstmt.setString(2, certificate);
-            pstmt.setString(3, DBEncryptionUtil.encrypt(key));
-            pstmt.setString(4, domainSuffix);
-            pstmt.setString(5, certificate);
-            pstmt.setString(6, DBEncryptionUtil.encrypt(key));
-            pstmt.setString(7, domainSuffix);
-
-            pstmt.executeUpdate();
-            txn.commit();
-        } catch (Exception e) {
-            txn.rollback();
-            throw new CloudRuntimeException("Unable to save certificate under name " + name + " due to exception", e);
+        KeystoreVO keystore = findByName(name);
+        if (keystore != null) {
+            keystore.setCertificate(certificate);
+            keystore.setKey(key);
+            keystore.setDomainSuffix(domainSuffix);
+            this.update(keystore.getId(), keystore);
+        } else {
+            keystore = new KeystoreVO();
+            keystore.setName(name);
+            keystore.setCertificate(certificate);
+            keystore.setKey(key);
+            keystore.setDomainSuffix(domainSuffix);
+            this.persist(keystore);
         }
     }
 
@@ -130,12 +117,12 @@ public class KeystoreDaoImpl extends GenericDaoBase<KeystoreVO, Long> implements
             ks.setDomainSuffix(domainSuffix);
             this.update(ks.getId(), ks);
         } else {
-            KeystoreVO newks = new KeystoreVO();
-            newks.setCertificate(certificate);
-            newks.setName(alias);
-            newks.setIndex(index);
-            newks.setDomainSuffix(domainSuffix);
-            persist(newks);
+            ks = new KeystoreVO();
+            ks.setCertificate(certificate);
+            ks.setName(alias);
+            ks.setIndex(index);
+            ks.setDomainSuffix(domainSuffix);
+            this.persist(ks);
         }
     }
 }
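
For reference, a minimal sketch of the find-then-update-or-persist pattern this commit switches to, using a hypothetical in-memory DAO in place of the real one; the calls used in the diff above (findByName, update, persist) play the same roles on CloudStack's GenericDaoBase-backed DAO.

    import java.util.HashMap;
    import java.util.Map;

    class KeystoreRecord {
        String name, certificate, key, domainSuffix;
    }

    class InMemoryKeystoreDao {
        private final Map<String, KeystoreRecord> byName = new HashMap<>();

        KeystoreRecord findByName(String name) {
            return byName.get(name);
        }

        // Upsert without hand-written SQL: update the record if it exists, create it otherwise.
        void save(String name, String certificate, String key, String domainSuffix) {
            KeystoreRecord record = findByName(name);
            if (record == null) {
                record = new KeystoreRecord();
                record.name = name;
            }
            record.certificate = certificate;
            record.key = key;
            record.domainSuffix = domainSuffix;
            byName.put(name, record);
        }
    }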


[27/50] [abbrv] git commit: updated refs/heads/feature/vpc-ipv6 to 6140db5

Posted by ek...@apache.org.
CID 1302976 Scanner in try-with-resource

Signed-off-by: Rohit Yadav <ro...@shapeblue.com>

This closes #352


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/3d4d1527
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/3d4d1527
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/3d4d1527

Branch: refs/heads/feature/vpc-ipv6
Commit: 3d4d15275340bd9bbec76afb535e4a48371ef421
Parents: 109b6e9
Author: Daan Hoogland <da...@gmail.com>
Authored: Wed Jun 3 17:31:19 2015 +0200
Committer: Rohit Yadav <ro...@shapeblue.com>
Committed: Thu Jun 4 00:09:40 2015 +0200

----------------------------------------------------------------------
 .../kvm/src/org/apache/cloudstack/utils/linux/CPUStat.java  | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/3d4d1527/plugins/hypervisors/kvm/src/org/apache/cloudstack/utils/linux/CPUStat.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/kvm/src/org/apache/cloudstack/utils/linux/CPUStat.java b/plugins/hypervisors/kvm/src/org/apache/cloudstack/utils/linux/CPUStat.java
index 38b7e8e..d8228a4 100644
--- a/plugins/hypervisors/kvm/src/org/apache/cloudstack/utils/linux/CPUStat.java
+++ b/plugins/hypervisors/kvm/src/org/apache/cloudstack/utils/linux/CPUStat.java
@@ -17,12 +17,12 @@
 
 package org.apache.cloudstack.utils.linux;
 
-import org.apache.log4j.Logger;
-
 import java.io.File;
 import java.io.FileNotFoundException;
 import java.util.Scanner;
 
+import org.apache.log4j.Logger;
+
 public class CPUStat {
     private static final Logger s_logger = Logger.getLogger(CPUStat.class);
 
@@ -52,8 +52,9 @@ public class CPUStat {
 
     private UptimeStats getUptimeAndCpuIdleTime() {
         UptimeStats uptime = new UptimeStats(0d, 0d);
-        try {
-            String[] stats =  new Scanner(new File(_uptimeFile)).useDelimiter("\\Z").next().split("\\s+");
+        File f = new File(_uptimeFile);
+        try (Scanner scanner = new Scanner(f);) {
+            String[] stats = scanner.useDelimiter("\\Z").next().split("\\s+");
             uptime = new UptimeStats(Double.parseDouble(stats[0]), Double.parseDouble(stats[1]));
         } catch (FileNotFoundException ex) {
             s_logger.warn("File " + _uptimeFile + " not found:" + ex.toString());
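
As a self-contained illustration of the try-with-resources idiom adopted above: the Scanner is closed automatically when the block exits, normally or via an exception, so the underlying file handle cannot leak. The class name and fallback value here are only illustrative.

    import java.io.File;
    import java.io.FileNotFoundException;
    import java.util.Scanner;

    public class UptimeReader {
        // Returns the first field of /proc/uptime (total uptime in seconds), or 0 on failure.
        public static double readUptimeSeconds(String path) {
            try (Scanner scanner = new Scanner(new File(path))) {
                return Double.parseDouble(scanner.next());
            } catch (FileNotFoundException e) {
                return 0d;
            }
        }

        public static void main(String[] args) {
            System.out.println(readUptimeSeconds("/proc/uptime"));
        }
    }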


[10/50] [abbrv] git commit: updated refs/heads/feature/vpc-ipv6 to 6140db5

Posted by ek...@apache.org.
fix centos63 build in master; it has been broken since the removal of awsapi

Signed-off-by: pdion891 <pd...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/23c990f7
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/23c990f7
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/23c990f7

Branch: refs/heads/feature/vpc-ipv6
Commit: 23c990f7c9a2f071c5acca1a2beaaa4379beb0bf
Parents: 94f1ebb
Author: pdion891 <pd...@apache.org>
Authored: Sun May 31 21:21:27 2015 -0400
Committer: pdion891 <pd...@apache.org>
Committed: Sun May 31 21:50:32 2015 -0400

----------------------------------------------------------------------
 client/tomcatconf/commons-logging.properties.in | 30 ++++++++++++++++++++
 packaging/centos63/cloud.spec                   |  2 +-
 2 files changed, 31 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/23c990f7/client/tomcatconf/commons-logging.properties.in
----------------------------------------------------------------------
diff --git a/client/tomcatconf/commons-logging.properties.in b/client/tomcatconf/commons-logging.properties.in
new file mode 100644
index 0000000..1e570d6
--- /dev/null
+++ b/client/tomcatconf/commons-logging.properties.in
@@ -0,0 +1,30 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+# This is the logging properties file that goes into the war. There are two logging configs kept in
+# svn: one for development (at src/test-resources) and the other for production.
+ 
+# Uncomment the next line to disable all logging.
+#org.apache.commons.logging.Log=org.apache.commons.logging.impl.NoOpLog
+
+# Uncomment the next line to enable the simple log based logging
+#org.apache.commons.logging.Log=org.apache.commons.logging.impl.SimpleLog
+
+# Uncomment the next line to enable log4j based logging
+org.apache.commons.logging.Log=org.apache.commons.logging.impl.Log4JLogger

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/23c990f7/packaging/centos63/cloud.spec
----------------------------------------------------------------------
diff --git a/packaging/centos63/cloud.spec b/packaging/centos63/cloud.spec
index 4154560..a194096 100644
--- a/packaging/centos63/cloud.spec
+++ b/packaging/centos63/cloud.spec
@@ -279,7 +279,7 @@ cp -r client/target/cloud-client-ui-%{_maventag}/* ${RPM_BUILD_ROOT}%{_datadir}/
 rm -rf ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/webapps/client/WEB-INF/classes/scripts
 rm -rf ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/webapps/client/WEB-INF/classes/vms
 
-for name in db.properties log4j-cloud.xml tomcat6-nonssl.conf tomcat6-ssl.conf server-ssl.xml server-nonssl.xml \
+for name in db.properties log4j-cloud.xml tomcat6-nonssl.conf tomcat6-ssl.conf server-ssl.xml server-nonssl.xml commons-logging.properties \
             catalina.policy catalina.properties classpath.conf tomcat-users.xml web.xml environment.properties java.security.ciphers; do
   mv ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/webapps/client/WEB-INF/classes/$name \
     ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/management/$name


[25/50] [abbrv] git commit: updated refs/heads/feature/vpc-ipv6 to 6140db5

Posted by ek...@apache.org.
CLOUDSTACK-8532: Modify setUpClass to skip test cases instead of raising an exception when the connection limit to the Netscaler device is exceeded

Signed-off-by: Gaurav Aradhye <ga...@clogeny.com>
This closes #343


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/9c2a1ea8
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/9c2a1ea8
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/9c2a1ea8

Branch: refs/heads/feature/vpc-ipv6
Commit: 9c2a1ea81e38f9aad49adafcbe98f80716816e1a
Parents: b31b842
Author: pritisarap12 <pr...@clogeny.com>
Authored: Tue Jun 2 11:04:34 2015 +0530
Committer: Gaurav Aradhye <ga...@clogeny.com>
Committed: Wed Jun 3 15:19:04 2015 +0530

----------------------------------------------------------------------
 .../component/test_netscaler_lb_algo.py         | 121 ++++++++++++++++---
 .../component/test_netscaler_lb_sticky.py       |  12 +-
 2 files changed, 113 insertions(+), 20 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/9c2a1ea8/test/integration/component/test_netscaler_lb_algo.py
----------------------------------------------------------------------
diff --git a/test/integration/component/test_netscaler_lb_algo.py b/test/integration/component/test_netscaler_lb_algo.py
index bafecd0..9e5001c 100644
--- a/test/integration/component/test_netscaler_lb_algo.py
+++ b/test/integration/component/test_netscaler_lb_algo.py
@@ -58,6 +58,8 @@ class TestLbWithRoundRobin(cloudstackTestCase):
 
         cls._cleanup = []
         try:
+            cls.exception_string = "Connection limit to CFE exceeded"
+            cls.skiptest = False
             cls.netscaler = add_netscaler(cls.api_client, cls.zone.id, cls.testdata["configurableData"]["netscaler"])
             cls._cleanup.append(cls.netscaler)
 
@@ -76,8 +78,12 @@ class TestLbWithRoundRobin(cloudstackTestCase):
                 cls.testdata["service_offering"]
             )
         except Exception as e:
-            cls.tearDownClass()
-            raise Exception("Warning: Exception in setUpClass: %s" % e)
+            if cls.exception_string.lower() in e.lower():
+                cls.skiptest = True
+                cls.exception_msg = e
+            else:
+                cls.tearDownClass()
+                raise Exception("Warning: Exception in setUpClass: %s" % e)
         return
 
     @classmethod
@@ -90,6 +96,9 @@ class TestLbWithRoundRobin(cloudstackTestCase):
         return
 
     def setUp(self):
+        if self.skiptest:
+            self.skipTest(self.exception_msg)
+
         self.apiclient = self.testClient.getApiClient()
         self.dbclient = self.testClient.getDbConnection()
         self.account = Account.create(
@@ -265,6 +274,8 @@ class TestLbWithLeastConn(cloudstackTestCase):
         cls.testdata["configurableData"]["netscaler"]["lbdevicededicated"] = False
 
         try:
+            cls.exception_string = "Connection limit to CFE exceeded"
+            cls.skiptest = False
             cls.netscaler = add_netscaler(cls.api_client, cls.zone.id, cls.testdata["configurableData"]["netscaler"])
             cls._cleanup.append(cls.netscaler)
             cls.network_offering = NetworkOffering.create(
@@ -285,8 +296,12 @@ class TestLbWithLeastConn(cloudstackTestCase):
             )
             cls._cleanup.append(cls.service_offering)
         except Exception as e:
-            cls.tearDownClass()
-            raise Exception("Warning: Exception in setUpClass: %s" % e)
+            if cls.exception_string.lower() in e.lower():
+                cls.skiptest = True
+                cls.exception_msg = e
+            else:
+                cls.tearDownClass()
+                raise Exception("Warning: Exception in setUpClass: %s" % e)
 
         return
 
@@ -300,6 +315,9 @@ class TestLbWithLeastConn(cloudstackTestCase):
         return
 
     def setUp(self):
+        if self.skiptest:
+            self.skipTest(self.exception_msg)
+
         self.apiclient = self.testClient.getApiClient()
         self.dbclient = self.testClient.getDbConnection()
         self.account = Account.create(
@@ -483,6 +501,9 @@ class TestLbWithSourceIp(cloudstackTestCase):
 
         cls._cleanup = []
         try:
+            cls.exception_string = "Connection limit to CFE exceeded"
+            cls.skiptest = False
+
             cls.netscaler = add_netscaler(cls.api_client, cls.zone.id, cls.testdata["configurableData"]["netscaler"])
             cls._cleanup.append(cls.netscaler)
             cls.network_offering = NetworkOffering.create(
@@ -502,8 +523,12 @@ class TestLbWithSourceIp(cloudstackTestCase):
             )
             cls._cleanup.append(cls.service_offering)
         except Exception as e:
-            cls.tearDownClass()
-            raise Exception("Warning: Exception in setUpClass: %s" % e)
+            if cls.exception_string.lower() in e.lower():
+                cls.skiptest = True
+                cls.exception_msg = e
+            else:
+                cls.tearDownClass()
+                raise Exception("Warning: Exception in setUpClass: %s" % e)
         return
 
     @classmethod
@@ -516,6 +541,9 @@ class TestLbWithSourceIp(cloudstackTestCase):
         return
 
     def setUp(self):
+        if self.skiptest:
+            self.skipTest(self.exception_msg)
+
         self.apiclient = self.testClient.getApiClient()
         self.dbclient = self.testClient.getDbConnection()
         self.account = Account.create(
@@ -692,6 +720,9 @@ class TestLbAlgoRrLc(cloudstackTestCase):
         cls.testdata["configurableData"]["netscaler"]["lbdevicededicated"] = False
 
         try:
+            cls.exception_string = "Connection limit to CFE exceeded"
+            cls.skiptest = False
+
             cls.netscaler = add_netscaler(cls.api_client, cls.zone.id, cls.testdata["configurableData"]["netscaler"])
             cls._cleanup.append(cls.netscaler)
             cls.network_offering = NetworkOffering.create(
@@ -743,8 +774,12 @@ class TestLbAlgoRrLc(cloudstackTestCase):
                 networkid=cls.network.id
             )
         except Exception as e:
-            cls.tearDownClass()
-            raise Exception("Warning: Exception in setUpClass: %s" % e)
+            if cls.exception_string.lower() in e.lower():
+                cls.skiptest = True
+                cls.exception_msg = e
+            else:
+                cls.tearDownClass()
+                raise Exception("Warning: Exception in setUpClass: %s" % e)
         return
 
     @classmethod
@@ -757,6 +792,9 @@ class TestLbAlgoRrLc(cloudstackTestCase):
         return
 
     def setUp(self):
+        if self.skiptest:
+            self.skipTest(self.exception_msg)
+
         self.apiclient = self.testClient.getApiClient()
         self.dbclient = self.testClient.getDbConnection()
         self.cleanup = []
@@ -904,6 +942,8 @@ class TestLbAlgoLcRr(cloudstackTestCase):
         cls.testdata["configurableData"]["netscaler"]["lbdevicededicated"] = False
 
         try:
+            cls.exception_string = "Connection limit to CFE exceeded"
+            cls.skiptest = False
             cls.netscaler = add_netscaler(cls.api_client, cls.zone.id, cls.testdata["configurableData"]["netscaler"])
             cls._cleanup.append(cls.netscaler)
             cls.network_offering = NetworkOffering.create(
@@ -955,8 +995,13 @@ class TestLbAlgoLcRr(cloudstackTestCase):
                 networkid=cls.network.id
             )
         except Exception as e:
-            cls.tearDownClass()
-            raise Exception("Warning: Exception in setUpClass: %s" % e)
+            if cls.exception_string.lower() in e.lower():
+                cls.skiptest = True
+                cls.exception_msg = e
+            else:
+                cls.tearDownClass()
+                raise Exception("Warning: Exception in setUpClass: %s" % e)
+
         return
 
     @classmethod
@@ -969,6 +1014,9 @@ class TestLbAlgoLcRr(cloudstackTestCase):
         return
 
     def setUp(self):
+        if self.skiptest:
+            self.skipTest(self.exception_msg)
+
         self.apiclient = self.testClient.getApiClient()
         self.dbclient = self.testClient.getDbConnection()
         self.cleanup = []
@@ -1112,6 +1160,8 @@ class TestLbAlgoRrSb(cloudstackTestCase):
         cls.testdata["configurableData"]["netscaler"]["lbdevicededicated"] = False
 
         try:
+            cls.exception_string = "Connection limit to CFE exceeded"
+            cls.skiptest = False
             cls.netscaler = add_netscaler(cls.api_client, cls.zone.id, cls.testdata["configurableData"]["netscaler"])
             cls._cleanup.append(cls.netscaler)
             cls.network_offering = NetworkOffering.create(
@@ -1164,8 +1214,13 @@ class TestLbAlgoRrSb(cloudstackTestCase):
                 networkid=cls.network.id
             )
         except Exception as e:
-            cls.tearDownClass()
-            raise Exception("Warning: Exception in setUpClass: %s" % e)
+            if cls.exception_string.lower() in e.lower():
+                cls.skiptest = True
+                cls.exception_msg = e
+            else:
+                cls.tearDownClass()
+                raise Exception("Warning: Exception in setUpClass: %s" % e)
+
         return
 
     @classmethod
@@ -1178,6 +1233,8 @@ class TestLbAlgoRrSb(cloudstackTestCase):
         return
 
     def setUp(self):
+        if self.skiptest:
+            self.skipTest(self.exception_msg)
         self.apiclient = self.testClient.getApiClient()
         self.dbclient = self.testClient.getDbConnection()
         self.cleanup = []
@@ -1325,6 +1382,8 @@ class TestLbAlgoSbRr(cloudstackTestCase):
         cls.testdata["configurableData"]["netscaler"]["lbdevicededicated"] = False
 
         try:
+            cls.exception_string = "Connection limit to CFE exceeded"
+            cls.skiptest = False
             cls.netscaler = add_netscaler(cls.api_client, cls.zone.id, cls.testdata["configurableData"]["netscaler"])
             cls._cleanup.append(cls.netscaler)
             cls.network_offering = NetworkOffering.create(
@@ -1378,8 +1437,12 @@ class TestLbAlgoSbRr(cloudstackTestCase):
                 networkid=cls.network.id
             )
         except Exception as e:
-            cls.tearDownClass()
-            raise Exception("Warning: Exception in setUpClass: %s" % e)
+            if cls.exception_string.lower() in e.lower():
+                cls.skiptest = True
+                cls.exception_msg = e
+            else:
+                cls.tearDownClass()
+                raise Exception("Warning: Exception in setUpClass: %s" % e)
         return
 
     @classmethod
@@ -1392,6 +1455,9 @@ class TestLbAlgoSbRr(cloudstackTestCase):
         return
 
     def setUp(self):
+        if self.skiptest:
+            self.skipTest(self.exception_msg)
+
         self.apiclient = self.testClient.getApiClient()
         self.dbclient = self.testClient.getDbConnection()
         self.cleanup = []
@@ -1539,6 +1605,9 @@ class TestLbAlgoSbLc(cloudstackTestCase):
         cls.testdata["configurableData"]["netscaler"]["lbdevicededicated"] = False
 
         try:
+            cls.exception_string = "Connection limit to CFE exceeded"
+            cls.skiptest = False
+
             cls.netscaler = add_netscaler(cls.api_client, cls.zone.id, cls.testdata["configurableData"]["netscaler"])
             cls._cleanup.append(cls.netscaler)
             cls.network_offering = NetworkOffering.create(
@@ -1592,8 +1661,12 @@ class TestLbAlgoSbLc(cloudstackTestCase):
                 networkid=cls.network.id
             )
         except Exception as e:
-            cls.tearDownClass()
-            raise Exception("Warning: Exception in setUpClass: %s" % e)
+            if cls.exception_string.lower() in e.lower():
+                cls.skiptest = True
+                cls.exception_msg =e 
+            else:
+                cls.tearDownClass()
+                raise Exception("Warning: Exception in setUpClass: %s" % e)
         return
 
     @classmethod
@@ -1606,6 +1679,9 @@ class TestLbAlgoSbLc(cloudstackTestCase):
         return
 
     def setUp(self):
+        if self.skiptest:
+            self.skipTest(self.exception_msg)
+
         self.apiclient = self.testClient.getApiClient()
         self.dbclient = self.testClient.getDbConnection()
         self.cleanup = []
@@ -1754,6 +1830,8 @@ class TestLbAlgoLcSb(cloudstackTestCase):
         cls.testdata["configurableData"]["netscaler"]["lbdevicededicated"] = False
 
         try:
+            cls.exception_string = "Connection limit to CFE exceeded"
+            cls.skiptest = False
             cls.netscaler = add_netscaler(cls.api_client, cls.zone.id, cls.testdata["configurableData"]["netscaler"])
             cls._cleanup.append(cls.netscaler)
             cls.network_offering = NetworkOffering.create(
@@ -1806,8 +1884,12 @@ class TestLbAlgoLcSb(cloudstackTestCase):
                 networkid=cls.network.id
             )
         except Exception as e:
-            cls.tearDownClass()
-            raise Exception("Warning: Exception in setUpClass: %s" % e)
+            if cls.exception_string.lower() in e.lower():
+                cls.skiptest = True
+                cls.exception_msg = e
+            else:
+                cls.tearDownClass()
+                raise Exception("Warning: Exception in setUpClass: %s" % e)
         return
 
     @classmethod
@@ -1820,6 +1902,9 @@ class TestLbAlgoLcSb(cloudstackTestCase):
         return
 
     def setUp(self):
+        if self.skiptest:
+            self.skipTest(self.exception_msg)
+
         self.apiclient = self.testClient.getApiClient()
         self.dbclient = self.testClient.getDbConnection()
         self.cleanup = []

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/9c2a1ea8/test/integration/component/test_netscaler_lb_sticky.py
----------------------------------------------------------------------
diff --git a/test/integration/component/test_netscaler_lb_sticky.py b/test/integration/component/test_netscaler_lb_sticky.py
index 3494eae..8ac85ab 100644
--- a/test/integration/component/test_netscaler_lb_sticky.py
+++ b/test/integration/component/test_netscaler_lb_sticky.py
@@ -57,6 +57,8 @@ class TestLbStickyPolicy(cloudstackTestCase):
         cls.testdata["configurableData"]["netscaler"]["lbdevicededicated"] = False
 
         try:
+            cls.exception_string = "Connection limit to CFE exceeded"
+            cls.skiptest = False
             cls.netscaler = add_netscaler(
                 cls.api_client,
                 cls.zone.id,
@@ -110,8 +112,12 @@ class TestLbStickyPolicy(cloudstackTestCase):
                 networkid=cls.network.id
             )
         except Exception as e:
-            cls.tearDownClass()
-            raise Exception("Warning: Exception in setUpClass: %s" % e)
+            if cls.exception_string.lower() in e.lower():
+                cls.skiptest = True
+                cls.exception_msg = e
+            else:
+                cls.tearDownClass()
+                raise Exception("Warning: Exception in setUpClass: %s" % e)
         return
 
     @classmethod
@@ -124,6 +130,8 @@ class TestLbStickyPolicy(cloudstackTestCase):
         return
 
     def setUp(self):
+        if self.skiptest:
+            self.skipTest(self.exception_msg)
         self.apiclient = self.testClient.getApiClient()
         self.dbclient = self.testClient.getDbConnection()
         self.cleanup = []


[47/50] [abbrv] git commit: updated refs/heads/feature/vpc-ipv6 to 6140db5

Posted by ek...@apache.org.
Fix another case of unnecessary boxing

JuniperSrxResource.java:1280, DM_BOXED_PRIMITIVE_FOR_PARSING, Priority: High
Boxing/unboxing to parse a primitive in com.cloud.network.resource.JuniperSrxResource.removeDestinationNatRules(Long, Map, List)

Signed-off-by: Daan Hoogland <da...@gmail.com>

This closes #367


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/b9b13d76
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/b9b13d76
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/b9b13d76

Branch: refs/heads/feature/vpc-ipv6
Commit: b9b13d764dea1446df9e91c296c3bf767dd1ffc9
Parents: 039d632
Author: Rafael da Fonseca <rs...@gmail.com>
Authored: Sun Jun 7 21:38:01 2015 +0200
Committer: Daan Hoogland <da...@gmail.com>
Committed: Mon Jun 8 09:41:07 2015 +0200

----------------------------------------------------------------------
 .../src/com/cloud/network/resource/JuniperSrxResource.java       | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b9b13d76/plugins/network-elements/juniper-srx/src/com/cloud/network/resource/JuniperSrxResource.java
----------------------------------------------------------------------
diff --git a/plugins/network-elements/juniper-srx/src/com/cloud/network/resource/JuniperSrxResource.java b/plugins/network-elements/juniper-srx/src/com/cloud/network/resource/JuniperSrxResource.java
index 2133937..d89f1e9 100644
--- a/plugins/network-elements/juniper-srx/src/com/cloud/network/resource/JuniperSrxResource.java
+++ b/plugins/network-elements/juniper-srx/src/com/cloud/network/resource/JuniperSrxResource.java
@@ -1277,8 +1277,8 @@ public class JuniperSrxResource implements ServerResource {
         for (String[] destNatRule : destNatRules) {
             String publicIp = destNatRule[0];
             String privateIp = destNatRule[1];
-            int srcPort = Integer.valueOf(destNatRule[2]);
-            int destPort = Integer.valueOf(destNatRule[3]);
+            int srcPort = Integer.parseInt(destNatRule[2]);
+            int destPort = Integer.parseInt(destNatRule[3]);
 
             Long publicVlanTag = null;
             if (publicVlanTags.containsKey(publicIp)) {
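
A short illustration of why this change silences DM_BOXED_PRIMITIVE_FOR_PARSING: Integer.valueOf returns a boxed Integer that is immediately auto-unboxed when assigned to an int, whereas Integer.parseInt returns the primitive directly and avoids the needless allocation. The values below are only examples.

    public class ParseIntVsValueOf {
        public static void main(String[] args) {
            String port = "8080";

            int boxedThenUnboxed = Integer.valueOf(port); // Integer object created, then unboxed
            int parsedDirectly = Integer.parseInt(port);  // primitive int, no boxing

            System.out.println(boxedThenUnboxed == parsedDirectly); // true
        }
    }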


[41/50] [abbrv] git commit: updated refs/heads/feature/vpc-ipv6 to 6140db5

Posted by ek...@apache.org.
Fix 2 findbugs high priority warnings

VmwareResource.java:315, MS_SHOULD_BE_FINAL, Priority: High
com.cloud.hypervisor.vmware.resource.VmwareResource.s_serviceContext isn't final but should be

VmwareResource.java:331, MS_SHOULD_BE_FINAL, Priority: High
com.cloud.hypervisor.vmware.resource.VmwareResource.s_powerStatesTable isn't final but should be

Signed-off-by: Daan Hoogland <da...@gmail.com>

This closes #363


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/1ca74dac
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/1ca74dac
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/1ca74dac

Branch: refs/heads/feature/vpc-ipv6
Commit: 1ca74dac269faa890c830ccde67c5007ef2e3c45
Parents: db69c8e
Author: Rafael da Fonseca <rs...@gmail.com>
Authored: Sun Jun 7 11:44:28 2015 +0200
Committer: Daan Hoogland <da...@gmail.com>
Committed: Mon Jun 8 09:24:07 2015 +0200

----------------------------------------------------------------------
 .../src/com/cloud/hypervisor/vmware/resource/VmwareResource.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/1ca74dac/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
index 8726412..83c17dc 100644
--- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
+++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
@@ -312,7 +312,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
     protected DiskControllerType _rootDiskController = DiskControllerType.ide;
 
     protected ManagedObjectReference _morHyperHost;
-    protected static ThreadLocal<VmwareContext> s_serviceContext = new ThreadLocal<VmwareContext>();
+    protected static final ThreadLocal<VmwareContext> s_serviceContext = new ThreadLocal<VmwareContext>();
     protected String _hostName;
 
     protected List<PropertyMapDynamicBean> _cmdMBeans = new ArrayList<PropertyMapDynamicBean>();
@@ -328,7 +328,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
 
     protected static HashMap<VirtualMachinePowerState, PowerState> s_powerStatesTable;
     static {
-        s_powerStatesTable = new HashMap<VirtualMachinePowerState, PowerState>();
+        final s_powerStatesTable = new HashMap<VirtualMachinePowerState, PowerState>();
         s_powerStatesTable.put(VirtualMachinePowerState.POWERED_ON, PowerState.PowerOn);
         s_powerStatesTable.put(VirtualMachinePowerState.POWERED_OFF, PowerState.PowerOff);
         s_powerStatesTable.put(VirtualMachinePowerState.SUSPENDED, PowerState.PowerOn);


[15/50] [abbrv] git commit: updated refs/heads/feature/vpc-ipv6 to 6140db5

Posted by ek...@apache.org.
CLOUDSTACK-8487 : Add vMotion related tests

Signed-off-by: Gaurav Aradhye <ga...@clogeny.com>
This closes #268


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/108a74a6
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/108a74a6
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/108a74a6

Branch: refs/heads/feature/vpc-ipv6
Commit: 108a74a6b87fcb84f273b7740d1f46aaf6383708
Parents: 607a63b
Author: Abhinav Roy <ab...@citrix.com>
Authored: Wed May 20 16:07:03 2015 +0530
Committer: Gaurav Aradhye <ga...@clogeny.com>
Committed: Tue Jun 2 12:01:12 2015 +0530

----------------------------------------------------------------------
 .../component/maint/testpath_vMotion_vmware.py  | 2983 ++++++++++++++++++
 tools/marvin/marvin/lib/base.py                 |   92 +-
 2 files changed, 3059 insertions(+), 16 deletions(-)
----------------------------------------------------------------------



[11/50] [abbrv] git commit: updated refs/heads/feature/vpc-ipv6 to 6140db5

Posted by ek...@apache.org.
schema: Encode URL safe random vnc password string

The characters '/', '+' and '=' may break in some environments; URL-safe encoded
passwords contain '-' and '_' instead, which are more widely accepted.

Signed-off-by: Rohit Yadav <ro...@shapeblue.com>
(cherry picked from commit 19cc5eb9e6f1b5127bf01e4c1ae061875e61cb4c)
Signed-off-by: Rohit Yadav <ro...@shapeblue.com>


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/6c1dde8f
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/6c1dde8f
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/6c1dde8f

Branch: refs/heads/feature/vpc-ipv6
Commit: 6c1dde8f8632798c8059af27d68cf90209fff284
Parents: 23c990f
Author: Rohit Yadav <ro...@shapeblue.com>
Authored: Mon Jun 1 14:10:54 2015 +0200
Committer: Rohit Yadav <ro...@shapeblue.com>
Committed: Mon Jun 1 14:23:42 2015 +0200

----------------------------------------------------------------------
 engine/schema/src/com/cloud/vm/VMInstanceVO.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/6c1dde8f/engine/schema/src/com/cloud/vm/VMInstanceVO.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/vm/VMInstanceVO.java b/engine/schema/src/com/cloud/vm/VMInstanceVO.java
index 8f42a43..d4b18d0 100644
--- a/engine/schema/src/com/cloud/vm/VMInstanceVO.java
+++ b/engine/schema/src/com/cloud/vm/VMInstanceVO.java
@@ -210,7 +210,7 @@ public class VMInstanceVO implements VirtualMachine, FiniteStateObject<State, Vi
             SecureRandom random = SecureRandom.getInstance("SHA1PRNG");
             byte[] randomBytes = new byte[16];
             random.nextBytes(randomBytes);
-            vncPassword = Base64.encodeBase64String(randomBytes);
+            vncPassword = Base64.encodeBase64URLSafeString(randomBytes);
         } catch (NoSuchAlgorithmException e) {
             s_logger.error("Unexpected exception in SecureRandom Algorithm selection ", e);
         }
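
A small sketch contrasting the two encoders involved, assuming the Base64 class in the diff is org.apache.commons.codec.binary.Base64, which provides both methods: the standard alphabet may contain '/', '+' and '=' padding, while the URL-safe variant substitutes '-' and '_' and omits padding, which travels better through URLs and shells.

    import java.security.SecureRandom;
    import org.apache.commons.codec.binary.Base64;

    public class VncPasswordDemo {
        public static void main(String[] args) {
            byte[] randomBytes = new byte[16];
            new SecureRandom().nextBytes(randomBytes);

            String standard = Base64.encodeBase64String(randomBytes);       // may contain / + =
            String urlSafe = Base64.encodeBase64URLSafeString(randomBytes); // only - and _, no padding

            System.out.println(standard);
            System.out.println(urlSafe);
        }
    }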


[16/50] [abbrv] git commit: updated refs/heads/feature/vpc-ipv6 to 6140db5

Posted by ek...@apache.org.
CLOUDSTACK-8521: Improving logging in test case - test_egress_fw_rules.py

Signed-off-by: Gaurav Aradhye <ga...@clogeny.com>
This closes #345


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/8bf59b53
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/8bf59b53
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/8bf59b53

Branch: refs/heads/feature/vpc-ipv6
Commit: 8bf59b53933f5c287caf5ad44cc5147caeb03d65
Parents: 108a74a
Author: Gaurav Aradhye <ga...@clogeny.com>
Authored: Tue Jun 2 12:39:11 2015 +0530
Committer: Gaurav Aradhye <ga...@clogeny.com>
Committed: Tue Jun 2 13:31:29 2015 +0530

----------------------------------------------------------------------
 test/integration/component/test_egress_fw_rules.py | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/8bf59b53/test/integration/component/test_egress_fw_rules.py
----------------------------------------------------------------------
diff --git a/test/integration/component/test_egress_fw_rules.py b/test/integration/component/test_egress_fw_rules.py
index 4082981..f362421 100755
--- a/test/integration/component/test_egress_fw_rules.py
+++ b/test/integration/component/test_egress_fw_rules.py
@@ -217,8 +217,7 @@ class TestEgressFWRules(cloudstackTestCase):
         self.debug("Deploying instance in the account: %s" % self.account.name)
 
         project = None
-        try:
-            self.virtual_machine = VirtualMachine.create(self.apiclient,
+        self.virtual_machine = VirtualMachine.create(self.apiclient,
                                                          self.services["virtual_machine"],
                                                          accountid=self.account.name,
                                                          domainid=self.domain.id,
@@ -226,8 +225,6 @@ class TestEgressFWRules(cloudstackTestCase):
                                                          mode=self.zone.networktype if pfrule else 'basic',
                                                          networkids=[str(self.network.id)],
                                                          projectid=project.id if project else None)
-        except Exception as e:
-            self.fail("Virtual machine deployment failed with exception: %s" % e)
         self.debug("Deployed instance %s in account: %s" % (self.virtual_machine.id,self.account.name))
 
         # Checking if VM is running or not, in case it is deployed in error state, test case fails


[49/50] [abbrv] git commit: updated refs/heads/feature/vpc-ipv6 to 6140db5

Posted by ek...@apache.org.
Revert "Fix 2 findbugs high priority warnings"

This reverts commit 1ca74dac269faa890c830ccde67c5007ef2e3c45.


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/b272d771
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/b272d771
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/b272d771

Branch: refs/heads/feature/vpc-ipv6
Commit: b272d7717e67d5a31077bbef13c8a33ea073b9db
Parents: c5437d2
Author: Daan Hoogland <da...@gmail.com>
Authored: Mon Jun 8 10:01:19 2015 +0200
Committer: Daan Hoogland <da...@gmail.com>
Committed: Mon Jun 8 10:01:19 2015 +0200

----------------------------------------------------------------------
 .../src/com/cloud/hypervisor/vmware/resource/VmwareResource.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b272d771/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
index f7c6db3..d98c308 100644
--- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
+++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
@@ -312,7 +312,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
     protected DiskControllerType _rootDiskController = DiskControllerType.ide;
 
     protected ManagedObjectReference _morHyperHost;
-    protected static final ThreadLocal<VmwareContext> s_serviceContext = new ThreadLocal<VmwareContext>();
+    protected static ThreadLocal<VmwareContext> s_serviceContext = new ThreadLocal<VmwareContext>();
     protected String _hostName;
 
     protected List<PropertyMapDynamicBean> _cmdMBeans = new ArrayList<PropertyMapDynamicBean>();
@@ -328,7 +328,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
 
     protected static HashMap<VirtualMachinePowerState, PowerState> s_powerStatesTable;
     static {
-        final s_powerStatesTable = new HashMap<VirtualMachinePowerState, PowerState>();
+        s_powerStatesTable = new HashMap<VirtualMachinePowerState, PowerState>();
         s_powerStatesTable.put(VirtualMachinePowerState.POWERED_ON, PowerState.PowerOn);
         s_powerStatesTable.put(VirtualMachinePowerState.POWERED_OFF, PowerState.PowerOff);
         s_powerStatesTable.put(VirtualMachinePowerState.SUSPENDED, PowerState.PowerOn);
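
The revert was needed because the attempted fix wrote "final s_powerStatesTable = new HashMap<...>()" inside the static initializer, which omits the variable's type and so does not compile (and even with a type it would create a new local rather than assign the static field). Below is a hedged sketch of one way both MS_SHOULD_BE_FINAL warnings could be addressed while keeping the code compilable; it is not the project's actual follow-up, and the enums are stand-ins for the real VMware and CloudStack types.

    import java.util.HashMap;
    import java.util.Map;

    public class PowerStateTableSketch {
        enum VmPowerState { POWERED_ON, POWERED_OFF, SUSPENDED } // stand-in for VirtualMachinePowerState
        enum PowerState { PowerOn, PowerOff }                    // stand-in for CloudStack's PowerState

        protected static final ThreadLocal<Object> s_serviceContext = new ThreadLocal<>();

        // Declared final and assigned once at the declaration; the static block only populates it.
        protected static final Map<VmPowerState, PowerState> s_powerStatesTable = new HashMap<>();
        static {
            s_powerStatesTable.put(VmPowerState.POWERED_ON, PowerState.PowerOn);
            s_powerStatesTable.put(VmPowerState.POWERED_OFF, PowerState.PowerOff);
            s_powerStatesTable.put(VmPowerState.SUSPENDED, PowerState.PowerOn);
        }
    }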


[31/50] [abbrv] git commit: updated refs/heads/feature/vpc-ipv6 to 6140db5

Posted by ek...@apache.org.
Coverity issue: 1012179 - Commenting out unused variable.

Signed-off-by: Daan Hoogland <da...@gmail.com>


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/fe78c766
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/fe78c766
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/fe78c766

Branch: refs/heads/feature/vpc-ipv6
Commit: fe78c76691d36a1b9e440f54ef0346beb5799a2d
Parents: 159d8c2
Author: wilderrodrigues <wr...@schubergphilis.com>
Authored: Thu Jun 4 08:12:50 2015 +0200
Committer: Daan Hoogland <da...@gmail.com>
Committed: Thu Jun 4 12:34:03 2015 +0200

----------------------------------------------------------------------
 plugins/hypervisors/ovm/src/com/cloud/ovm/object/Test.java | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/fe78c766/plugins/hypervisors/ovm/src/com/cloud/ovm/object/Test.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/ovm/src/com/cloud/ovm/object/Test.java b/plugins/hypervisors/ovm/src/com/cloud/ovm/object/Test.java
index f5cecb1..a8ab4f7 100644
--- a/plugins/hypervisors/ovm/src/com/cloud/ovm/object/Test.java
+++ b/plugins/hypervisors/ovm/src/com/cloud/ovm/object/Test.java
@@ -85,8 +85,12 @@ public class Test {
             //System.out.println(vm.toJson());
             final Connection c = new Connection("192.168.189.12", "oracle", "password");
             //System.out.println(Coder.toJson(OvmHost.getDetails(c)));
-            final String txt =
-                    "{\"MasterIp\": \"192.168.189.12\", \"dom0Memory\": 790626304, \"freeMemory\": 16378757120, \"totalMemory\": 17169383424, \"cpuNum\": 4, \"agentVersion\": \"2.3-38\", \"cpuSpeed\": 2261}";
+
+            /* This is not being used at the moment.
+             * Coverity issue: 1012179
+             */
+            //final String txt =
+            //        "{\"MasterIp\": \"192.168.189.12\", \"dom0Memory\": 790626304, \"freeMemory\": 16378757120, \"totalMemory\": 17169383424, \"cpuNum\": 4, \"agentVersion\": \"2.3-38\", \"cpuSpeed\": 2261}";
 
             //OvmHost.Details d = new GsonBuilder().create().fromJson(txt, OvmHost.Details.class);
             //OvmHost.Details d = Coder.fromJson(txt, OvmHost.Details.class);


[29/50] [abbrv] git commit: updated refs/heads/feature/vpc-ipv6 to 6140db5

Posted by ek...@apache.org.
CLOUDSTACK-8515: Skipping snapshot tests on HyperV and LXC hypervisors

Signed-off-by: Gaurav Aradhye <ga...@clogeny.com>
This closes #350


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/f92a5036
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/f92a5036
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/f92a5036

Branch: refs/heads/feature/vpc-ipv6
Commit: f92a50363c74ba699dc678489ed75624a2ac75a1
Parents: 00884b3
Author: Gaurav Aradhye <ga...@clogeny.com>
Authored: Wed Jun 3 15:38:44 2015 +0530
Committer: Gaurav Aradhye <ga...@clogeny.com>
Committed: Thu Jun 4 10:56:28 2015 +0530

----------------------------------------------------------------------
 .../component/test_escalations_volumes.py       | 38 ++++++++++++++++----
 1 file changed, 31 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/f92a5036/test/integration/component/test_escalations_volumes.py
----------------------------------------------------------------------
diff --git a/test/integration/component/test_escalations_volumes.py b/test/integration/component/test_escalations_volumes.py
index 83e0e71..b2e21f6 100644
--- a/test/integration/component/test_escalations_volumes.py
+++ b/test/integration/component/test_escalations_volumes.py
@@ -461,7 +461,7 @@ class TestVolumes(cloudstackTestCase):
         Step6: Resizing data volume
         """
         if self.hypervisor.lower() in ['hyperv']:
-            raise unittest.SkipTest(
+            self.skipTest(
                 "This featureis not supported on existing\
                         hypervisor. Hence, skipping the test")
         # Listing volumes for a user before creating a volume
@@ -584,7 +584,7 @@ class TestVolumes(cloudstackTestCase):
         Step6: Resizing custom volume
         """
         if self.hypervisor.lower() in ['hyperv']:
-            raise unittest.SkipTest(
+            self.skipTest(
                 "This featureis not supported on existing\
                         hypervisor. Hence, skipping the test")
         # Listing all the disk offerings
@@ -710,8 +710,8 @@ class TestVolumes(cloudstackTestCase):
         Step4: Creating Volume from snapshot
         Step5: Creating Template from Snapshot
         """
-        if self.hypervisor.lower() in ['hyperv']:
-            raise unittest.SkipTest(
+        if self.hypervisor.lower() in ["hyperv", "lxc"]:
+            self.skipTest(
                 "This featureis not supported on existing\
                         hypervisor. Hence, skipping the test")
         list_volumes_before = Volume.list(
@@ -932,6 +932,12 @@ class TestVolumes(cloudstackTestCase):
         Step6: Listing snapshot policies for a volume created in step1 again
         Step7: Verifyign that the list snapshot policy length is increased by 1
         """
+
+        if self.hypervisor.lower() in ["hyperv", "lxc"]:
+            self.skipTest(
+                "This featureis not supported on existing\
+                        hypervisor. Hence, skipping the test")
+
         list_volumes_before = Volume.list(
             self.userapiclient,
             listall=self.services["listall"])
@@ -1064,6 +1070,12 @@ class TestVolumes(cloudstackTestCase):
         Step6: Listing snapshot policies for a volume created in step1 again
         Step7: Verifyign that the list snapshot policy length is increased by 1
         """
+
+        if self.hypervisor.lower() in ["hyperv", "lxc"]:
+            self.skipTest(
+                "This featureis not supported on existing\
+                        hypervisor. Hence, skipping the test")
+
         list_volumes_before = Volume.list(
             self.userapiclient,
             listall=self.services["listall"])
@@ -1195,6 +1207,12 @@ class TestVolumes(cloudstackTestCase):
         Step6: Listing snapshot policies for a volume created in step1 again
         Step7: Verifyign that the list snapshot policy length is increased by 1
         """
+
+        if self.hypervisor.lower() in ["hyperv", "lxc"]:
+            self.skipTest(
+                "This featureis not supported on existing\
+                        hypervisor. Hence, skipping the test")
+
         list_volumes_before = Volume.list(
             self.userapiclient,
             listall=self.services["listall"])
@@ -1330,6 +1348,11 @@ class TestVolumes(cloudstackTestCase):
         Step10:Verifying that the list snapshot policy length is decreased
                by 1
         """
+        if self.hypervisor.lower() in ["hyperv", "lxc"]:
+            self.skipTest(
+                "This featureis not supported on existing\
+                        hypervisor. Hence, skipping the test")
+
         list_volumes_before = Volume.list(
             self.userapiclient,
             listall=self.services["listall"])
@@ -1473,10 +1496,11 @@ class TestVolumes(cloudstackTestCase):
         Step11: Listign the snapshots from page 2 again and verifyign that
                 list returns none
         """
-        if self.hypervisor.lower() in ['hyperv']:
-            raise unittest.SkipTest(
+        if self.hypervisor.lower() in ["hyperv", "lxc"]:
+            self.skipTest(
                 "This featureis not supported on existing\
                         hypervisor. Hence, skipping the test")
+
         list_volumes_before = Volume.list(
             self.userapiclient,
             listall=self.services["listall"])
@@ -1810,7 +1834,7 @@ class TestVolumes(cloudstackTestCase):
               but not with size X
         """
         if self.hypervisor.lower() in ['hyperv']:
-            raise unittest.SkipTest(
+            self.skipTest(
                 "This featureis not supported on existing\
                         hypervisor. Hence, skipping the test")
         disk_offering = DiskOffering.create(


[22/50] [abbrv] git commit: updated refs/heads/feature/vpc-ipv6 to 6140db5

Posted by ek...@apache.org.
Merge branch 'fast_tests' of https://github.com/rsafonseca/cloudstack

This closes #330


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/c2f2f77a
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/c2f2f77a
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/c2f2f77a

Branch: refs/heads/feature/vpc-ipv6
Commit: c2f2f77ab1cf1967778c33b1c9e8a04ed1b729f2
Parents: e983246 9030036
Author: Rajani Karuturi <ra...@gmail.com>
Authored: Wed Jun 3 10:07:46 2015 +0530
Committer: Rajani Karuturi <ra...@gmail.com>
Committed: Wed Jun 3 10:08:11 2015 +0530

----------------------------------------------------------------------
 pom.xml        | 3 +++
 server/pom.xml | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------



[34/50] [abbrv] git commit: updated refs/heads/feature/vpc-ipv6 to 6140db5

Posted by ek...@apache.org.
Formatting the code - Adding final modifier and indenting the code

Signed-off-by: Daan Hoogland <da...@gmail.com>


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/abe09902
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/abe09902
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/abe09902

Branch: refs/heads/feature/vpc-ipv6
Commit: abe09902591cd33490685a02a9320340fcadb79c
Parents: fe78c76
Author: wilderrodrigues <wr...@schubergphilis.com>
Authored: Thu Jun 4 08:14:07 2015 +0200
Committer: Daan Hoogland <da...@gmail.com>
Committed: Thu Jun 4 12:34:04 2015 +0200

----------------------------------------------------------------------
 .../com/cloud/upgrade/dao/VersionDaoImpl.java   | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/abe09902/engine/schema/src/com/cloud/upgrade/dao/VersionDaoImpl.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/upgrade/dao/VersionDaoImpl.java b/engine/schema/src/com/cloud/upgrade/dao/VersionDaoImpl.java
index 3be101b..653124f 100644
--- a/engine/schema/src/com/cloud/upgrade/dao/VersionDaoImpl.java
+++ b/engine/schema/src/com/cloud/upgrade/dao/VersionDaoImpl.java
@@ -64,8 +64,8 @@ public class VersionDaoImpl extends GenericDaoBase<VersionVO, Long> implements V
     }
 
     @Override
-    public VersionVO findByVersion(String version, Step step) {
-        SearchCriteria<VersionVO> sc = AllFieldsSearch.create();
+    public VersionVO findByVersion(final String version, final Step step) {
+        final SearchCriteria<VersionVO> sc = AllFieldsSearch.create();
         sc.setParameters("version", version);
         sc.setParameters("step", step);
 
@@ -96,7 +96,7 @@ public class VersionDaoImpl extends GenericDaoBase<VersionVO, Long> implements V
                     try {
                         pstmt.executeQuery();
                         return "2.1.8";
-                    } catch (SQLException e) {
+                    } catch (final SQLException e) {
                         s_logger.debug("Assuming the exception means domain_id is not there.");
                         s_logger.debug("No version table and no nics table, returning 2.1.7");
                         return "2.1.7";
@@ -110,7 +110,7 @@ public class VersionDaoImpl extends GenericDaoBase<VersionVO, Long> implements V
                         pstmt = conn.prepareStatement("SELECT is_static_nat from firewall_rules");
                         pstmt.executeQuery();
                         return "2.2.1";
-                    } catch (SQLException e) {
+                    } catch (final SQLException e) {
                         s_logger.debug("Assuming the exception means static_nat field doesn't exist in firewall_rules table, returning version 2.2.2");
                         return "2.2.2";
                     } finally {
@@ -124,14 +124,14 @@ public class VersionDaoImpl extends GenericDaoBase<VersionVO, Long> implements V
 
             sc.setParameters("step", Step.Complete);
             Filter filter = new Filter(VersionVO.class, "id", false, 0l, 1l);
-            List<String> upgradedVersions = customSearch(sc, filter);
+            final List<String> upgradedVersions = customSearch(sc, filter);
 
             if (upgradedVersions.isEmpty()) {
 
                 // Check if there are records in Version table
                 filter = new Filter(VersionVO.class, "id", false, 0l, 1l);
                 sc = CurrentVersionSearch.create();
-                List<String> vers = customSearch(sc, filter);
+                final List<String> vers = customSearch(sc, filter);
                 if (!vers.isEmpty()) {
                     throw new CloudRuntimeException("Version table contains records for which upgrade wasn't completed");
                 }
@@ -147,9 +147,9 @@ public class VersionDaoImpl extends GenericDaoBase<VersionVO, Long> implements V
                         pstmt = conn.prepareStatement("SELECT is_static_nat from firewall_rules");
                         pstmt.executeQuery();
                         throw new CloudRuntimeException("Unable to determine the current version, version table exists and empty, " +
-                            "nics table doesn't exist, is_static_nat field exists in firewall_rules table");
+                                "nics table doesn't exist, is_static_nat field exists in firewall_rules table");
                     }
-                } catch (SQLException e) {
+                } catch (final SQLException e) {
                     s_logger.debug("Assuming the exception means static_nat field doesn't exist in firewall_rules table, returning version 2.2.2");
                     return "2.2.2";
                 } finally {
@@ -160,12 +160,12 @@ public class VersionDaoImpl extends GenericDaoBase<VersionVO, Long> implements V
                 return upgradedVersions.get(0);
             }
 
-        } catch (SQLException e) {
+        } catch (final SQLException e) {
             throw new CloudRuntimeException("Unable to get the current version", e);
         } finally {
             try {
                 conn.close();
-            } catch (SQLException e) {
+            } catch (final SQLException e) {
             }
         }
 


[02/50] [abbrv] git commit: updated refs/heads/feature/vpc-ipv6 to 6140db5

Posted by ek...@apache.org.
CLOUDSTACK-8526: Use more memory for VM deployment on HyperV when SSH access to the VM is attempted

Signed-off-by: Gaurav Aradhye <ga...@clogeny.com>
Reviewed-by: Gaurav


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/3e21c9bf
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/3e21c9bf
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/3e21c9bf

Branch: refs/heads/feature/vpc-ipv6
Commit: 3e21c9bf2b48510002709b73d834bd307e70353a
Parents: 900b656
Author: Gaurav Aradhye <ga...@clogeny.com>
Authored: Fri May 29 16:35:08 2015 +0530
Committer: Gaurav Aradhye <ga...@clogeny.com>
Committed: Fri May 29 16:40:33 2015 +0530

----------------------------------------------------------------------
 test/integration/testpaths/testpath_stopped_vm.py | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/3e21c9bf/test/integration/testpaths/testpath_stopped_vm.py
----------------------------------------------------------------------
diff --git a/test/integration/testpaths/testpath_stopped_vm.py b/test/integration/testpaths/testpath_stopped_vm.py
index 7d8886c..927dbd9 100644
--- a/test/integration/testpaths/testpath_stopped_vm.py
+++ b/test/integration/testpaths/testpath_stopped_vm.py
@@ -148,9 +148,13 @@ class TestAdvancedZoneStoppedVM(cloudstackTestCase):
             # Create 2 service offerings with different values for
             # for cpunumber, cpuspeed, and memory
 
-            cls.testdata["service_offering"]["cpunumber"] = "1"
-            cls.testdata["service_offering"]["cpuspeed"] = "128"
-            cls.testdata["service_offering"]["memory"] = "256"
+            cls.testdata["service_offering"]["cpunumber"] = 1
+            cls.testdata["service_offering"]["cpuspeed"] = 128
+            cls.testdata["service_offering"]["memory"] = 256
+
+            if cls.hypervisor.lower() == "hyperv":
+                cls.testdata["service_offering"]["cpuspeed"] = 1024
+                cls.testdata["service_offering"]["memory"] = 1024
 
             cls.service_offering = ServiceOffering.create(
                 cls.apiclient,
@@ -158,9 +162,7 @@ class TestAdvancedZoneStoppedVM(cloudstackTestCase):
             )
             cls._cleanup.append(cls.service_offering)
 
-            cls.testdata["service_offering"]["cpunumber"] = "2"
-            cls.testdata["service_offering"]["cpuspeed"] = "256"
-            cls.testdata["service_offering"]["memory"] = "512"
+            cls.testdata["service_offering"]["cpunumber"] = 2
 
             cls.service_offering_2 = ServiceOffering.create(
                 cls.apiclient,


[23/50] [abbrv] git commit: updated refs/heads/feature/vpc-ipv6 to 6140db5

Posted by ek...@apache.org.
Upgrade Reflections to 0.9.9

Signed-off-by: Rajani Karuturi <ra...@gmail.com>

This closes #333


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/bec44bff
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/bec44bff
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/bec44bff

Branch: refs/heads/feature/vpc-ipv6
Commit: bec44bffb3ed415315793cf83603ce13bf865ff0
Parents: c2f2f77
Author: Rafael da Fonseca <rs...@gmail.com>
Authored: Fri May 29 23:32:35 2015 +0200
Committer: Rajani Karuturi <ra...@gmail.com>
Committed: Wed Jun 3 11:01:32 2015 +0530

----------------------------------------------------------------------
 pom.xml                                           |  2 +-
 server/src/com/cloud/api/doc/ApiXmlDocWriter.java |  5 ++++-
 utils/src/com/cloud/utils/ReflectUtil.java        | 12 ++++++++++--
 3 files changed, 15 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/bec44bff/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 6e3730b..fa019ef 100644
--- a/pom.xml
+++ b/pom.xml
@@ -88,7 +88,7 @@
     <cs.lang.version>2.6</cs.lang.version>
     <cs.commons-io.version>2.4</cs.commons-io.version>
     <cs.commons-validator.version>1.4.0</cs.commons-validator.version>
-    <cs.reflections.version>0.9.8</cs.reflections.version>
+    <cs.reflections.version>0.9.9</cs.reflections.version>
     <cs.java-ipv6.version>0.15</cs.java-ipv6.version>
     <cs.replace.properties>build/replace.properties</cs.replace.properties>
     <cs.libvirt-java.version>0.5.1</cs.libvirt-java.version>

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/bec44bff/server/src/com/cloud/api/doc/ApiXmlDocWriter.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/api/doc/ApiXmlDocWriter.java b/server/src/com/cloud/api/doc/ApiXmlDocWriter.java
index 2d2cada..166af4b 100644
--- a/server/src/com/cloud/api/doc/ApiXmlDocWriter.java
+++ b/server/src/com/cloud/api/doc/ApiXmlDocWriter.java
@@ -96,12 +96,15 @@ public class ApiXmlDocWriter {
     }
 
     public static void main(String[] args) {
-
         Set<Class<?>> cmdClasses = ReflectUtil.getClassesWithAnnotation(APICommand.class, new String[] {"org.apache.cloudstack.api", "com.cloud.api",
                 "com.cloud.api.commands", "com.globo.globodns.cloudstack.api", "org.apache.cloudstack.network.opendaylight.api",
                 "com.cloud.api.commands.netapp", "org.apache.cloudstack.api.command.admin.zone", "org.apache.cloudstack.network.contrail.api.command"});
 
         for (Class<?> cmdClass : cmdClasses) {
+            if(cmdClass.getAnnotation(APICommand.class)==null){
+               System.out.println("Warning, API Cmd class " + cmdClass.getName() + " has no APICommand annotation ");
+               continue;
+            }
             String apiName = cmdClass.getAnnotation(APICommand.class).name();
             if (s_apiNameCmdClassMap.containsKey(apiName)) {
                 // handle API cmd separation into admin cmd and user cmd with the common api name

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/bec44bff/utils/src/com/cloud/utils/ReflectUtil.java
----------------------------------------------------------------------
diff --git a/utils/src/com/cloud/utils/ReflectUtil.java b/utils/src/com/cloud/utils/ReflectUtil.java
index 9c09980..c8ae954 100644
--- a/utils/src/com/cloud/utils/ReflectUtil.java
+++ b/utils/src/com/cloud/utils/ReflectUtil.java
@@ -37,6 +37,10 @@ import java.util.Set;
 
 import org.apache.log4j.Logger;
 import org.reflections.Reflections;
+import org.reflections.util.ConfigurationBuilder;
+import org.reflections.util.ClasspathHelper;
+import org.reflections.scanners.SubTypesScanner;
+import org.reflections.scanners.TypeAnnotationsScanner;
 
 import com.google.common.collect.ImmutableSet;
 
@@ -45,6 +49,7 @@ import com.cloud.utils.exception.CloudRuntimeException;
 public class ReflectUtil {
 
     private static final Logger s_logger = Logger.getLogger(ReflectUtil.class);
+    private static final Logger logger = Logger.getLogger(Reflections.class);
 
     public static Pair<Class<?>, Field> getAnyField(Class<?> clazz, String fieldName) {
         try {
@@ -65,10 +70,13 @@ public class ReflectUtil {
     public static Set<Class<?>> getClassesWithAnnotation(Class<? extends Annotation> annotation, String[] packageNames) {
         Reflections reflections;
         Set<Class<?>> classes = new HashSet<Class<?>>();
+        ConfigurationBuilder builder=new ConfigurationBuilder();
         for (String packageName : packageNames) {
-            reflections = new Reflections(packageName);
-            classes.addAll(reflections.getTypesAnnotatedWith(annotation));
+             builder.addUrls(ClasspathHelper.forPackage(packageName));
         }
+        builder.setScanners(new SubTypesScanner(),new TypeAnnotationsScanner());
+        reflections = new Reflections(builder);
+        classes.addAll(reflections.getTypesAnnotatedWith(annotation));
         return classes;
     }
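
For reference, Reflections 0.9.9 expects the builder-style configuration adopted above; a minimal standalone sketch of that pattern follows, where the scanned package ("org.reflections") and the annotation (Deprecated) are placeholders rather than CloudStack classes:

    import java.lang.annotation.Annotation;
    import java.util.Set;

    import org.reflections.Reflections;
    import org.reflections.scanners.SubTypesScanner;
    import org.reflections.scanners.TypeAnnotationsScanner;
    import org.reflections.util.ClasspathHelper;
    import org.reflections.util.ConfigurationBuilder;

    public class ReflectionsSketch {
        // Collects classes carrying the given annotation from one package,
        // mirroring the single-Reflections-instance pattern used above.
        static Set<Class<?>> annotatedIn(String packageName, Class<? extends Annotation> annotation) {
            ConfigurationBuilder builder = new ConfigurationBuilder()
                    .addUrls(ClasspathHelper.forPackage(packageName))
                    .setScanners(new SubTypesScanner(), new TypeAnnotationsScanner());
            return new Reflections(builder).getTypesAnnotatedWith(annotation);
        }

        public static void main(String[] args) {
            System.out.println(annotatedIn("org.reflections", Deprecated.class).size());
        }
    }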
 


[06/50] [abbrv] git commit: updated refs/heads/feature/vpc-ipv6 to 6140db5

Posted by ek...@apache.org.
Change the tests' random number generator source to /dev/urandom
This fixes a significant performance issue with random number generation on more recent Linux kernels and Java versions


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/82847d44
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/82847d44
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/82847d44

Branch: refs/heads/feature/vpc-ipv6
Commit: 82847d4405f20920a1ca066765071d485931ff19
Parents: 900b656
Author: Rafael da Fonseca <rs...@gmail.com>
Authored: Fri May 29 20:24:41 2015 +0200
Committer: Rafael da Fonseca <rs...@gmail.com>
Committed: Fri May 29 20:24:41 2015 +0200

----------------------------------------------------------------------
 pom.xml        | 3 +++
 server/pom.xml | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/82847d44/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 47dc2cd..0af7062 100644
--- a/pom.xml
+++ b/pom.xml
@@ -963,6 +963,9 @@
           <groupId>org.apache.maven.plugins</groupId>
           <artifactId>maven-failsafe-plugin</artifactId>
           <version>2.18.1</version>
+          <configuration>
+              <argLine>-Djava.security.egd=file:/dev/./urandom</argLine>
+          </configuration>
         </plugin>
       </plugins>
     </pluginManagement>

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/82847d44/server/pom.xml
----------------------------------------------------------------------
diff --git a/server/pom.xml b/server/pom.xml
index 504bedc..e68e678 100644
--- a/server/pom.xml
+++ b/server/pom.xml
@@ -174,7 +174,7 @@
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-surefire-plugin</artifactId>
         <configuration>
-          <argLine>-Xmx1024m</argLine>
+          <argLine>-Xmx1024m -Djava.security.egd=file:/dev/./urandom</argLine>
           <excludes>
             <exclude>%regex[.*[0-9]*To[0-9]*.*Test.*]</exclude>
             <exclude>com/cloud/upgrade/AdvanceZone223To224UpgradeTest</exclude>
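
The flag above only changes which device the JVM uses when seeding SecureRandom; a small sketch for observing the effect locally follows (timings are illustrative and vary by kernel and entropy pool):

    import java.security.SecureRandom;

    public class EntropySketch {
        public static void main(String[] args) {
            // With -Djava.security.egd=file:/dev/./urandom this typically returns
            // almost immediately; with the blocking /dev/random source it can
            // stall on Linux hosts whose entropy pool is depleted.
            long start = System.nanoTime();
            byte[] seed = new SecureRandom().generateSeed(32);
            long ms = (System.nanoTime() - start) / 1_000_000;
            System.out.println("generated " + seed.length + " seed bytes in " + ms + " ms");
        }
    }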


[13/50] [abbrv] git commit: updated refs/heads/feature/vpc-ipv6 to 6140db5

Posted by ek...@apache.org.
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/108a74a6/tools/marvin/marvin/lib/base.py
----------------------------------------------------------------------
diff --git a/tools/marvin/marvin/lib/base.py b/tools/marvin/marvin/lib/base.py
index 561d7e1..31e6def 100755
--- a/tools/marvin/marvin/lib/base.py
+++ b/tools/marvin/marvin/lib/base.py
@@ -118,7 +118,6 @@ class Account:
         if "userUUID" in services:
             cmd.userid = "-".join([services["userUUID"], random_gen()])
 
-
         if domainid:
             cmd.domainid = domainid
         account = apiclient.createAccount(cmd)
@@ -599,7 +598,7 @@ class VirtualMachine:
                 if hasattr(self, "projectid"):
                     projectid = self.projectid
                 vms = VirtualMachine.list(apiclient, projectid=projectid,
-				          id=self.id, listAll=True)
+                        id=self.id, listAll=True)
                 validationresult = validateList(vms)
                 if validationresult[0] == FAIL:
                     raise Exception("VM list validation failed: %s" % validationresult[2])
@@ -807,7 +806,7 @@ class Volume:
 
     @classmethod
     def create(cls, apiclient, services, zoneid=None, account=None,
-               domainid=None, diskofferingid=None, projectid=None ,size=None):
+               domainid=None, diskofferingid=None, projectid=None, size=None):
         """Create Volume"""
         cmd = createVolume.createVolumeCmd()
         cmd.name = "-".join([services["diskname"], random_gen()])
@@ -985,6 +984,7 @@ class Volume:
         [setattr(cmd, k, v) for k, v in kwargs.items()]
         return(apiclient.migrateVolume(cmd))
 
+
 class Snapshot:
     """Manage Snapshot Lifecycle
     """
@@ -1053,6 +1053,7 @@ class Snapshot:
         except Exception as e:
             return [FAIL, e]
 
+
 class Template:
     """Manage template life cycle"""
 
@@ -1742,6 +1743,7 @@ class FireWallRule:
             cmd.listall = True
         return(apiclient.listFirewallRules(cmd))
 
+
 class Autoscale:
 
     """Manage Auto scale"""
@@ -1957,10 +1959,10 @@ class ServiceOffering:
         if domainid:
             cmd.domainid = domainid
 
-	if tags:
-	    cmd.tags = tags
-	elif "tags" in services:
-	    cmd.tags = services["tags"]
+        if tags:
+            cmd.tags = tags
+        elif "tags" in services:
+            cmd.tags = services["tags"]
 
         [setattr(cmd, k, v) for k, v in kwargs.items()]
         return ServiceOffering(apiclient.createServiceOffering(cmd).__dict__)
@@ -2003,10 +2005,10 @@ class DiskOffering:
         if domainid:
             cmd.domainid = domainid
 
-	if tags:
-	    cmd.tags = tags
-	elif "tags" in services:
-	    cmd.tags = services["tags"]
+        if tags:
+            cmd.tags = tags
+        elif "tags" in services:
+            cmd.tags = services["tags"]
 
         if "storagetype" in services:
             cmd.storagetype = services["storagetype"]
@@ -2156,6 +2158,7 @@ class SnapshotPolicy:
             cmd.listall = True
         return(apiclient.listSnapshotPolicies(cmd))
 
+
 class Hypervisor:
     """Manage Hypervisor"""
 
@@ -2515,11 +2518,40 @@ class Host:
     @classmethod
     def reconnect(cls, apiclient, **kwargs):
         """Reconnect the Host"""
-        
+
         cmd = reconnectHost.reconnectHostCmd()
         [setattr(cmd, k, v) for k, v in kwargs.items()]
         return(apiclient.reconnectHost(cmd))
 
+    @classmethod
+    def getState(cls, apiclient, hostid, state, resourcestate, timeout=600):
+        """List Host and check if its resource state is as expected
+        @returnValue - List[Result, Reason]
+                       1) Result - FAIL if there is any exception
+                       in the operation or Host state does not change
+                       to expected state in given time else PASS
+                       2) Reason - Reason for failure"""
+
+        returnValue = [FAIL, "VM state not trasited to %s,\
+                        operation timed out" % state]
+
+        while timeout > 0:
+            try:
+                hosts = Host.list(apiclient,
+                          id=hostid, listall=True)
+                validationresult = validateList(hosts)
+                if validationresult[0] == FAIL:
+                    raise Exception("Host list validation failed: %s" % validationresult[2])
+                elif str(hosts[0].state).lower().decode("string_escape") == str(state).lower() and str(hosts[0].resourcestate).lower().decode("string_escape") == str(resourcestate).lower():
+                    returnValue = [PASS, None]
+                    break
+            except Exception as e:
+                returnValue = [FAIL, e]
+                break
+            time.sleep(60)
+            timeout -= 60
+        return returnValue
+
 class StoragePool:
     """Manage Storage pools (Primary Storage)"""
 
@@ -2624,12 +2656,41 @@ class StoragePool:
         return(apiclient.findStoragePoolsForMigration(cmd))
 
     @classmethod
-    def update(cls,apiclient, **kwargs):
+    def update(cls, apiclient, **kwargs):
         """Update storage pool"""
-        cmd=updateStoragePool.updateStoragePoolCmd()
+        cmd = updateStoragePool.updateStoragePoolCmd()
         [setattr(cmd, k, v) for k, v in kwargs.items()]
         return apiclient.updateStoragePool(cmd)
 
+    @classmethod
+    def getState(cls, apiclient, poolid, state, timeout=600):
+        """List StoragePools and check if its  state is as expected
+        @returnValue - List[Result, Reason]
+                       1) Result - FAIL if there is any exception
+                       in the operation or pool state does not change
+                       to expected state in given time else PASS
+                       2) Reason - Reason for failure"""
+
+        returnValue = [FAIL, "VM state not trasited to %s,\
+                        operation timed out" % state]
+
+        while timeout > 0:
+            try:
+                pools = StoragePool.list(apiclient,
+                          id=poolid, listAll=True)
+                validationresult = validateList(pools)
+                if validationresult[0] == FAIL:
+                    raise Exception("Host list validation failed: %s" % validationresult[2])
+                elif str(pools[0].state).lower().decode("string_escape") == str(state).lower():
+                    returnValue = [PASS, None]
+                    break
+            except Exception as e:
+                returnValue = [FAIL, e]
+                break
+            time.sleep(60)
+            timeout -= 60
+        return returnValue
+
 class Network:
     """Manage Network pools"""
 
@@ -4371,6 +4432,7 @@ class VmSnapshot:
     """Manage VM Snapshot life cycle"""
     def __init__(self, items):
         self.__dict__.update(items)
+
     @classmethod
     def create(cls, apiclient, vmid, snapshotmemory="false",
                name=None, description=None):
@@ -4833,5 +4895,3 @@ class StorageNetworkIpRange:
         cmd = listStorageNetworkIpRange.listStorageNetworkIpRangeCmd()
         [setattr(cmd, k, v) for k, v in kwargs.items()]
         return(apiclient.listStorageNetworkIpRange(cmd))
-
-


[43/50] [abbrv] git commit: updated refs/heads/feature/vpc-ipv6 to 6140db5

Posted by ek...@apache.org.
Fix performance issue reported by findbugs (unnecessary boxing/unboxing):
VmwareResource.java:693, DM_BOXED_PRIMITIVE_FOR_PARSING, Priority: High
VmwareResource.java:4769, DM_BOXED_PRIMITIVE_FOR_PARSING, Priority: High
Boxing/unboxing to parse a primitive in com.cloud.hypervisor.vmware.resource.VmwareResource.getNetworkStats(String)

The parsing operation is now faster and allocates less memory.

Signed-off-by: Daan Hoogland <da...@gmail.com>

This closes #365


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/38c269d7
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/38c269d7
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/38c269d7

Branch: refs/heads/feature/vpc-ipv6
Commit: 38c269d71e40200cd27bfd9d1187ec25fcd8b6fa
Parents: 65383fb
Author: Rafael da Fonseca <rs...@gmail.com>
Authored: Sun Jun 7 21:23:09 2015 +0200
Committer: Daan Hoogland <da...@gmail.com>
Committed: Mon Jun 8 09:32:52 2015 +0200

----------------------------------------------------------------------
 .../com/cloud/hypervisor/vmware/resource/VmwareResource.java | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/38c269d7/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
index 83c17dc..f7c6db3 100644
--- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
+++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
@@ -690,8 +690,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
                 String[] splitResult = result.split(":");
                 int i = 0;
                 while (i < splitResult.length - 1) {
-                    stats[0] += (new Long(splitResult[i++])).longValue();
-                    stats[1] += (new Long(splitResult[i++])).longValue();
+                    stats[0] += Long.parseLong(splitResult[i++]);
+                    stats[1] += Long.parseLong(splitResult[i++]);
                 }
                 return new NetworkUsageAnswer(cmd, "success", stats[0], stats[1]);
             }
@@ -4766,8 +4766,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
                 String[] splitResult = result.split(":");
                 int i = 0;
                 while (i < splitResult.length - 1) {
-                    stats[0] += (new Long(splitResult[i++])).longValue();
-                    stats[1] += (new Long(splitResult[i++])).longValue();
+                    stats[0] += Long.parseLong(splitResult[i++]);
+                    stats[1] += Long.parseLong(splitResult[i++]);
                 }
             } catch (Throwable e) {
                 s_logger.warn("Unable to parse return from script return of network usage command: " + e.toString(), e);


[05/50] [abbrv] git commit: updated refs/heads/feature/vpc-ipv6 to 6140db5

Posted by ek...@apache.org.
Cleanup awsapi leftovers

Signed-off-by: Rohit Yadav <ro...@shapeblue.com>

This closes #328


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/1958a8e9
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/1958a8e9
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/1958a8e9

Branch: refs/heads/feature/vpc-ipv6
Commit: 1958a8e9c1edcfd4522ff4ee91e236a510d6c7bc
Parents: ab3b3c7
Author: Rafael da Fonseca <rs...@gmail.com>
Authored: Fri May 29 13:02:55 2015 -0400
Committer: Rohit Yadav <ro...@shapeblue.com>
Committed: Fri May 29 19:15:51 2015 +0200

----------------------------------------------------------------------
 client/tomcatconf/server7-ssl.xml.in   | 106 ----------------------------
 debian/rules                           |   1 -
 packaging/centos63/cloud.spec          |   4 --
 packaging/centos7/cloud.spec           |   4 --
 packaging/fedora20/cloud.spec          |   4 --
 packaging/fedora21/cloud.spec          |   4 --
 python/lib/cloudutils/serviceConfig.py |   2 +-
 tools/devcloud/src/Vagrantfile         |   1 -
 8 files changed, 1 insertion(+), 125 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/1958a8e9/client/tomcatconf/server7-ssl.xml.in
----------------------------------------------------------------------
diff --git a/client/tomcatconf/server7-ssl.xml.in b/client/tomcatconf/server7-ssl.xml.in
index 2c30651..2633bca 100755
--- a/client/tomcatconf/server7-ssl.xml.in
+++ b/client/tomcatconf/server7-ssl.xml.in
@@ -154,110 +154,4 @@
       </Host>
     </Engine>
   </Service>
-
-  <Service name="Catalina7080">
-
-    <!--The connectors can use a shared executor, you can define one or more named thread pools-->
-    <Executor name="tomcatThreadPool-internal" namePrefix="catalina-exec-int-"
-        maxThreads="150" minSpareThreads="25"/>
-
-    <!-- A "Connector" represents an endpoint by which requests are received
-         and responses are returned. Documentation at :
-         Java HTTP Connector: /docs/config/http.html (blocking & non-blocking)
-         Java AJP  Connector: /docs/config/ajp.html
-         APR (HTTP/AJP) Connector: /docs/apr.html
-         Define a non-SSL HTTP/1.1 Connector on port 8080
-    -->
-    <!--
-    <Connector port="8080" protocol="HTTP/1.1"
-               connectionTimeout="20000"
-               redirectPort="8443" />
-    -->
-    <!-- A "Connector" using the shared thread pool-->
-    <!--
-    <Connector executor="tomcatThreadPool"
-               port="8080" protocol="org.apache.coyote.http11.Http11NioProtocol"
-               connectionTimeout="20000" disableUploadTimeout="true"
-               acceptCount="150" enableLookups="false" maxThreads="150"
-               maxHttpHeaderSize="8192" redirectPort="8443" />
-    -->
-    <!-- Define a SSL HTTP/1.1 Connector on port 8443
-         This connector uses the JSSE configuration, when using APR, the
-         connector should be using the OpenSSL style configuration
-         described in the APR documentation -->
-    <!--
-    <Connector port="8443" protocol="HTTP/1.1" SSLEnabled="true"
-               maxThreads="150" scheme="https" secure="true"
-               clientAuth="false" sslProtocol="TLS"
-               keystoreType="PKCS12"
-	       keystoreFile="conf/cloud-localhost.pk12"
-	       keystorePass="password"
-               />
-    -->
-
-    <!-- Listen on 6443 instead of 8443 because tomcat6 will change 8443 to a random one when CATALINA_HOME is not /usr/share/tomcat6 -->
-    <Connector executor="tomcatThreadPool-internal" port="5443" protocol="org.apache.coyote.http11.Http11NioProtocol" SSLEnabled="true"
-               maxThreads="150" scheme="https" secure="true"
-               clientAuth="false" sslProtocol="TLS"
-               keystoreType="JKS"
-               keystoreFile="/etc/cloudstack/management/cloudmanagementserver.keystore"
-               keystorePass="vmops.com"/>
-
-    <!-- Define an AJP 1.3 Connector on port 20400 -->
-    <Connector port="20400" protocol="AJP/1.3" redirectPort="6443" />
-
-
-    <!-- An Engine represents the entry point (within Catalina) that processes
-         every request.  The Engine implementation for Tomcat stand alone
-         analyzes the HTTP headers included with the request, and passes them
-         on to the appropriate Host (virtual host).
-         Documentation at /docs/config/engine.html -->
-
-    <!-- You should set jvmRoute to support load-balancing via AJP ie :
-    <Engine name="Catalina" defaultHost="localhost" jvmRoute="jvm1">
-    -->
-    <Engine name="Catalina7080" defaultHost="localhost">
-
-      <!--For clustering, please take a look at documentation at:
-          /docs/cluster-howto.html  (simple how to)
-          /docs/config/cluster.html (reference documentation) -->
-      <!--
-      <Cluster className="org.apache.catalina.ha.tcp.SimpleTcpCluster"/>
-      -->
-
-      <!-- The request dumper valve dumps useful debugging information about
-           the request and response data received and sent by Tomcat.
-           Documentation at: /docs/config/valve.html -->
-      <!--
-      <Valve className="org.apache.catalina.valves.RequestDumperValve"/>
-      -->
-
-      <!-- This Realm uses the UserDatabase configured in the global JNDI
-           resources under the key "UserDatabase".  Any edits
-           that are performed against this UserDatabase are immediately
-           available for use by the Realm.  -->
-      <Realm className="org.apache.catalina.realm.UserDatabaseRealm"
-             resourceName="UserDatabase"/>
-
-      <!-- Define the default virtual host
-           Note: XML Schema validation will not work with Xerces 2.2.
-       -->
-      <Host name="localhost"  appBase="webapps7080"
-            unpackWARs="true" autoDeploy="true"
-            xmlValidation="false" xmlNamespaceAware="false">
-
-        <!-- SingleSignOn valve, share authentication between web applications
-             Documentation at: /docs/config/valve.html -->
-        <!--
-        <Valve className="org.apache.catalina.authenticator.SingleSignOn" />
-        -->
-
-        <!-- Access log processes all example.
-             Documentation at: /docs/config/valve.html -->
-        <Valve className="org.apache.catalina.valves.AccessLogValve" directory="logs"
-               prefix="access_log." suffix=".txt" pattern="common" resolveHosts="false"/>
-
-      </Host>
-    </Engine>
-  </Service>
 </Server>

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/1958a8e9/debian/rules
----------------------------------------------------------------------
diff --git a/debian/rules b/debian/rules
index 66a3623..888cf5b 100755
--- a/debian/rules
+++ b/debian/rules
@@ -56,7 +56,6 @@ override_dh_auto_install:
 	mkdir -p $(DESTDIR)/$(SYSCONFDIR)/sudoers.d/
 	mkdir -p $(DESTDIR)/usr/share/$(PACKAGE)-management
 	mkdir -p $(DESTDIR)/usr/share/$(PACKAGE)-management/webapps/client
-	mkdir -p $(DESTDIR)/usr/share/$(PACKAGE)-management/webapps7080
 	mkdir $(DESTDIR)/usr/share/$(PACKAGE)-management/setup
 	mkdir $(DESTDIR)/var/log/$(PACKAGE)/management
 	mkdir $(DESTDIR)/var/cache/$(PACKAGE)/management

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/1958a8e9/packaging/centos63/cloud.spec
----------------------------------------------------------------------
diff --git a/packaging/centos63/cloud.spec b/packaging/centos63/cloud.spec
index afa29b8..4154560 100644
--- a/packaging/centos63/cloud.spec
+++ b/packaging/centos63/cloud.spec
@@ -397,10 +397,6 @@ if [ "$1" == "1" ] ; then
     /sbin/chkconfig --level 345 cloudstack-management on > /dev/null 2>&1 || true
 fi
 
-if [ -d "%{_datadir}/%{name}-management" ] ; then
-   ln -s %{_datadir}/%{name}-bridge/webapps %{_datadir}/%{name}-management/webapps7080
-fi
-
 if [ ! -f %{_datadir}/cloudstack-common/scripts/vm/hypervisor/xenserver/vhd-util ] ; then
     echo Please download vhd-util from http://download.cloud.com.s3.amazonaws.com/tools/vhd-util and put it in 
     echo %{_datadir}/cloudstack-common/scripts/vm/hypervisor/xenserver/

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/1958a8e9/packaging/centos7/cloud.spec
----------------------------------------------------------------------
diff --git a/packaging/centos7/cloud.spec b/packaging/centos7/cloud.spec
index 71034fc..d9ed26e 100644
--- a/packaging/centos7/cloud.spec
+++ b/packaging/centos7/cloud.spec
@@ -362,10 +362,6 @@ if [ "$1" == "1" ] ; then
     /usr/bin/systemctl on cloudstack-management > /dev/null 2>&1 || true
 fi
 
-if [ -d "%{_datadir}/%{name}-management" ] ; then
-   ln -s %{_datadir}/%{name}-bridge/webapps %{_datadir}/%{name}-management/webapps7080
-fi
-
 if [ ! -f %{_datadir}/cloudstack-common/scripts/vm/hypervisor/xenserver/vhd-util ] ; then
     echo Please download vhd-util from http://download.cloud.com.s3.amazonaws.com/tools/vhd-util and put it in 
     echo %{_datadir}/cloudstack-common/scripts/vm/hypervisor/xenserver/

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/1958a8e9/packaging/fedora20/cloud.spec
----------------------------------------------------------------------
diff --git a/packaging/fedora20/cloud.spec b/packaging/fedora20/cloud.spec
index f6aa109..26251ee 100644
--- a/packaging/fedora20/cloud.spec
+++ b/packaging/fedora20/cloud.spec
@@ -398,10 +398,6 @@ if [ "$1" == "1" ] ; then
     /sbin/chkconfig --level 345 cloudstack-management on > /dev/null 2>&1 || true
 fi
 
-if [ -d "%{_datadir}/%{name}-management" ] ; then
-   ln -s %{_datadir}/%{name}-bridge/webapps %{_datadir}/%{name}-management/webapps7080
-fi
-
 if [ ! -f %{_datadir}/cloudstack-common/scripts/vm/hypervisor/xenserver/vhd-util ] ; then
     echo Please download vhd-util from http://download.cloud.com.s3.amazonaws.com/tools/vhd-util and put it in 
     echo %{_datadir}/cloudstack-common/scripts/vm/hypervisor/xenserver/

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/1958a8e9/packaging/fedora21/cloud.spec
----------------------------------------------------------------------
diff --git a/packaging/fedora21/cloud.spec b/packaging/fedora21/cloud.spec
index 71f7a9d..cad17ed 100644
--- a/packaging/fedora21/cloud.spec
+++ b/packaging/fedora21/cloud.spec
@@ -398,10 +398,6 @@ if [ "$1" == "1" ] ; then
     /sbin/chkconfig --level 345 cloudstack-management on > /dev/null 2>&1 || true
 fi
 
-if [ -d "%{_datadir}/%{name}-management" ] ; then
-   ln -s %{_datadir}/%{name}-bridge/webapps %{_datadir}/%{name}-management/webapps7080
-fi
-
 if [ ! -f %{_datadir}/cloudstack-common/scripts/vm/hypervisor/xenserver/vhd-util ] ; then
     echo Please download vhd-util from http://download.cloud.com.s3.amazonaws.com/tools/vhd-util and put it in 
     echo %{_datadir}/cloudstack-common/scripts/vm/hypervisor/xenserver/

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/1958a8e9/python/lib/cloudutils/serviceConfig.py
----------------------------------------------------------------------
diff --git a/python/lib/cloudutils/serviceConfig.py b/python/lib/cloudutils/serviceConfig.py
index 86f5a90..03891ea 100755
--- a/python/lib/cloudutils/serviceConfig.py
+++ b/python/lib/cloudutils/serviceConfig.py
@@ -746,7 +746,7 @@ class firewallConfigServer(firewallConfigBase):
         if self.syscfg.env.svrMode == "myCloud":
             self.ports = "443 8080 8250 8443 9090".split()
         else:
-            self.ports = "8080 7080 8250 9090".split()
+            self.ports = "8080 8250 9090".split()
 
 class ubuntuFirewallConfigServer(firewallConfigServer):
     def allowPort(self, port):

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/1958a8e9/tools/devcloud/src/Vagrantfile
----------------------------------------------------------------------
diff --git a/tools/devcloud/src/Vagrantfile b/tools/devcloud/src/Vagrantfile
index 3f0d904..46bae93 100644
--- a/tools/devcloud/src/Vagrantfile
+++ b/tools/devcloud/src/Vagrantfile
@@ -40,7 +40,6 @@ Vagrant::Config.run do |config|
   config.vm.forward_port 8787, 8787
   config.vm.forward_port 8250, 8250
   config.vm.forward_port 8096, 8096
-  config.vm.forward_port 7080, 7080
   # Ensure the VM has the right virtual resources
   #config.vm.
   config.vm.customize ["modifyvm", :id, "--memory", 2048]


[36/50] [abbrv] git commit: updated refs/heads/feature/vpc-ipv6 to 6140db5

Posted by ek...@apache.org.
Renaming the variable from "s" to "script"

Signed-off-by: Daan Hoogland <da...@gmail.com>


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/7f4e2c7c
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/7f4e2c7c
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/7f4e2c7c

Branch: refs/heads/feature/vpc-ipv6
Commit: 7f4e2c7cf0a6d1688c783f6d3ae4a4b3fef254ec
Parents: 6138d9a
Author: wilderrodrigues <wr...@schubergphilis.com>
Authored: Thu Jun 4 08:47:00 2015 +0200
Committer: Daan Hoogland <da...@gmail.com>
Committed: Thu Jun 4 12:34:05 2015 +0200

----------------------------------------------------------------------
 .../org/apache/cloudstack/utils/qemu/QemuImg.java | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/7f4e2c7c/plugins/hypervisors/kvm/src/org/apache/cloudstack/utils/qemu/QemuImg.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/kvm/src/org/apache/cloudstack/utils/qemu/QemuImg.java b/plugins/hypervisors/kvm/src/org/apache/cloudstack/utils/qemu/QemuImg.java
index 802bc9d..7caab74 100644
--- a/plugins/hypervisors/kvm/src/org/apache/cloudstack/utils/qemu/QemuImg.java
+++ b/plugins/hypervisors/kvm/src/org/apache/cloudstack/utils/qemu/QemuImg.java
@@ -224,29 +224,29 @@ public class QemuImg {
      * @return void
      */
     public void convert(final QemuImgFile srcFile, final QemuImgFile destFile, final Map<String, String> options) throws QemuImgException {
-        final Script s = new Script(_qemuImgPath, timeout);
-        s.add("convert");
+        final Script script = new Script(_qemuImgPath, timeout);
+        script.add("convert");
         // autodetect source format. Sometime int he future we may teach KVMPhysicalDisk about more formats, then we can explicitly pass them if necessary
         //s.add("-f");
         //s.add(srcFile.getFormat().toString());
-        s.add("-O");
-        s.add(destFile.getFormat().toString());
+        script.add("-O");
+        script.add(destFile.getFormat().toString());
 
         if (options != null && !options.isEmpty()) {
-            s.add("-o");
+            script.add("-o");
             final StringBuffer optionsBuffer = new StringBuffer();
             for (final Map.Entry<String, String> option : options.entrySet()) {
                 optionsBuffer.append(option.getKey()).append('=').append(option.getValue()).append(',');
             }
             String optionsStr = optionsBuffer.toString();
             optionsStr = optionsStr.replaceAll(",$", "");
-            s.add(optionsStr);
+            script.add(optionsStr);
         }
 
-        s.add(srcFile.getFileName());
-        s.add(destFile.getFileName());
+        script.add(srcFile.getFileName());
+        script.add(destFile.getFileName());
 
-        final String result = s.execute();
+        final String result = script.execute();
         if (result != null) {
             throw new QemuImgException(result);
         }
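
For context, the Script invocation assembled above amounts to a plain qemu-img command line; a rough standalone equivalent follows, where the file paths are hypothetical and qemu-img is assumed to be on the PATH:

    import java.util.Arrays;
    import java.util.List;

    public class QemuConvertSketch {
        public static void main(String[] args) throws Exception {
            // Mirrors the argument order built up via script.add(...) above:
            // qemu-img convert -O <destFormat> [-o key=value,...] <src> <dst>
            List<String> cmd = Arrays.asList("qemu-img", "convert",
                    "-O", "raw", "/tmp/src.qcow2", "/tmp/dst.raw");
            Process p = new ProcessBuilder(cmd).inheritIO().start();
            System.exit(p.waitFor());
        }
    }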


[38/50] [abbrv] git commit: updated refs/heads/feature/vpc-ipv6 to 6140db5

Posted by ek...@apache.org.
Using a try-with-resources block as suggested in @DaanHoogland's review.

Signed-off-by: Daan Hoogland <da...@gmail.com>

This closes #355


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/93845afd
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/93845afd
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/93845afd

Branch: refs/heads/feature/vpc-ipv6
Commit: 93845afd53fea7f9b9d95e995478054f823aa8b8
Parents: 7f4e2c7
Author: wilderrodrigues <wr...@schubergphilis.com>
Authored: Thu Jun 4 11:16:23 2015 +0200
Committer: Daan Hoogland <da...@gmail.com>
Committed: Thu Jun 4 12:34:05 2015 +0200

----------------------------------------------------------------------
 .../com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/93845afd/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java
index d785293..0ad2b30 100644
--- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java
+++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java
@@ -59,7 +59,6 @@ import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
 import org.apache.cloudstack.utils.qemu.QemuImgException;
 import org.apache.cloudstack.utils.qemu.QemuImgFile;
 import org.apache.commons.io.FileUtils;
-import org.apache.commons.io.IOUtils;
 import org.apache.log4j.Logger;
 import org.libvirt.Connect;
 import org.libvirt.Domain;
@@ -543,14 +542,12 @@ public class KVMStorageProcessor implements StorageProcessor {
                 final Date date = new Date();
                 templateContent += "snapshot.name=" + dateFormat.format(date) + System.getProperty("line.separator");
 
-                final FileOutputStream templFo = new FileOutputStream(templateProp);
-                try {
+
+                try(FileOutputStream templFo = new FileOutputStream(templateProp);){
                     templFo.write(templateContent.getBytes());
                     templFo.flush();
                 } catch (final IOException e) {
                     throw e;
-                } finally {
-                    IOUtils.closeQuietly(templFo);
                 }
             }
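
The same pattern can be exercised outside this class with a self-contained sketch (the file path and property content are examples); the stream declared in the try header is closed automatically whether or not the write throws:

    import java.io.FileOutputStream;
    import java.io.IOException;

    public class TryWithResourcesSketch {
        public static void main(String[] args) throws IOException {
            String templateContent = "filename=example.qcow2\n";
            // The resource declared in the try header is closed automatically,
            // which is what the removed IOUtils.closeQuietly(...) used to do.
            try (FileOutputStream templFo = new FileOutputStream("/tmp/template.properties")) {
                templFo.write(templateContent.getBytes());
                templFo.flush();
            }
        }
    }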
 


[39/50] [abbrv] git commit: updated refs/heads/feature/vpc-ipv6 to 6140db5

Posted by ek...@apache.org.
CLOUDSTACK-8308: Updating the disable/enable host test path, since an admin user should not be able to deploy a VM on a disabled host

Signed-off-by: Gaurav Aradhye <ga...@clogeny.com>
This closes #356


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/0326fb3b
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/0326fb3b
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/0326fb3b

Branch: refs/heads/feature/vpc-ipv6
Commit: 0326fb3b5ca0c1cc0ff1709b713d725ea38a7748
Parents: 93845af
Author: pritisarap12 <pr...@clogeny.com>
Authored: Fri Jun 5 11:52:34 2015 +0530
Committer: Gaurav Aradhye <ga...@clogeny.com>
Committed: Fri Jun 5 14:08:56 2015 +0530

----------------------------------------------------------------------
 .../maint/testpath_disable_enable_zone.py       | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/0326fb3b/test/integration/component/maint/testpath_disable_enable_zone.py
----------------------------------------------------------------------
diff --git a/test/integration/component/maint/testpath_disable_enable_zone.py b/test/integration/component/maint/testpath_disable_enable_zone.py
index 91b31f0..ec7c059 100644
--- a/test/integration/component/maint/testpath_disable_enable_zone.py
+++ b/test/integration/component/maint/testpath_disable_enable_zone.py
@@ -1371,7 +1371,6 @@ class TestDisableEnableHost(cloudstackTestCase):
         cmd.resourcestate = DISABLED
         cmd.allocationstate = DISABLE
         self.apiclient.updateHost(cmd)
-
         self.disabledHosts.append(hostid)
 
         hostList = Host.list(self.apiclient, id=hostid)
@@ -1406,15 +1405,16 @@ class TestDisableEnableHost(cloudstackTestCase):
                          "stopped",
                          "verify that vm should stop")
 
-        VirtualMachine.create(
-            self.apiclient,
-            self.testdata["small"],
-            templateid=self.template.id,
-            accountid=self.account.name,
-            domainid=self.account.domainid,
-            serviceofferingid=self.service_offering.id,
-            zoneid=self.zone.id,
-            hostid=hostid)
+        with self.assertRaises(Exception):
+            VirtualMachine.create(
+                self.apiclient,
+                self.testdata["small"],
+                templateid=self.template.id,
+                accountid=self.account.name,
+                domainid=self.account.domainid,
+                serviceofferingid=self.service_offering.id,
+                zoneid=self.zone.id,
+                hostid=hostid)
 
         root_volume = list_volumes(
             self.apiclient,


[28/50] [abbrv] git commit: updated refs/heads/feature/vpc-ipv6 to 6140db5

Posted by ek...@apache.org.
Fixes LB certificate test failures when the full path contains special characters such as whitespace.

Signed-off-by: Rohit Yadav <ro...@shapeblue.com>

This closes #336


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/00884b3b
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/00884b3b
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/00884b3b

Branch: refs/heads/feature/vpc-ipv6
Commit: 00884b3b0bad1d09d68258663a8f93653dfeb718
Parents: 3d4d152
Author: Rafael da Fonseca <rs...@gmail.com>
Authored: Sat May 30 20:31:55 2015 +0200
Committer: Rohit Yadav <ro...@shapeblue.com>
Committed: Thu Jun 4 00:11:09 2015 +0200

----------------------------------------------------------------------
 .../cloudstack/network/lb/CertServiceTest.java  | 52 ++++++++++----------
 1 file changed, 27 insertions(+), 25 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/00884b3b/server/test/org/apache/cloudstack/network/lb/CertServiceTest.java
----------------------------------------------------------------------
diff --git a/server/test/org/apache/cloudstack/network/lb/CertServiceTest.java b/server/test/org/apache/cloudstack/network/lb/CertServiceTest.java
index 2c34bff..915f77d 100644
--- a/server/test/org/apache/cloudstack/network/lb/CertServiceTest.java
+++ b/server/test/org/apache/cloudstack/network/lb/CertServiceTest.java
@@ -30,6 +30,7 @@ import java.lang.reflect.Field;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.UUID;
+import java.net.URLDecoder;
 
 import org.apache.cloudstack.api.command.user.loadbalancer.DeleteSslCertCmd;
 import com.cloud.user.User;
@@ -55,6 +56,7 @@ import com.cloud.user.UserVO;
 import com.cloud.user.dao.AccountDao;
 import com.cloud.utils.db.EntityManager;
 import com.cloud.utils.db.TransactionLegacy;
+import java.nio.charset.Charset;
 
 public class CertServiceTest {
 
@@ -97,9 +99,9 @@ public class CertServiceTest {
 
         TransactionLegacy txn = TransactionLegacy.open("runUploadSslCertWithCAChain");
 
-        String certFile = getClass().getResource("/certs/rsa_ca_signed.crt").getFile();
-        String keyFile = getClass().getResource("/certs/rsa_ca_signed.key").getFile();
-        String chainFile = getClass().getResource("/certs/root_chain.crt").getFile();
+        String certFile = URLDecoder.decode(getClass().getResource("/certs/rsa_ca_signed.crt").getFile(),Charset.defaultCharset().name());
+        String keyFile = URLDecoder.decode(getClass().getResource("/certs/rsa_ca_signed.key").getFile(),Charset.defaultCharset().name());
+        String chainFile = URLDecoder.decode(getClass().getResource("/certs/root_chain.crt").getFile(),Charset.defaultCharset().name());
 
         String cert = readFileToString(new File(certFile));
         String key = readFileToString(new File(keyFile));
@@ -149,8 +151,8 @@ public class CertServiceTest {
 
         TransactionLegacy txn = TransactionLegacy.open("runUploadSslCertSelfSignedWithPassword");
 
-        String certFile = getClass().getResource("/certs/rsa_self_signed_with_pwd.crt").getFile();
-        String keyFile = getClass().getResource("/certs/rsa_self_signed_with_pwd.key").getFile();
+        String certFile = URLDecoder.decode(getClass().getResource("/certs/rsa_self_signed_with_pwd.crt").getFile(),Charset.defaultCharset().name());
+        String keyFile = URLDecoder.decode(getClass().getResource("/certs/rsa_self_signed_with_pwd.key").getFile(),Charset.defaultCharset().name());
         String password = "test";
 
         String cert = readFileToString(new File(certFile));
@@ -200,8 +202,8 @@ public class CertServiceTest {
 
         TransactionLegacy txn = TransactionLegacy.open("runUploadSslCertSelfSignedNoPassword");
 
-        String certFile = getClass().getResource("/certs/rsa_self_signed.crt").getFile();
-        String keyFile = getClass().getResource("/certs/rsa_self_signed.key").getFile();
+        String certFile = URLDecoder.decode(getClass().getResource("/certs/rsa_self_signed.crt").getFile(),Charset.defaultCharset().name());
+        String keyFile = URLDecoder.decode(getClass().getResource("/certs/rsa_self_signed.key").getFile(),Charset.defaultCharset().name());
 
         String cert = readFileToString(new File(certFile));
         String key = readFileToString(new File(keyFile));
@@ -243,9 +245,9 @@ public class CertServiceTest {
     public void runUploadSslCertBadChain() throws IOException, IllegalAccessException, NoSuchFieldException {
         Assume.assumeTrue(isOpenJdk() || isJCEInstalled());
 
-        String certFile = getClass().getResource("/certs/rsa_ca_signed.crt").getFile();
-        String keyFile = getClass().getResource("/certs/rsa_ca_signed.key").getFile();
-        String chainFile = getClass().getResource("/certs/rsa_self_signed.crt").getFile();
+        String certFile = URLDecoder.decode(getClass().getResource("/certs/rsa_ca_signed.crt").getFile(),Charset.defaultCharset().name());
+        String keyFile = URLDecoder.decode(getClass().getResource("/certs/rsa_ca_signed.key").getFile(),Charset.defaultCharset().name());
+        String chainFile = URLDecoder.decode(getClass().getResource("/certs/rsa_self_signed.crt").getFile(),Charset.defaultCharset().name());
 
         String cert = readFileToString(new File(certFile));
         String key = readFileToString(new File(keyFile));
@@ -295,9 +297,9 @@ public class CertServiceTest {
 
         Assume.assumeTrue(isOpenJdk() || isJCEInstalled());
 
-        String certFile = getClass().getResource("/certs/rsa_ca_signed.crt").getFile();
-        String keyFile = getClass().getResource("/certs/rsa_ca_signed.key").getFile();
-        String chainFile = getClass().getResource("/certs/non_root.crt").getFile();
+        String certFile = URLDecoder.decode(getClass().getResource("/certs/rsa_ca_signed.crt").getFile(),Charset.defaultCharset().name());
+        String keyFile = URLDecoder.decode(getClass().getResource("/certs/rsa_ca_signed.key").getFile(),Charset.defaultCharset().name());
+        String chainFile = URLDecoder.decode(getClass().getResource("/certs/non_root.crt").getFile(),Charset.defaultCharset().name());
 
         String cert = readFileToString(new File(certFile));
         String key = readFileToString(new File(keyFile));
@@ -346,8 +348,8 @@ public class CertServiceTest {
     @Test
     public void runUploadSslCertBadPassword() throws IOException, IllegalAccessException, NoSuchFieldException {
 
-        String certFile = getClass().getResource("/certs/rsa_self_signed_with_pwd.crt").getFile();
-        String keyFile = getClass().getResource("/certs/rsa_self_signed_with_pwd.key").getFile();
+        String certFile = URLDecoder.decode(getClass().getResource("/certs/rsa_self_signed_with_pwd.crt").getFile(),Charset.defaultCharset().name());
+        String keyFile = URLDecoder.decode(getClass().getResource("/certs/rsa_self_signed_with_pwd.key").getFile(),Charset.defaultCharset().name());
         String password = "bad_password";
 
         String cert = readFileToString(new File(certFile));
@@ -395,8 +397,8 @@ public class CertServiceTest {
     @Test
     public void runUploadSslCertBadkeyPair() throws IOException, IllegalAccessException, NoSuchFieldException {
         // Reading appropritate files
-        String certFile = getClass().getResource("/certs/rsa_self_signed.crt").getFile();
-        String keyFile = getClass().getResource("/certs/non_root.key").getFile();
+        String certFile = URLDecoder.decode(getClass().getResource("/certs/rsa_self_signed.crt").getFile(),Charset.defaultCharset().name());
+        String keyFile = URLDecoder.decode(getClass().getResource("/certs/non_root.key").getFile(),Charset.defaultCharset().name());
 
         String cert = readFileToString(new File(certFile));
         String key = readFileToString(new File(keyFile));
@@ -438,8 +440,8 @@ public class CertServiceTest {
     public void runUploadSslCertBadkeyAlgo() throws IOException, IllegalAccessException, NoSuchFieldException {
 
         // Reading appropritate files
-        String certFile = getClass().getResource("/certs/rsa_self_signed.crt").getFile();
-        String keyFile = getClass().getResource("/certs/dsa_self_signed.key").getFile();
+        String certFile = URLDecoder.decode(getClass().getResource("/certs/rsa_self_signed.crt").getFile(),Charset.defaultCharset().name());
+        String keyFile = URLDecoder.decode(getClass().getResource("/certs/dsa_self_signed.key").getFile(),Charset.defaultCharset().name());
 
         String cert = readFileToString(new File(certFile));
         String key = readFileToString(new File(keyFile));
@@ -482,8 +484,8 @@ public class CertServiceTest {
     public void runUploadSslCertExpiredCert() throws IOException, IllegalAccessException, NoSuchFieldException {
 
         // Reading appropritate files
-        String certFile = getClass().getResource("/certs/expired_cert.crt").getFile();
-        String keyFile = getClass().getResource("/certs/rsa_self_signed.key").getFile();
+        String certFile = URLDecoder.decode(getClass().getResource("/certs/expired_cert.crt").getFile(),Charset.defaultCharset().name());
+        String keyFile = URLDecoder.decode(getClass().getResource("/certs/rsa_self_signed.key").getFile(),Charset.defaultCharset().name());
 
         String cert = readFileToString(new File(certFile));
         String key = readFileToString(new File(keyFile));
@@ -525,8 +527,8 @@ public class CertServiceTest {
     @Test
     public void runUploadSslCertNotX509() throws IOException, IllegalAccessException, NoSuchFieldException {
         // Reading appropritate files
-        String certFile = getClass().getResource("/certs/non_x509_pem.crt").getFile();
-        String keyFile = getClass().getResource("/certs/rsa_self_signed.key").getFile();
+        String certFile = URLDecoder.decode(getClass().getResource("/certs/non_x509_pem.crt").getFile(),Charset.defaultCharset().name());
+        String keyFile = URLDecoder.decode(getClass().getResource("/certs/rsa_self_signed.key").getFile(),Charset.defaultCharset().name());
 
         String cert = readFileToString(new File(certFile));
         String key = readFileToString(new File(keyFile));
@@ -569,8 +571,8 @@ public class CertServiceTest {
     public void runUploadSslCertBadFormat() throws IOException, IllegalAccessException, NoSuchFieldException {
 
         // Reading appropritate files
-        String certFile = getClass().getResource("/certs/bad_format_cert.crt").getFile();
-        String keyFile = getClass().getResource("/certs/rsa_self_signed.key").getFile();
+        String certFile = URLDecoder.decode(getClass().getResource("/certs/bad_format_cert.crt").getFile(),Charset.defaultCharset().name());
+        String keyFile = URLDecoder.decode(getClass().getResource("/certs/rsa_self_signed.key").getFile(),Charset.defaultCharset().name());
 
         String cert = readFileToString(new File(certFile));
         String key = readFileToString(new File(keyFile));
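
For context, the recurring change above wraps every getResource().getFile() call in URLDecoder.decode(...) so that URL-encoded characters (for example a space encoded as %20 in the checkout path) are turned back into literal characters before the path is handed to java.io.File. A minimal standalone sketch of the same idea, reusing one of the certificate resources named in the diff and assuming it is on the classpath, might look like this:

import java.io.File;
import java.net.URLDecoder;
import java.nio.charset.Charset;

public class ResourcePathExample {
    public static void main(String[] args) throws Exception {
        // getResource().getFile() can contain %20 and similar escapes when the workspace path has spaces
        String rawPath = ResourcePathExample.class.getResource("/certs/rsa_self_signed.crt").getFile();
        // Decoding with the default charset restores the literal characters so File can resolve the path
        String decodedPath = URLDecoder.decode(rawPath, Charset.defaultCharset().name());
        System.out.println(new File(decodedPath).exists());
    }
}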


[07/50] [abbrv] git commit: updated refs/heads/feature/vpc-ipv6 to 6140db5

Posted by ek...@apache.org.
And now with the argLine in the right plugin (maven-surefire-plugin rather than maven-failsafe-plugin) :)


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/9030036b
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/9030036b
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/9030036b

Branch: refs/heads/feature/vpc-ipv6
Commit: 9030036bb16f5eb37cf904f7c8c1bb722c774847
Parents: 82847d4
Author: Rafael da Fonseca <rs...@gmail.com>
Authored: Fri May 29 20:43:33 2015 +0200
Committer: Rafael da Fonseca <rs...@gmail.com>
Committed: Fri May 29 20:43:33 2015 +0200

----------------------------------------------------------------------
 pom.xml | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/9030036b/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 0af7062..6e3730b 100644
--- a/pom.xml
+++ b/pom.xml
@@ -958,14 +958,14 @@
           <groupId>org.apache.maven.plugins</groupId>
           <artifactId>maven-surefire-plugin</artifactId>
           <version>2.18.1</version>
+          <configuration>
+              <argLine>-Djava.security.egd=file:/dev/./urandom</argLine>
+          </configuration>
         </plugin>
         <plugin>
           <groupId>org.apache.maven.plugins</groupId>
           <artifactId>maven-failsafe-plugin</artifactId>
           <version>2.18.1</version>
-          <configuration>
-              <argLine>-Djava.security.egd=file:/dev/./urandom</argLine>
-          </configuration>
         </plugin>
       </plugins>
     </pluginManagement>
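
The moved argLine passes -Djava.security.egd=file:/dev/./urandom to the forked unit-test JVMs, which keeps SecureRandom seeding from blocking on an entropy-starved /dev/random during test runs. A small hedged sketch, not part of the build, only to illustrate the kind of code the property affects:

import java.security.SecureRandom;

public class EntropyCheck {
    public static void main(String[] args) {
        long start = System.nanoTime();
        // With the default egd source, generateSeed() can stall on Linux machines that are
        // low on entropy; pointing egd at /dev/./urandom uses the non-blocking source instead.
        SecureRandom random = new SecureRandom();
        byte[] seed = random.generateSeed(16);
        System.out.printf("Generated %d seed bytes in %.1f ms%n",
                seed.length, (System.nanoTime() - start) / 1e6);
    }
}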


[03/50] [abbrv] git commit: updated refs/heads/feature/vpc-ipv6 to 6140db5

Posted by ek...@apache.org.
utils: add findCookie helper method in HttpUtils to look up a cookie value by key

Finds a cookie value in an array of cookies by key name

Signed-off-by: Rohit Yadav <ro...@shapeblue.com>
(cherry picked from commit 78ea36d099371b9a59cbf1e3efd48b853ecc37ca)
Signed-off-by: Rohit Yadav <ro...@shapeblue.com>


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/ca3ac685
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/ca3ac685
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/ca3ac685

Branch: refs/heads/feature/vpc-ipv6
Commit: ca3ac68517199523f338b95e60f65b415039088a
Parents: 3e21c9b
Author: Rohit Yadav <ro...@shapeblue.com>
Authored: Fri May 29 15:27:31 2015 +0200
Committer: Rohit Yadav <ro...@shapeblue.com>
Committed: Fri May 29 15:42:21 2015 +0200

----------------------------------------------------------------------
 utils/src/com/cloud/utils/HttpUtils.java | 10 ++++++++++
 1 file changed, 10 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/ca3ac685/utils/src/com/cloud/utils/HttpUtils.java
----------------------------------------------------------------------
diff --git a/utils/src/com/cloud/utils/HttpUtils.java b/utils/src/com/cloud/utils/HttpUtils.java
index 58768dc..2940985 100644
--- a/utils/src/com/cloud/utils/HttpUtils.java
+++ b/utils/src/com/cloud/utils/HttpUtils.java
@@ -21,6 +21,7 @@ package com.cloud.utils;
 
 import org.apache.log4j.Logger;
 
+import javax.servlet.http.Cookie;
 import javax.servlet.http.HttpServletResponse;
 import java.io.IOException;
 
@@ -49,6 +50,15 @@ public class HttpUtils {
         }
     }
 
+    public static String findCookie(final Cookie[] cookies, final String key) {
+        for (Cookie cookie: cookies) {
+            if (cookie != null && cookie.getName().equals(key)) {
+                return cookie.getValue();
+            }
+        }
+        return null;
+    }
+
     public static void writeHttpResponse(final HttpServletResponse resp, final String response,
                                          final Integer responseCode, final String responseType, final String jsonContentType) {
         try {
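
A hedged usage sketch for the new helper; the servlet-side wrapper and the "sessionkey" cookie name below are illustrative and not taken from this commit:

import javax.servlet.http.Cookie;
import javax.servlet.http.HttpServletRequest;

import com.cloud.utils.HttpUtils;

public class SessionKeyLookupExample {
    // Returns the value of the named cookie, or null when the request carries no cookies
    public static String readSessionKey(HttpServletRequest req) {
        Cookie[] cookies = req.getCookies();   // may be null; findCookie expects a non-null array
        if (cookies == null) {
            return null;
        }
        return HttpUtils.findCookie(cookies, "sessionkey");
    }
}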


[37/50] [abbrv] git commit: updated refs/heads/feature/vpc-ipv6 to 6140db5

Posted by ek...@apache.org.
Coverity issue 1116677 - Avoiding catching only Exception, which makes the code too brittle. - Catching the QemuImgException and throwing it to be caught further up in the code. - Surrounding the output stream writes with try/catch and rethrowing so the exception is handled further up in the code, while closing the output stream quietly.

Signed-off-by: Daan Hoogland <da...@gmail.com>


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/9ff38486
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/9ff38486
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/9ff38486

Branch: refs/heads/feature/vpc-ipv6
Commit: 9ff38486a1e5c10f54b6f8f1c64fad0bccc6817e
Parents: 6271663
Author: wilderrodrigues <wr...@schubergphilis.com>
Authored: Thu Jun 4 08:38:06 2015 +0200
Committer: Daan Hoogland <da...@gmail.com>
Committed: Thu Jun 4 12:34:05 2015 +0200

----------------------------------------------------------------------
 .../kvm/storage/KVMStorageProcessor.java        | 25 ++++++++++++++++----
 1 file changed, 20 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/9ff38486/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java
index 201659d..d785293 100644
--- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java
+++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java
@@ -59,6 +59,7 @@ import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
 import org.apache.cloudstack.utils.qemu.QemuImgException;
 import org.apache.cloudstack.utils.qemu.QemuImgFile;
 import org.apache.commons.io.FileUtils;
+import org.apache.commons.io.IOUtils;
 import org.apache.log4j.Logger;
 import org.libvirt.Connect;
 import org.libvirt.Domain;
@@ -525,8 +526,10 @@ public class KVMStorageProcessor implements StorageProcessor {
                 try {
                     q.convert(srcFile, destFile);
                 } catch (final QemuImgException e) {
-                    s_logger.error("Failed to create new template while converting " + srcFile.getFileName() + " to " + destFile.getFileName() + " the error was: " +
-                            e.getMessage());
+                    final String message = "Failed to create new template while converting " + srcFile.getFileName() + " to " + destFile.getFileName() + " the error was: " +
+                            e.getMessage();
+
+                    throw new QemuImgException(message);
                 }
 
                 final File templateProp = new File(tmpltPath + "/template.properties");
@@ -541,9 +544,14 @@ public class KVMStorageProcessor implements StorageProcessor {
                 templateContent += "snapshot.name=" + dateFormat.format(date) + System.getProperty("line.separator");
 
                 final FileOutputStream templFo = new FileOutputStream(templateProp);
-                templFo.write(templateContent.getBytes());
-                templFo.flush();
-                templFo.close();
+                try {
+                    templFo.write(templateContent.getBytes());
+                    templFo.flush();
+                } catch (final IOException e) {
+                    throw e;
+                } finally {
+                    IOUtils.closeQuietly(templFo);
+                }
             }
 
             final Map<String, Object> params = new HashMap<String, Object>();
@@ -566,6 +574,13 @@ public class KVMStorageProcessor implements StorageProcessor {
             newTemplate.setFormat(ImageFormat.QCOW2);
             newTemplate.setName(templateName);
             return new CopyCmdAnswer(newTemplate);
+
+        } catch (final QemuImgException e) {
+            s_logger.error(e.getMessage());
+            return new CopyCmdAnswer(e.toString());
+        } catch (final IOException e) {
+            s_logger.debug("Failed to createTemplateFromVolume: ", e);
+            return new CopyCmdAnswer(e.toString());
         } catch (final Exception e) {
             s_logger.debug("Failed to createTemplateFromVolume: ", e);
             return new CopyCmdAnswer(e.toString());
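
The write/flush/closeQuietly pattern introduced above, pulled out into a hedged standalone sketch (the helper below is illustrative and not part of KVMStorageProcessor):

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;

import org.apache.commons.io.IOUtils;

public class QuietCloseExample {
    // Write a small properties payload; let write/flush failures propagate,
    // but never let a failure while closing mask them.
    static void writeTemplateProperties(File target, String content) throws IOException {
        final FileOutputStream out = new FileOutputStream(target);
        try {
            out.write(content.getBytes());
            out.flush();
        } finally {
            IOUtils.closeQuietly(out);   // swallows any exception thrown by close()
        }
    }
}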


[08/50] [abbrv] git commit: updated refs/heads/feature/vpc-ipv6 to 6140db5

Posted by ek...@apache.org.
template, api: fix format desc, add OVA format

Signed-off-by: Rohit Yadav <ro...@shapeblue.com>

This closes #331


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/666263e8
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/666263e8
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/666263e8

Branch: refs/heads/feature/vpc-ipv6
Commit: 666263e84de3959866592ed68010b394d36229a2
Parents: 1958a8e
Author: René Moser <ma...@renemoser.net>
Authored: Fri May 29 20:44:12 2015 +0200
Committer: Rohit Yadav <ro...@shapeblue.com>
Committed: Fri May 29 21:02:03 2015 +0200

----------------------------------------------------------------------
 .../cloudstack/api/command/user/template/RegisterTemplateCmd.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/666263e8/api/src/org/apache/cloudstack/api/command/user/template/RegisterTemplateCmd.java
----------------------------------------------------------------------
diff --git a/api/src/org/apache/cloudstack/api/command/user/template/RegisterTemplateCmd.java b/api/src/org/apache/cloudstack/api/command/user/template/RegisterTemplateCmd.java
index 68dbdf5..8de30f6 100644
--- a/api/src/org/apache/cloudstack/api/command/user/template/RegisterTemplateCmd.java
+++ b/api/src/org/apache/cloudstack/api/command/user/template/RegisterTemplateCmd.java
@@ -65,7 +65,7 @@ public class RegisterTemplateCmd extends BaseCmd {
     @Parameter(name = ApiConstants.FORMAT,
                type = CommandType.STRING,
                required = true,
-               description = "the format for the template. Possible values include QCOW2, RAW, and VHD.")
+               description = "the format for the template. Possible values include QCOW2, RAW, VHD and OVA.")
     private String format;
 
     @Parameter(name = ApiConstants.HYPERVISOR, type = CommandType.STRING, required = true, description = "the target hypervisor for the template")


[33/50] [abbrv] git commit: updated refs/heads/feature/vpc-ipv6 to 6140db5

Posted by ek...@apache.org.
Coverity issue 1116509 - Assigning the newly returned ResultSet to the rs variable so that it gets closed in the finally block

Signed-off-by: Daan Hoogland <da...@gmail.com>


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/c3b4c7a9
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/c3b4c7a9
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/c3b4c7a9

Branch: refs/heads/feature/vpc-ipv6
Commit: c3b4c7a9cff3f7ecdc0a2465173f56152cc6f9ef
Parents: abe0990
Author: wilderrodrigues <wr...@schubergphilis.com>
Authored: Thu Jun 4 08:18:17 2015 +0200
Committer: Daan Hoogland <da...@gmail.com>
Committed: Thu Jun 4 12:34:04 2015 +0200

----------------------------------------------------------------------
 engine/schema/src/com/cloud/upgrade/dao/VersionDaoImpl.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/c3b4c7a9/engine/schema/src/com/cloud/upgrade/dao/VersionDaoImpl.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/upgrade/dao/VersionDaoImpl.java b/engine/schema/src/com/cloud/upgrade/dao/VersionDaoImpl.java
index 653124f..bc9c2f0 100644
--- a/engine/schema/src/com/cloud/upgrade/dao/VersionDaoImpl.java
+++ b/engine/schema/src/com/cloud/upgrade/dao/VersionDaoImpl.java
@@ -108,7 +108,7 @@ public class VersionDaoImpl extends GenericDaoBase<VersionVO, Long> implements V
                         rs.close();
                         pstmt.close();
                         pstmt = conn.prepareStatement("SELECT is_static_nat from firewall_rules");
-                        pstmt.executeQuery();
+                        rs = pstmt.executeQuery();
                         return "2.2.1";
                     } catch (final SQLException e) {
                         s_logger.debug("Assuming the exception means static_nat field doesn't exist in firewall_rules table, returning version 2.2.2");
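
A hedged sketch of the resource-handling pattern this fix restores: every ResultSet the method opens is kept in the same rs variable so that a single finally block can close whatever is still open. The query strings and version literal below only loosely mirror VersionDaoImpl:

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

public class ResultSetCloseExample {
    static String probeSchemaVersion(Connection conn) throws SQLException {
        PreparedStatement pstmt = null;
        ResultSet rs = null;
        try {
            pstmt = conn.prepareStatement("SELECT static_nat FROM firewall_rules");
            rs = pstmt.executeQuery();
            rs.close();
            pstmt.close();
            pstmt = conn.prepareStatement("SELECT is_static_nat FROM firewall_rules");
            rs = pstmt.executeQuery();   // assign to rs so the finally block closes this one too
            return "2.2.1";
        } finally {
            if (rs != null) {
                rs.close();
            }
            if (pstmt != null) {
                pstmt.close();
            }
        }
    }
}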


[18/50] [abbrv] git commit: updated refs/heads/feature/vpc-ipv6 to 6140db5

Posted by ek...@apache.org.
Modified test_lb_secondary_ip.py: a local variable was being accessed as a class variable

Signed-off-by: Daan Hoogland <da...@gmail.com>

This closes #344


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/d6052a31
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/d6052a31
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/d6052a31

Branch: refs/heads/feature/vpc-ipv6
Commit: d6052a31a330f37afd4d05efd89b289966aa23fb
Parents: 06f9ee6
Author: pritisarap12 <pr...@clogeny.com>
Authored: Tue Jun 2 13:19:19 2015 +0530
Committer: Daan Hoogland <da...@gmail.com>
Committed: Tue Jun 2 12:33:06 2015 +0200

----------------------------------------------------------------------
 test/integration/component/test_lb_secondary_ip.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/d6052a31/test/integration/component/test_lb_secondary_ip.py
----------------------------------------------------------------------
diff --git a/test/integration/component/test_lb_secondary_ip.py b/test/integration/component/test_lb_secondary_ip.py
index 03551ab..e1d885d 100644
--- a/test/integration/component/test_lb_secondary_ip.py
+++ b/test/integration/component/test_lb_secondary_ip.py
@@ -1984,7 +1984,7 @@ class TestExternalLoadBalancer(cloudstackTestCase):
         defaultEthernetDevice = str(response[2].split()[0])
 
         cmd = "ip addr add {0}/24 broadcast {0} dev {1}".format(
-            self.secondaryip.ipaddress, defaultEthernetDevice)
+            secondaryip.ipaddress, defaultEthernetDevice)
         sshClient.execute(cmd)
         sshClient.execute("ip addr show")
 


[42/50] [abbrv] git commit: updated refs/heads/feature/vpc-ipv6 to 6140db5

Posted by ek...@apache.org.
Fix another FindBugs high-priority warning: JuniperSrxResource.java:2617, ES_COMPARING_STRINGS_WITH_EQ (Priority: High), comparison of String objects using == or != in com.cloud.network.resource.JuniperSrxResource.manageApplication(JuniperSrxResource$SecurityPolicyType, JuniperSrxResource$SrxCommand, JuniperSrxResource$Protocol, int, int)

This now correctly compares strings

Signed-off-by: Daan Hoogland <da...@gmail.com>

This closes #364


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/65383fb8
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/65383fb8
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/65383fb8

Branch: refs/heads/feature/vpc-ipv6
Commit: 65383fb8fa2d927cccf0357359d5fa41dec7aed6
Parents: 1ca74da
Author: Rafael da Fonseca <rs...@gmail.com>
Authored: Sun Jun 7 21:18:11 2015 +0200
Committer: Daan Hoogland <da...@gmail.com>
Committed: Mon Jun 8 09:30:38 2015 +0200

----------------------------------------------------------------------
 .../src/com/cloud/network/resource/JuniperSrxResource.java         | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/65383fb8/plugins/network-elements/juniper-srx/src/com/cloud/network/resource/JuniperSrxResource.java
----------------------------------------------------------------------
diff --git a/plugins/network-elements/juniper-srx/src/com/cloud/network/resource/JuniperSrxResource.java b/plugins/network-elements/juniper-srx/src/com/cloud/network/resource/JuniperSrxResource.java
index b2f8596..2133937 100644
--- a/plugins/network-elements/juniper-srx/src/com/cloud/network/resource/JuniperSrxResource.java
+++ b/plugins/network-elements/juniper-srx/src/com/cloud/network/resource/JuniperSrxResource.java
@@ -2614,7 +2614,7 @@ public class JuniperSrxResource implements ServerResource {
                 xml = SrxXml.APPLICATION_ADD.getXml();
                 xml = replaceXmlValue(xml, "name", applicationName);
                 xml = replaceXmlValue(xml, "protocol", protocol.toString());
-                if (protocol.toString() == Protocol.icmp.toString()) {
+                if (protocol.toString().equals(Protocol.icmp.toString())) {
                     icmpOrDestPort = "<icmp-type>" + startPort + "</icmp-type>";
                     icmpOrDestPort += "<icmp-code>" + endPort + "</icmp-code>";
                 } else {
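
For reference, a small hedged sketch of the difference the FindBugs warning points at: == compares object identity, while equals() compares character content, which is what the protocol check needs:

public class StringCompareExample {
    public static void main(String[] args) {
        String protocol = new String("icmp");   // deliberately a distinct String object
        String expected = "icmp";

        System.out.println(protocol == expected);        // false: different objects
        System.out.println(protocol.equals(expected));   // true: same character content
    }
}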


[09/50] [abbrv] git commit: updated refs/heads/feature/vpc-ipv6 to 6140db5

Posted by ek...@apache.org.
CLOUDSTACK-8231: Fixed empty drop-down list for LB rules in the UI

This closes #302

Signed-off-by: Rohit Yadav <ro...@shapeblue.com>


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/94f1ebb6
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/94f1ebb6
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/94f1ebb6

Branch: refs/heads/feature/vpc-ipv6
Commit: 94f1ebb60af320166206c213e18b97a177bc145a
Parents: 666263e
Author: Vadim Kimlaychuk <va...@ant.ee>
Authored: Fri May 29 21:18:49 2015 +0200
Committer: Rohit Yadav <ro...@shapeblue.com>
Committed: Fri May 29 21:18:49 2015 +0200

----------------------------------------------------------------------
 ui/scripts/network.js | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/94f1ebb6/ui/scripts/network.js
----------------------------------------------------------------------
diff --git a/ui/scripts/network.js b/ui/scripts/network.js
index d857b73..4e337fb 100755
--- a/ui/scripts/network.js
+++ b/ui/scripts/network.js
@@ -1703,8 +1703,9 @@
                                                             name: 'source',
                                                             description: _l('label.lb.algorithm.source')
                                                         }];
-                                                    if (typeof args.context == 'undefined') {
-                                                        data = getLBAlgorithms(args.context.networks[0]);
+                                                    if (typeof args.context != 'undefined') {
+                                                        var lbAlgs = getLBAlgorithms(args.context.networks[0]);
+                                                        data = (lbAlgs.length == 0) ? data : lbAlgs;
                                                     }
                                                     args.response.success({
                                                         data: data
@@ -3551,8 +3552,9 @@
                                                             name: 'source',
                                                             description: _l('label.lb.algorithm.source')
                                                         }];
-                                                    if (typeof args.context == 'undefined') {
-                                                        data = getLBAlgorithms(args.context.networks[0]);
+                                                    if (typeof args.context != 'undefined') {
+                                                        var lbAlgs = getLBAlgorithms(args.context.networks[0]);
+                                                        data = (lbAlgs.length == 0) ? data : lbAlgs;
                                                     }
                                                     args.response.success({
                                                         data: data


[17/50] [abbrv] git commit: updated refs/heads/feature/vpc-ipv6 to 6140db5

Posted by ek...@apache.org.
CLOUDSTACK-8534: Fixed import and pep8 issues in test_redundant_router_upgrades.py

Signed-off-by: Gaurav Aradhye <ga...@clogeny.com>
This closes #346


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/06f9ee63
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/06f9ee63
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/06f9ee63

Branch: refs/heads/feature/vpc-ipv6
Commit: 06f9ee63bbdedea3ebd0033b57c4d0294d6f158d
Parents: 8bf59b5
Author: Gaurav Aradhye <ga...@clogeny.com>
Authored: Tue Jun 2 15:49:36 2015 +0530
Committer: Gaurav Aradhye <ga...@clogeny.com>
Committed: Tue Jun 2 15:52:11 2015 +0530

----------------------------------------------------------------------
 .../component/test_redundant_router_upgrades.py | 571 ++++++++++---------
 1 file changed, 290 insertions(+), 281 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/06f9ee63/test/integration/component/test_redundant_router_upgrades.py
----------------------------------------------------------------------
diff --git a/test/integration/component/test_redundant_router_upgrades.py b/test/integration/component/test_redundant_router_upgrades.py
index 7be1eff..6a0efb0 100644
--- a/test/integration/component/test_redundant_router_upgrades.py
+++ b/test/integration/component/test_redundant_router_upgrades.py
@@ -5,9 +5,9 @@
 # to you under the Apache License, Version 2.0 (the
 # "License"); you may not use this file except in compliance
 # with the License.  You may obtain a copy of the License at
-# 
+#
 #   http://www.apache.org/licenses/LICENSE-2.0
-# 
+#
 # Unless required by applicable law or agreed to in writing,
 # software distributed under the License is distributed on an
 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@@ -17,121 +17,130 @@
 
 
 from nose.plugins.attrib import attr
-from marvin.lib.base import *
-from marvin.lib.utils import *
-from marvin.lib.common import *
-
-#Import Local Modules
+from marvin.lib.base import (Account,
+                             Router,
+                             Network,
+                             VirtualMachine,
+                             ServiceOffering,
+                             NetworkOffering)
+from marvin.lib.utils import cleanup_resources
+from marvin.lib.common import (get_domain,
+                               get_zone,
+                               get_template)
+
+# Import Local Modules
 from marvin.cloudstackTestCase import cloudstackTestCase
-from marvin.cloudstackAPI import *
+
 
 class Services:
+
     """Test Services for customer defects
     """
 
     def __init__(self):
         self.services = {
-                        "account": {
-                                    "email": "test@test.com",
-                                    "firstname": "Test",
-                                    "lastname": "User",
-                                    "username": "test",
-                                    # Random characters are appended for unique
-                                    # username
-                                    "password": "password",
-                         },
-                        "service_offering": {
-                                    "name": "Tiny Instance",
-                                    "displaytext": "Tiny Instance",
-                                    "cpunumber": 1,
-                                    "cpuspeed": 100,
-                                    "memory": 128,
-                        },
-                        "disk_offering": {
-                                    "displaytext": "Small",
-                                    "name": "Small",
-                                    "disksize": 1
-                        },
-                        "virtual_machine": {
-                                    "displayname": "Test VM",
-                                    "username": "root",
-                                    "password": "password",
-                                    "ssh_port": 22,
-                                    "hypervisor": 'XenServer',
-                                    "privateport": 22,
-                                    "publicport": 22,
-                                    "protocol": 'TCP',
-                        },
-                        "static_nat": {
-                                    "startport": 22,
-                                    "endport": 22,
-                                    "protocol": "TCP"
-                        },
-                        "network_offering": {
-                                    "name": 'Network offering-RVR services',
-                                    "displaytext": 'Network off-RVR services',
-                                    "guestiptype": 'Isolated',
-                                    "supportedservices": 'Vpn,Dhcp,Dns,SourceNat,PortForwarding,Firewall,Lb,UserData,StaticNat',
-                                    "traffictype": 'GUEST',
-                                    "availability": 'Optional',
-                                    "serviceProviderList": {
-                                            "Vpn": 'VirtualRouter',
-                                            "Dhcp": 'VirtualRouter',
-                                            "Dns": 'VirtualRouter',
-                                            "SourceNat": 'VirtualRouter',
-                                            "PortForwarding": 'VirtualRouter',
-                                            "Firewall": 'VirtualRouter',
-                                            "Lb": 'VirtualRouter',
-                                            "UserData": 'VirtualRouter',
-                                            "StaticNat": 'VirtualRouter',
-                                        },
-                                    "serviceCapabilityList": {
-                                        "SourceNat": {
-                                            "SupportedSourceNatTypes": "peraccount",
-                                            "RedundantRouter": "true",
-                                        },
-                                        "lb": {
-                                               "SupportedLbIsolation": "dedicated"
-                                        },
-                                    },
-                        },
-                        "host": {
-                                 "username": "root",
-                                 "password": "password",
-                                 "publicport": 22,
-                        },
-                        "network": {
-                                  "name": "Test Network",
-                                  "displaytext": "Test Network",
-                                },
-                        "lbrule": {
-                                    "name": "SSH",
-                                    "alg": "roundrobin",
-                                    # Algorithm used for load balancing
-                                    "privateport": 22,
-                                    "publicport": 22,
-                                    "openfirewall": True,
-                                },
-                        "natrule": {
-                                    "privateport": 22,
-                                    "publicport": 22,
-                                    "protocol": "TCP"
-                                },
-                        "natrule_221": {
-                                    "privateport": 22,
-                                    "publicport": 221,
-                                    "protocol": "TCP"
-                                },
-                        "fw_rule": {
-                                    "startport": 1,
-                                    "endport": 6000,
-                                    "cidr": '55.55.0.0/11',
-                                    # Any network (For creating FW rule)
-                                    "protocol": 'TCP',
-                                },
-                        "ostype": 'CentOS 5.3 (64-bit)',
-                        "sleep": 60,
-            }
+            "account": {
+                "email": "test@test.com",
+                "firstname": "Test",
+                "lastname": "User",
+                "username": "test",
+                # Random characters are appended for unique
+                # username
+                "password": "password",
+            },
+            "service_offering": {
+                "name": "Tiny Instance",
+                "displaytext": "Tiny Instance",
+                "cpunumber": 1,
+                "cpuspeed": 100,
+                "memory": 128,
+            },
+            "disk_offering": {
+                "displaytext": "Small",
+                "name": "Small",
+                "disksize": 1
+            },
+            "virtual_machine": {
+                "displayname": "Test VM",
+                "username": "root",
+                "password": "password",
+                "ssh_port": 22,
+                "hypervisor": 'XenServer',
+                "privateport": 22,
+                "publicport": 22,
+                "protocol": 'TCP',
+            },
+            "static_nat": {
+                "startport": 22,
+                "endport": 22,
+                "protocol": "TCP"
+            },
+            "network_offering": {
+                "name": 'Network offering-RVR services',
+                "displaytext": 'Network off-RVR services',
+                "guestiptype": 'Isolated',
+                "supportedservices": 'Vpn,Dhcp,Dns,SourceNat,PortForwarding,Firewall,Lb,UserData,StaticNat',
+                "traffictype": 'GUEST',
+                "availability": 'Optional',
+                "serviceProviderList": {
+                    "Vpn": 'VirtualRouter',
+                    "Dhcp": 'VirtualRouter',
+                    "Dns": 'VirtualRouter',
+                    "SourceNat": 'VirtualRouter',
+                    "PortForwarding": 'VirtualRouter',
+                    "Firewall": 'VirtualRouter',
+                    "Lb": 'VirtualRouter',
+                    "UserData": 'VirtualRouter',
+                    "StaticNat": 'VirtualRouter',
+                },
+                "serviceCapabilityList": {
+                    "SourceNat": {
+                        "SupportedSourceNatTypes": "peraccount",
+                        "RedundantRouter": "true",
+                    },
+                    "lb": {
+                        "SupportedLbIsolation": "dedicated"
+                    },
+                },
+            },
+            "host": {
+                "username": "root",
+                "password": "password",
+                "publicport": 22,
+            },
+            "network": {
+                "name": "Test Network",
+                "displaytext": "Test Network",
+            },
+            "lbrule": {
+                "name": "SSH",
+                "alg": "roundrobin",
+                # Algorithm used for load balancing
+                "privateport": 22,
+                "publicport": 22,
+                "openfirewall": True,
+            },
+            "natrule": {
+                "privateport": 22,
+                "publicport": 22,
+                "protocol": "TCP"
+            },
+            "natrule_221": {
+                "privateport": 22,
+                "publicport": 221,
+                "protocol": "TCP"
+            },
+            "fw_rule": {
+                "startport": 1,
+                "endport": 6000,
+                "cidr": '55.55.0.0/11',
+                # Any network (For creating FW rule)
+                "protocol": 'TCP',
+            },
+            "ostype": 'CentOS 5.3 (64-bit)',
+            "sleep": 60,
+        }
+
 
 class TestRvRUpgradeDowngrade(cloudstackTestCase):
 
@@ -145,35 +154,35 @@ class TestRvRUpgradeDowngrade(cloudstackTestCase):
         cls.domain = get_domain(cls.api_client)
         cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
         cls.template = get_template(
-                            cls.api_client,
-                            cls.zone.id,
-                            cls.services["ostype"]
-                            )
+            cls.api_client,
+            cls.zone.id,
+            cls.services["ostype"]
+        )
         cls.services["virtual_machine"]["zoneid"] = cls.zone.id
         cls.services["virtual_machine"]["template"] = cls.template.id
 
         cls.service_offering = ServiceOffering.create(
-                                            cls.api_client,
-                                            cls.services["service_offering"]
-                                            )
+            cls.api_client,
+            cls.services["service_offering"]
+        )
         cls.network_offering = NetworkOffering.create(
-                                            cls.api_client,
-                                            cls.services["network_offering"],
-                                            conservemode=True
-                                            )
+            cls.api_client,
+            cls.services["network_offering"],
+            conservemode=True
+        )
         # Enable Network offering
         cls.network_offering.update(cls.api_client, state='Enabled')
 
         cls._cleanup = [
-                        cls.service_offering,
-                        cls.network_offering,
-                        ]
+            cls.service_offering,
+            cls.network_offering,
+        ]
         return
 
     @classmethod
     def tearDownClass(cls):
         try:
-            #Cleanup resources used
+            # Cleanup resources used
             cleanup_resources(cls.api_client, cls._cleanup)
         except Exception as e:
             raise Exception("Warning: Exception during cleanup : %s" % e)
@@ -183,11 +192,11 @@ class TestRvRUpgradeDowngrade(cloudstackTestCase):
         self.apiclient = self.testClient.getApiClient()
         self.dbclient = self.testClient.getDbConnection()
         self.account = Account.create(
-                                     self.apiclient,
-                                     self.services["account"],
-                                     admin=True,
-                                     domainid=self.domain.id
-                                     )
+            self.apiclient,
+            self.services["account"],
+            admin=True,
+            domainid=self.domain.id
+        )
         self.cleanup = []
         self.cleanup.insert(0, self.account)
         return
@@ -222,126 +231,126 @@ class TestRvRUpgradeDowngrade(cloudstackTestCase):
         # 5. listRouters shows two routers Up and Running (MASTER and BACKUP)
 
         network_offerings = NetworkOffering.list(
-                            self.apiclient,
-                            name='DefaultIsolatedNetworkOfferingWithSourceNatService',
-                            listall=True
-                         )
+            self.apiclient,
+            name='DefaultIsolatedNetworkOfferingWithSourceNatService',
+            listall=True
+        )
         self.assertEqual(
-                    isinstance(network_offerings, list),
-                    True,
-                    "List network offering should not return empty response"
-                    )
+            isinstance(network_offerings, list),
+            True,
+            "List network offering should not return empty response"
+        )
 
         network_off_vr = network_offerings[0]
         # Creating network using the network offering created
         self.debug("Creating network with network offering: %s" %
-                                                        network_off_vr.id)
+                   network_off_vr.id)
         network = Network.create(
-                                self.apiclient,
-                                self.services["network"],
-                                accountid=self.account.name,
-                                domainid=self.account.domainid,
-                                networkofferingid=network_off_vr.id,
-                                zoneid=self.zone.id
-                                )
+            self.apiclient,
+            self.services["network"],
+            accountid=self.account.name,
+            domainid=self.account.domainid,
+            networkofferingid=network_off_vr.id,
+            zoneid=self.zone.id
+        )
         self.debug("Created network with ID: %s" % network.id)
 
         networks = Network.list(
-                                self.apiclient,
-                                id=network.id,
-                                listall=True
-                                )
+            self.apiclient,
+            id=network.id,
+            listall=True
+        )
         self.assertEqual(
             isinstance(networks, list),
             True,
             "List networks should return a valid response for created network"
-             )
+        )
         nw_response = networks[0]
 
         self.debug("Network state: %s" % nw_response.state)
         self.assertEqual(
-                    nw_response.state,
-                    "Allocated",
-                    "The network should be in allocated state after creation"
-                    )
+            nw_response.state,
+            "Allocated",
+            "The network should be in allocated state after creation"
+        )
 
         self.debug("Deploying VM in account: %s" % self.account.name)
 
         # Spawn an instance in that network
         virtual_machine = VirtualMachine.create(
-                                  self.apiclient,
-                                  self.services["virtual_machine"],
-                                  accountid=self.account.name,
-                                  domainid=self.account.domainid,
-                                  serviceofferingid=self.service_offering.id,
-                                  networkids=[str(network.id)]
-                                  )
+            self.apiclient,
+            self.services["virtual_machine"],
+            accountid=self.account.name,
+            domainid=self.account.domainid,
+            serviceofferingid=self.service_offering.id,
+            networkids=[str(network.id)]
+        )
         self.debug("Deployed VM in the account: %s" %
-                                    self.account.name)
+                   self.account.name)
 
         vms = VirtualMachine.list(
-                                  self.apiclient,
-                                  id=virtual_machine.id,
-                                  listall=True
-                                  )
+            self.apiclient,
+            id=virtual_machine.id,
+            listall=True
+        )
         self.assertEqual(
-                         isinstance(vms, list),
-                         True,
-                         "List Vms should return a valid list"
-                         )
+            isinstance(vms, list),
+            True,
+            "List Vms should return a valid list"
+        )
         vm = vms[0]
         self.assertEqual(
-                         vm.state,
-                         "Running",
-                         "Vm should be in running state after deployment"
-                         )
+            vm.state,
+            "Running",
+            "Vm should be in running state after deployment"
+        )
 
         self.debug("Listing routers for account: %s" %
-                                        self.account.name)
+                   self.account.name)
         routers = Router.list(
-                              self.apiclient,
-                              account=self.account.name,
-                              domainid=self.account.domainid,
-                              listall=True
-                              )
+            self.apiclient,
+            account=self.account.name,
+            domainid=self.account.domainid,
+            listall=True
+        )
         self.assertEqual(
-                    isinstance(routers, list),
-                    True,
-                    "list router should return only one router"
-                    )
+            isinstance(routers, list),
+            True,
+            "list router should return only one router"
+        )
         self.assertEqual(
-                    len(routers),
-                    1,
-                    "Length of the list router should be 1"
-                    )
+            len(routers),
+            1,
+            "Length of the list router should be 1"
+        )
 
         self.debug("Upgrading the network to RVR network offering..")
         try:
             network.update(
-                           self.apiclient,
-                           networkofferingid=self.network_offering.id
-                           )
+                self.apiclient,
+                networkofferingid=self.network_offering.id
+            )
         except Exception as e:
             self.fail("Failed to upgrade the network from VR to RVR: %s" % e)
 
         self.debug("Listing routers for account: %s" %
-                                        self.account.name)
+                   self.account.name)
         routers = Router.list(
-                              self.apiclient,
-                              account=self.account.name,
-                              domainid=self.account.domainid,
-                              listall=True
-                              )
+            self.apiclient,
+            account=self.account.name,
+            domainid=self.account.domainid,
+            listall=True
+        )
         self.assertEqual(
-                    isinstance(routers, list),
-                    True,
-                    "list router should return two routers"
-                    )
+            isinstance(routers, list),
+            True,
+            "list router should return two routers"
+        )
         self.assertEqual(
-                    len(routers),
-                    2,
-                    "Length of the list router should be 2 (MASTER & BACKUP)"
-                    )
+            len(routers),
+            2,
+            "Length of the list router should be 2 (MASTER & BACKUP)"
+        )
         return
 
     @attr(tags=["advanced", "advancedns", "ssh"], required_hardware="false")
@@ -370,124 +379,124 @@ class TestRvRUpgradeDowngrade(cloudstackTestCase):
 
         # Creating network using the network offering created
         self.debug("Creating network with network offering: %s" %
-                                                    self.network_offering.id)
+                   self.network_offering.id)
         network = Network.create(
-                                self.apiclient,
-                                self.services["network"],
-                                accountid=self.account.name,
-                                domainid=self.account.domainid,
-                                networkofferingid=self.network_offering.id,
-                                zoneid=self.zone.id
-                                )
+            self.apiclient,
+            self.services["network"],
+            accountid=self.account.name,
+            domainid=self.account.domainid,
+            networkofferingid=self.network_offering.id,
+            zoneid=self.zone.id
+        )
         self.debug("Created network with ID: %s" % network.id)
 
         networks = Network.list(
-                                self.apiclient,
-                                id=network.id,
-                                listall=True
-                                )
+            self.apiclient,
+            id=network.id,
+            listall=True
+        )
         self.assertEqual(
             isinstance(networks, list),
             True,
             "List networks should return a valid response for created network"
-             )
+        )
         nw_response = networks[0]
 
         self.debug("Network state: %s" % nw_response.state)
         self.assertEqual(
-                    nw_response.state,
-                    "Allocated",
-                    "The network should be in allocated state after creation"
-                    )
+            nw_response.state,
+            "Allocated",
+            "The network should be in allocated state after creation"
+        )
 
         self.debug("Deploying VM in account: %s" % self.account.name)
 
         # Spawn an instance in that network
         virtual_machine = VirtualMachine.create(
-                                  self.apiclient,
-                                  self.services["virtual_machine"],
-                                  accountid=self.account.name,
-                                  domainid=self.account.domainid,
-                                  serviceofferingid=self.service_offering.id,
-                                  networkids=[str(network.id)]
-                                  )
+            self.apiclient,
+            self.services["virtual_machine"],
+            accountid=self.account.name,
+            domainid=self.account.domainid,
+            serviceofferingid=self.service_offering.id,
+            networkids=[str(network.id)]
+        )
         self.debug("Deployed VM in the account: %s" %
-                                    self.account.name)
+                   self.account.name)
 
         vms = VirtualMachine.list(
-                                  self.apiclient,
-                                  id=virtual_machine.id,
-                                  listall=True
-                                  )
+            self.apiclient,
+            id=virtual_machine.id,
+            listall=True
+        )
         self.assertEqual(
-                         isinstance(vms, list),
-                         True,
-                         "List Vms should return a valid list"
-                         )
+            isinstance(vms, list),
+            True,
+            "List Vms should return a valid list"
+        )
         vm = vms[0]
         self.assertEqual(
-                         vm.state,
-                         "Running",
-                         "Vm should be in running state after deployment"
-                         )
+            vm.state,
+            "Running",
+            "Vm should be in running state after deployment"
+        )
 
         self.debug("Listing routers for account: %s" %
-                                        self.account.name)
+                   self.account.name)
         routers = Router.list(
-                              self.apiclient,
-                              account=self.account.name,
-                              domainid=self.account.domainid,
-                              listall=True
-                              )
+            self.apiclient,
+            account=self.account.name,
+            domainid=self.account.domainid,
+            listall=True
+        )
         self.assertEqual(
-                    isinstance(routers, list),
-                    True,
-                    "list router should return two routers"
-                    )
+            isinstance(routers, list),
+            True,
+            "list router should return two routers"
+        )
         self.assertEqual(
-                    len(routers),
-                    2,
-                    "Length of the list router should be 2 (MASTER & BACKUP)"
-                    )
+            len(routers),
+            2,
+            "Length of the list router should be 2 (MASTER & BACKUP)"
+        )
 
         network_offerings = NetworkOffering.list(
-                            self.apiclient,
-                            name='DefaultIsolatedNetworkOfferingWithSourceNatService',
-                            listall=True
-                         )
+            self.apiclient,
+            name='DefaultIsolatedNetworkOfferingWithSourceNatService',
+            listall=True
+        )
         self.assertEqual(
-                    isinstance(network_offerings, list),
-                    True,
-                    "List network offering should not return empty response"
-                    )
+            isinstance(network_offerings, list),
+            True,
+            "List network offering should not return empty response"
+        )
 
         network_off_vr = network_offerings[0]
 
         self.debug("Upgrading the network to RVR network offering..")
         try:
             network.update(
-                           self.apiclient,
-                           networkofferingid=network_off_vr.id
-                           )
+                self.apiclient,
+                networkofferingid=network_off_vr.id
+            )
         except Exception as e:
             self.fail("Failed to upgrade the network from VR to RVR: %s" % e)
 
         self.debug("Listing routers for account: %s" %
-                                        self.account.name)
+                   self.account.name)
         routers = Router.list(
-                              self.apiclient,
-                              account=self.account.name,
-                              domainid=self.account.domainid,
-                              listall=True
-                              )
+            self.apiclient,
+            account=self.account.name,
+            domainid=self.account.domainid,
+            listall=True
+        )
         self.assertEqual(
-                    isinstance(routers, list),
-                    True,
-                    "list router should return only one router"
-                    )
+            isinstance(routers, list),
+            True,
+            "list router should return only one router"
+        )
         self.assertEqual(
-                    len(routers),
-                    1,
-                    "Length of the list router should be 1"
-                    )
+            len(routers),
+            1,
+            "Length of the list router should be 1"
+        )
         return


[48/50] [abbrv] git commit: updated refs/heads/feature/vpc-ipv6 to 6140db5

Posted by ek...@apache.org.
Fix malformed closing tags on comments in the Cisco VNMC .xml files

Signed-off-by: Daan Hoogland <da...@gmail.com>

This closes #369


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/c5437d21
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/c5437d21
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/c5437d21

Branch: refs/heads/feature/vpc-ipv6
Commit: c5437d216fe6aaf85e35bfc7d85bf3b2f37fa70a
Parents: b9b13d7
Author: Rafael da Fonseca <rs...@gmail.com>
Authored: Sun Jun 7 21:52:34 2015 +0200
Committer: Daan Hoogland <da...@gmail.com>
Committed: Mon Jun 8 09:41:13 2015 +0200

----------------------------------------------------------------------
 .../scripts/network/cisco/create-acl-policy-ref.xml          | 8 ++++----
 .../scripts/network/cisco/create-acl-policy-set.xml          | 8 ++++----
 .../cisco-vnmc/scripts/network/cisco/create-acl-policy.xml   | 8 ++++----
 .../scripts/network/cisco/create-acl-rule-for-dnat.xml       | 8 ++++----
 .../scripts/network/cisco/create-acl-rule-for-pf.xml         | 8 ++++----
 .../cisco-vnmc/scripts/network/cisco/create-dhcp-policy.xml  | 2 +-
 .../cisco-vnmc/scripts/network/cisco/create-dnat-rule.xml    | 8 ++++----
 .../scripts/network/cisco/create-edge-firewall.xml           | 2 +-
 .../scripts/network/cisco/create-edge-security-profile.xml   | 2 +-
 .../scripts/network/cisco/create-egress-acl-rule.xml         | 8 ++++----
 .../cisco/create-generic-egress-acl-no-protocol-rule.xml     | 8 ++++----
 .../scripts/network/cisco/create-generic-egress-acl-rule.xml | 8 ++++----
 .../network/cisco/create-generic-ingress-acl-rule.xml        | 8 ++++----
 .../scripts/network/cisco/create-ingress-acl-rule.xml        | 8 ++++----
 .../cisco-vnmc/scripts/network/cisco/create-ip-pool.xml      | 8 ++++----
 .../scripts/network/cisco/create-nat-policy-ref.xml          | 8 ++++----
 .../scripts/network/cisco/create-nat-policy-set.xml          | 2 +-
 .../cisco-vnmc/scripts/network/cisco/create-pf-rule.xml      | 8 ++++----
 .../cisco-vnmc/scripts/network/cisco/create-port-pool.xml    | 8 ++++----
 .../scripts/network/cisco/create-source-nat-pool.xml         | 2 +-
 .../scripts/network/cisco/create-source-nat-rule.xml         | 2 +-
 .../cisco-vnmc/scripts/network/cisco/list-acl-policies.xml   | 8 ++++----
 .../cisco-vnmc/scripts/network/cisco/list-children.xml       | 8 ++++----
 .../cisco-vnmc/scripts/network/cisco/list-nat-policies.xml   | 8 ++++----
 .../scripts/network/cisco/list-policyrefs-in-policyset.xml   | 8 ++++----
 25 files changed, 82 insertions(+), 82 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/c5437d21/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-acl-policy-ref.xml
----------------------------------------------------------------------
diff --git a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-acl-policy-ref.xml b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-acl-policy-ref.xml
index c534c32..4fe3db7 100755
--- a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-acl-policy-ref.xml
+++ b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-acl-policy-ref.xml
@@ -32,7 +32,7 @@ under the License.
   </inConfigs>
 </configConfMos>
 
-<!--
-    aclpolicyrefdn="org-root/org-vlan-123/org-VDC-vlan-123/pset-Ingress-ACL-Policy-Set-vlan-123/polref-aaa"
-    aclpolicyname="aaa"
---!>
+<!--
+    aclpolicyrefdn="org-root/org-vlan-123/org-VDC-vlan-123/pset-Ingress-ACL-Policy-Set-vlan-123/polref-aaa"
+    aclpolicyname="aaa"
+-->

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/c5437d21/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-acl-policy-set.xml
----------------------------------------------------------------------
diff --git a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-acl-policy-set.xml b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-acl-policy-set.xml
index b475d2c..a04be15 100755
--- a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-acl-policy-set.xml
+++ b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-acl-policy-set.xml
@@ -30,7 +30,7 @@ under the License.
   </inConfigs>
 </configConfMos>
 
-<!--
-    aclpolicysetdn="org-root/org-vlan-123/org-VDC-vlan-123/pset-foo"
-    aclpolicysetname="foo"
---!>
+<!--
+    aclpolicysetdn="org-root/org-vlan-123/org-VDC-vlan-123/pset-foo"
+    aclpolicysetname="foo"
+-->

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/c5437d21/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-acl-policy.xml
----------------------------------------------------------------------
diff --git a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-acl-policy.xml b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-acl-policy.xml
index e71cd42..c6f7d37 100755
--- a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-acl-policy.xml
+++ b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-acl-policy.xml
@@ -29,7 +29,7 @@ under the License.
     </inConfig>
 </configConfMo>
 
-<!--
-    aclpolicydn="org-root/org-vlan-123/org-VDC-vlan-123/pol-test_policy"
-    aclpolicyname="test_policy"
---!>
\ No newline at end of file
+<!--
+    aclpolicydn="org-root/org-vlan-123/org-VDC-vlan-123/pol-test_policy"
+    aclpolicyname="test_policy"
+-->
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/c5437d21/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-acl-rule-for-dnat.xml
----------------------------------------------------------------------
diff --git a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-acl-rule-for-dnat.xml b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-acl-rule-for-dnat.xml
index 5b6aaa3..e1ec9ad 100755
--- a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-acl-rule-for-dnat.xml
+++ b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-acl-rule-for-dnat.xml
@@ -76,7 +76,7 @@ under the License.
 <!--
     aclruledn="org-root/org-vlan-123/org-VDC-vlan-123/pol-test_policy/rule-dummy"
     aclrulename="dummy"
-    descr=value
-    actiontype="drop" or "permit"
-    ip="public ip at destination"
---!>
+    descr=value
+    actiontype="drop" or "permit"
+    ip="public ip at destination"
+-->

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/c5437d21/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-acl-rule-for-pf.xml
----------------------------------------------------------------------
diff --git a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-acl-rule-for-pf.xml b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-acl-rule-for-pf.xml
index 1a1d9cb..e0fa2e4 100755
--- a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-acl-rule-for-pf.xml
+++ b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-acl-rule-for-pf.xml
@@ -150,7 +150,7 @@ under the License.
     descr=value
     actiontype="drop" or "permit"
     protocolvalue="TCP" or "UDP"
-    ip="public ip at destination"
-    startport="start port at destination"
-    endport="end port at destination"
---!>
+    ip="public ip at destination"
+    startport="start port at destination"
+    endport="end port at destination"
+-->

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/c5437d21/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-dhcp-policy.xml
----------------------------------------------------------------------
diff --git a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-dhcp-policy.xml b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-dhcp-policy.xml
index 5bb4abc..cab9f98 100644
--- a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-dhcp-policy.xml
+++ b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-dhcp-policy.xml
@@ -69,4 +69,4 @@ under the License.
     "org-root/org-TenantC/org-VDC-TenantC/dhcp-server-TenantC-Dhcp-Policy/ip-range-iprange"
     "org-root/org-TestTenant3/org-Tenant3-VDC/dhcp-server-Tenant3-DHCP-Policy/dns-svc-Tenant3-DNS" 
     "org-root/org-TestTenant3/org-Tenant3-VDC/dhcp-server-Tenant3-DHCP-Policy/dns-svc-Tenant3-DNS/dns-8.8.8.8" 
---!>
+-->

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/c5437d21/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-dnat-rule.xml
----------------------------------------------------------------------
diff --git a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-dnat-rule.xml b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-dnat-rule.xml
index bd8dbff..308664d 100755
--- a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-dnat-rule.xml
+++ b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-dnat-rule.xml
@@ -85,7 +85,7 @@ under the License.
 <!--
     natruledn="org-root/org-vlan-123/org-VDC-vlan-123/natpol-aaa/rule-bbb"
     natrulename="bbb"
-    descr=value
-    ippoolname="ccc"
-    ip="10.147.30.230"
---!>
+    descr=value
+    ippoolname="ccc"
+    ip="10.147.30.230"
+-->

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/c5437d21/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-edge-firewall.xml
----------------------------------------------------------------------
diff --git a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-edge-firewall.xml b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-edge-firewall.xml
index e5447e3..d2ab997 100644
--- a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-edge-firewall.xml
+++ b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-edge-firewall.xml
@@ -86,4 +86,4 @@ under the License.
     ipAddressPrimary="%publicip%"
     ipSubnet="%outsidesubnet%"
     name="%outsideintfname%
---!>
+-->

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/c5437d21/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-edge-security-profile.xml
----------------------------------------------------------------------
diff --git a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-edge-security-profile.xml b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-edge-security-profile.xml
index e2f5eaf..29f7111 100644
--- a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-edge-security-profile.xml
+++ b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-edge-security-profile.xml
@@ -38,4 +38,4 @@ under the License.
     egressAclPsetRef="default-egress"
     ingressAclPsetRef="default-ingress"
     name="Tenant3-ESSP"
---!>
+-->

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/c5437d21/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-egress-acl-rule.xml
----------------------------------------------------------------------
diff --git a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-egress-acl-rule.xml b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-egress-acl-rule.xml
index f283ffe..d6e4676 100755
--- a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-egress-acl-rule.xml
+++ b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-egress-acl-rule.xml
@@ -162,7 +162,7 @@ under the License.
     actiontype="drop" or "permit"
     protocolvalue = "TCP" or "UDP"
     sourcestartip="source start ip"
-    sourceendip="source end ip"
-    deststartport="start port at destination"
-    destendport="end port at destination"
---!>
+    sourceendip="source end ip"
+    deststartport="start port at destination"
+    destendport="end port at destination"
+-->

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/c5437d21/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-generic-egress-acl-no-protocol-rule.xml
----------------------------------------------------------------------
diff --git a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-generic-egress-acl-no-protocol-rule.xml b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-generic-egress-acl-no-protocol-rule.xml
index e6f4cfb..fac59f3 100755
--- a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-generic-egress-acl-no-protocol-rule.xml
+++ b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-generic-egress-acl-no-protocol-rule.xml
@@ -88,7 +88,7 @@ under the License.
     aclruledn="org-root/org-vlan-123/org-VDC-vlan-123/pol-test_policy/rule-dummy"
     aclrulename="dummy"
     descr=value
-    actiontype="drop" or "permit"
-    sourcestartip="source start ip"
-    sourceendip="source end ip"
---!>
+    actiontype="drop" or "permit"
+    sourcestartip="source start ip"
+    sourceendip="source end ip"
+-->

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/c5437d21/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-generic-egress-acl-rule.xml
----------------------------------------------------------------------
diff --git a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-generic-egress-acl-rule.xml b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-generic-egress-acl-rule.xml
index 55edd1f..7a5b9d8 100755
--- a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-generic-egress-acl-rule.xml
+++ b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-generic-egress-acl-rule.xml
@@ -115,7 +115,7 @@ under the License.
     aclrulename="dummy"
     descr=value
     actiontype="drop" or "permit"
-    protocolvalue = "TCP" or "UDP" or "ICMP"
-    sourcestartip="source start ip"
-    sourceendip="source end ip"
---!>
+    protocolvalue = "TCP" or "UDP" or "ICMP"
+    sourcestartip="source start ip"
+    sourceendip="source end ip"
+-->

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/c5437d21/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-generic-ingress-acl-rule.xml
----------------------------------------------------------------------
diff --git a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-generic-ingress-acl-rule.xml b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-generic-ingress-acl-rule.xml
index 7c11641..bd368f5 100755
--- a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-generic-ingress-acl-rule.xml
+++ b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-generic-ingress-acl-rule.xml
@@ -115,7 +115,7 @@ under the License.
     aclrulename="dummy"
     descr=value
     actiontype="drop" or "permit"
-    protocolvalue = "TCP" or "UDP" or "ICMP"
-    sourcestartip = "source start IP"
-    sourceendip = "source end IP"
---!>
+    protocolvalue = "TCP" or "UDP" or "ICMP"
+    sourcestartip = "source start IP"
+    sourceendip = "source end IP"
+-->

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/c5437d21/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-ingress-acl-rule.xml
----------------------------------------------------------------------
diff --git a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-ingress-acl-rule.xml b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-ingress-acl-rule.xml
index f283ffe..d6e4676 100755
--- a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-ingress-acl-rule.xml
+++ b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-ingress-acl-rule.xml
@@ -162,7 +162,7 @@ under the License.
     actiontype="drop" or "permit"
     protocolvalue = "TCP" or "UDP"
     sourcestartip="source start ip"
-    sourceendip="source end ip"
-    deststartport="start port at destination"
-    destendport="end port at destination"
---!>
+    sourceendip="source end ip"
+    deststartport="start port at destination"
+    destendport="end port at destination"
+-->

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/c5437d21/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-ip-pool.xml
----------------------------------------------------------------------
diff --git a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-ip-pool.xml b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-ip-pool.xml
index 4cf0451..876fa21 100755
--- a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-ip-pool.xml
+++ b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-ip-pool.xml
@@ -52,7 +52,7 @@ under the License.
 </configConfMos>
 
 <!--
-    ippooldn="org-root/org-vlan-123/org-VDC-vlan-123/objgrp-ccc"
-    ippoolname="ccc"
-    ipvalue="10.1.1.20"
---!>
\ No newline at end of file
+    ippooldn="org-root/org-vlan-123/org-VDC-vlan-123/objgrp-ccc"
+    ippoolname="ccc"
+    ipvalue="10.1.1.20"
+-->
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/c5437d21/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-nat-policy-ref.xml
----------------------------------------------------------------------
diff --git a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-nat-policy-ref.xml b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-nat-policy-ref.xml
index 450d40c..0e7d6e0 100755
--- a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-nat-policy-ref.xml
+++ b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-nat-policy-ref.xml
@@ -32,7 +32,7 @@ under the License.
   </inConfigs>
 </configConfMos>
 
-<!--
-    natpolicyrefdn="org-root/org-TenantD/org-VDC-TenantD/natpset-TenantD-NAT-Policy-Set/polref-Source-NAT-Policy-TenantD"
-    natpolicyname="Source-NAT-Policy-TenantD"
---!>
+<!--
+    natpolicyrefdn="org-root/org-TenantD/org-VDC-TenantD/natpset-TenantD-NAT-Policy-Set/polref-Source-NAT-Policy-TenantD"
+    natpolicyname="Source-NAT-Policy-TenantD"
+-->

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/c5437d21/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-nat-policy-set.xml
----------------------------------------------------------------------
diff --git a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-nat-policy-set.xml b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-nat-policy-set.xml
index 090caf1..2e2c63e 100644
--- a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-nat-policy-set.xml
+++ b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-nat-policy-set.xml
@@ -34,4 +34,4 @@ under the License.
 <!--
     natpolicysetdn="org-root/org-TenantD/org-VDC-TenantD/natpset-TenantD-NAT-Policy-Set"
     natpolicysetname="Source-NAT-Policy-Set-TenantD"
---!>
+-->

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/c5437d21/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-pf-rule.xml
----------------------------------------------------------------------
diff --git a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-pf-rule.xml b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-pf-rule.xml
index a8a631f..4cfecd4 100755
--- a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-pf-rule.xml
+++ b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-pf-rule.xml
@@ -160,7 +160,7 @@ under the License.
     ippoolname="ccc"
     portpoolname="ddd"
     ip="10.147.30.230"
-    startport="22"
-    endport="22"
-    protocolvalue="TCP"
---!>
+    startport="22"
+    endport="22"
+    protocolvalue="TCP"
+-->

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/c5437d21/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-port-pool.xml
----------------------------------------------------------------------
diff --git a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-port-pool.xml b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-port-pool.xml
index e1b7be0..d218eec 100755
--- a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-port-pool.xml
+++ b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-port-pool.xml
@@ -66,7 +66,7 @@ under the License.
 
 <!--
     portpooldn="org-root/org-vlan-123/org-VDC-vlan-123/objgrp-ddd"
-    portpoolname="ddd"
-    startport="22"
-    endport="22"
---!>
+    portpoolname="ddd"
+    startport="22"
+    endport="22"
+-->

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/c5437d21/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-source-nat-pool.xml
----------------------------------------------------------------------
diff --git a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-source-nat-pool.xml b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-source-nat-pool.xml
index 2ad1e87..957bc9f 100644
--- a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-source-nat-pool.xml
+++ b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-source-nat-pool.xml
@@ -55,4 +55,4 @@ under the License.
     publicipdn="org-root/org-TestTenant3/org-Tenant3-VDC/objgrp-Source-NAT-Pool-For-Tenant3/objgrp-expr-2/nw-ip-2"
     snatpooldn= "org-root/org-TestTenant3/org-Tenant3-VDC/objgrp-Source-NAT-Pool-For-Tenant3"
     value="10.223.136.10"
---!>
+-->

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/c5437d21/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-source-nat-rule.xml
----------------------------------------------------------------------
diff --git a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-source-nat-rule.xml b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-source-nat-rule.xml
index a3ee987..9980df1 100644
--- a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-source-nat-rule.xml
+++ b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-source-nat-rule.xml
@@ -100,4 +100,4 @@ under the License.
     ippoolname=value
     srcstartip=value
     srcendip=value
---!>
+-->

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/c5437d21/plugins/network-elements/cisco-vnmc/scripts/network/cisco/list-acl-policies.xml
----------------------------------------------------------------------
diff --git a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/list-acl-policies.xml b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/list-acl-policies.xml
index aec800e..aee2f89 100755
--- a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/list-acl-policies.xml
+++ b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/list-acl-policies.xml
@@ -25,7 +25,7 @@ under the License.
   <inFilter>
   </inFilter>
 </orgResolveInScope>
-
-<!--
-    vdcdn="org-root/org-vlan-123/org-VDC-vlan-123"
---!>
+
+<!--
+    vdcdn="org-root/org-vlan-123/org-VDC-vlan-123"
+-->

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/c5437d21/plugins/network-elements/cisco-vnmc/scripts/network/cisco/list-children.xml
----------------------------------------------------------------------
diff --git a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/list-children.xml b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/list-children.xml
index f272999..a24b1cf 100755
--- a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/list-children.xml
+++ b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/list-children.xml
@@ -21,7 +21,7 @@ under the License.
   inDn="%dn%"
   inHierarchical="true">
   <inFilter>
-  </inFilter>
-</configResolveChildren>
-
-<!--dn="org-root/org-vlan-517/org-VDC-vlan-517/natpol-DNAT-vlan-517-10-147-30-235"--!>
+  </inFilter>
+</configResolveChildren>
+
+<!--dn="org-root/org-vlan-517/org-VDC-vlan-517/natpol-DNAT-vlan-517-10-147-30-235"-->

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/c5437d21/plugins/network-elements/cisco-vnmc/scripts/network/cisco/list-nat-policies.xml
----------------------------------------------------------------------
diff --git a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/list-nat-policies.xml b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/list-nat-policies.xml
index 720ced0..bd978cb 100755
--- a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/list-nat-policies.xml
+++ b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/list-nat-policies.xml
@@ -25,7 +25,7 @@ under the License.
   <inFilter>
   </inFilter>
 </orgResolveInScope>
-
-<!--
-    vdcdn="org-root/org-vlan-123/org-VDC-vlan-123"
---!>
+
+<!--
+    vdcdn="org-root/org-vlan-123/org-VDC-vlan-123"
+-->

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/c5437d21/plugins/network-elements/cisco-vnmc/scripts/network/cisco/list-policyrefs-in-policyset.xml
----------------------------------------------------------------------
diff --git a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/list-policyrefs-in-policyset.xml b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/list-policyrefs-in-policyset.xml
index c53af90..cda51c5 100755
--- a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/list-policyrefs-in-policyset.xml
+++ b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/list-policyrefs-in-policyset.xml
@@ -25,7 +25,7 @@ under the License.
   <inFilter>
   </inFilter>
 </orgResolveInScope>
-
-<!--
-    vdcdn="org-root/org-vlan-123/org-VDC-vlan-123"
---!>
+
+<!--
+    vdcdn="org-root/org-vlan-123/org-VDC-vlan-123"
+-->


[19/50] [abbrv] git commit: updated refs/heads/feature/vpc-ipv6 to 6140db5

Posted by ek...@apache.org.
CLOUDSTACK-8515: Skipping snapshot test case for HyperV and LXC

Signed-off-by: Gaurav Aradhye <ga...@clogeny.com>
This closes #347


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/f2b1ec2c
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/f2b1ec2c
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/f2b1ec2c

Branch: refs/heads/feature/vpc-ipv6
Commit: f2b1ec2c7d1a81c60a49545272503c783f7c70bb
Parents: d6052a3
Author: Gaurav Aradhye <ga...@clogeny.com>
Authored: Tue Jun 2 16:17:17 2015 +0530
Committer: Gaurav Aradhye <ga...@clogeny.com>
Committed: Tue Jun 2 16:18:51 2015 +0530

----------------------------------------------------------------------
 test/integration/component/test_project_resources.py | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/f2b1ec2c/test/integration/component/test_project_resources.py
----------------------------------------------------------------------
diff --git a/test/integration/component/test_project_resources.py b/test/integration/component/test_project_resources.py
index 142b08c..0816e9d 100644
--- a/test/integration/component/test_project_resources.py
+++ b/test/integration/component/test_project_resources.py
@@ -41,8 +41,7 @@ from marvin.lib.common import (get_zone,
                                            list_volumes,
                                            list_network_offerings,
                                            list_lb_rules,
-                                           get_free_vlan,
-                                           wait_for_cleanup)
+                                           get_free_vlan)
 
 from marvin.lib.utils import cleanup_resources
 import random
@@ -787,8 +786,10 @@ class TestSnapshots(cloudstackTestCase):
         cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
         cls.services['mode'] = cls.zone.networktype
         cls.hypervisor = cls.testClient.getHypervisorInfo()
-        if cls.hypervisor.lower() in ['lxc']:
-            raise unittest.SkipTest("snapshots are not supported on %s" % cls.hypervisor.lower())
+        cls._cleanup = []
+        cls.snapshotSupported = True
+        if cls.hypervisor.lower() in ['hyperv', 'lxc']:
+            cls.snapshotSupported = False
 
         cls.template = get_template(
                             cls.api_client,
@@ -863,6 +864,9 @@ class TestSnapshots(cloudstackTestCase):
         # 3. Verify snapshot created inside project can only be used in inside
         #    the project
 
+        if not self.snapshotSupported:
+            self.skipTest("Snapshot is not supported on %s" % self.hypervisor)
+
         self.debug("Deploying VM for Project: %s" % self.project.id)
         virtual_machine_1 = VirtualMachine.create(
                                 self.apiclient,


[26/50] [abbrv] git commit: updated refs/heads/feature/vpc-ipv6 to 6140db5

Posted by ek...@apache.org.
CID 1302974: Scanner in try-with-resources

Signed-off-by: Rohit Yadav <ro...@shapeblue.com>

This closes #353


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/109b6e94
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/109b6e94
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/109b6e94

Branch: refs/heads/feature/vpc-ipv6
Commit: 109b6e94d39d7c6db4ff721c4699e42abe47ec5e
Parents: 9c2a1ea
Author: Daan Hoogland <da...@gmail.com>
Authored: Wed Jun 3 17:37:13 2015 +0200
Committer: Rohit Yadav <ro...@shapeblue.com>
Committed: Thu Jun 4 00:09:16 2015 +0200

----------------------------------------------------------------------
 .../src/org/apache/cloudstack/utils/linux/MemStat.java | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/109b6e94/plugins/hypervisors/kvm/src/org/apache/cloudstack/utils/linux/MemStat.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/kvm/src/org/apache/cloudstack/utils/linux/MemStat.java b/plugins/hypervisors/kvm/src/org/apache/cloudstack/utils/linux/MemStat.java
index 1e3c872..1d6a4fc 100644
--- a/plugins/hypervisors/kvm/src/org/apache/cloudstack/utils/linux/MemStat.java
+++ b/plugins/hypervisors/kvm/src/org/apache/cloudstack/utils/linux/MemStat.java
@@ -16,11 +16,10 @@
 // under the License.
 package org.apache.cloudstack.utils.linux;
 
-import java.util.HashMap;
-import java.util.Map;
-
 import java.io.File;
 import java.io.FileNotFoundException;
+import java.util.HashMap;
+import java.util.Map;
 import java.util.Scanner;
 
 public class MemStat {
@@ -29,7 +28,7 @@ public class MemStat {
     protected final static String CACHE_KEY = "Cached";
     protected final static String TOTAL_KEY = "MemTotal";
 
-    private Map<String, Double> _memStats = new HashMap<String, Double>();
+    private final Map<String, Double> _memStats = new HashMap<String, Double>();
 
     public MemStat() {
     }
@@ -51,9 +50,9 @@ public class MemStat {
     }
 
     public void refresh() {
-        try {
-            Scanner fileScanner = new Scanner(new File(MEMINFO_FILE));
-            parseFromScanner(fileScanner);
+        File f = new File(MEMINFO_FILE);
+        try (Scanner scanner = new Scanner(f)) {
+            parseFromScanner(scanner);
         } catch (FileNotFoundException ex) {
             throw new RuntimeException("File " + MEMINFO_FILE + " not found:" + ex.toString());
         }

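The key change above is that the Scanner over the meminfo file is now opened in a try-with-resources block, so the underlying file handle is closed even if parsing throws. A minimal standalone sketch of the idiom, using an assumed class and method name rather than the actual MemStat code:

import java.io.File;
import java.io.FileNotFoundException;
import java.util.Scanner;

// Illustrative sketch of the try-with-resources idiom the commit adopts;
// the class and parsing logic here are hypothetical, not CloudStack code.
public class MemInfoSketch {
    public static long readKiB(String path, String key) {
        File f = new File(path);
        try (Scanner scanner = new Scanner(f)) {          // scanner.close() runs automatically
            while (scanner.hasNextLine()) {
                String line = scanner.nextLine();          // e.g. "MemTotal:  16384 kB"
                if (line.startsWith(key)) {
                    return Long.parseLong(line.replaceAll("\\D+", " ").trim().split(" ")[0]);
                }
            }
        } catch (FileNotFoundException ex) {
            throw new RuntimeException("File " + path + " not found: " + ex);
        }
        return -1L;
    }

    public static void main(String[] args) {
        System.out.println(readKiB("/proc/meminfo", "MemTotal"));
    }
}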

[40/50] [abbrv] git commit: updated refs/heads/feature/vpc-ipv6 to 6140db5

Posted by ek...@apache.org.
Allow PropertiesUtil to read from jar files.

PropertiesUtil has code for reading from jar files, but the
findConfigFile method will prevent it from ever returning a file in a
jar on the classpath since it always wants to have a "file:" URL and
use the File class.

This commit moves the jar file loading attempt from a catch block to
an else clause, executed if a config file:// URL could not be found.
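
The distinction matters because ClassLoader.getResource returns a "jar:" URL for a resource packed inside a jar, which java.io.File cannot represent, while opening a stream from the URL works in either case. A rough standalone sketch of that fallback, with illustrative names rather than the actual CloudStack implementation:

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.URL;
import java.util.Properties;

// Hypothetical sketch of the file-vs-jar branching described above; not CloudStack code.
public class ClasspathPropsSketch {
    public static Properties load(String path) throws IOException {
        URL url = ClasspathPropsSketch.class.getClassLoader().getResource(path);
        if (url == null) {
            throw new IOException("resource not found on classpath: " + path);
        }
        Properties props = new Properties();
        if ("file".equals(url.getProtocol())) {
            // Plain file on disk: java.io.File works here.
            File f = new File(url.getFile());
            try (InputStream in = new FileInputStream(f)) {
                props.load(in);
            }
        } else {
            // Resource inside a jar ("jar:" protocol): File cannot open it,
            // so fall back to streaming straight from the URL.
            try (InputStream in = url.openStream()) {
                props.load(in);
            }
        }
        return props;
    }
}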

Signed-off-by: Daan Hoogland <da...@gmail.com>

This closes #358


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/db69c8e8
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/db69c8e8
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/db69c8e8

Branch: refs/heads/feature/vpc-ipv6
Commit: db69c8e82b555b4fb6d05f059bf64e87189ba6a9
Parents: 0326fb3
Author: jeff <je...@greenqloud.com>
Authored: Thu Jun 4 16:35:34 2015 +0000
Committer: Daan Hoogland <da...@gmail.com>
Committed: Mon Jun 8 09:19:21 2015 +0200

----------------------------------------------------------------------
 utils/src/com/cloud/utils/PropertiesUtil.java | 32 +++++++++++++---------
 1 file changed, 19 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/db69c8e8/utils/src/com/cloud/utils/PropertiesUtil.java
----------------------------------------------------------------------
diff --git a/utils/src/com/cloud/utils/PropertiesUtil.java b/utils/src/com/cloud/utils/PropertiesUtil.java
index fe5a366..4cb89f7 100644
--- a/utils/src/com/cloud/utils/PropertiesUtil.java
+++ b/utils/src/com/cloud/utils/PropertiesUtil.java
@@ -21,7 +21,6 @@ package com.cloud.utils;
 
 import java.io.File;
 import java.io.FileInputStream;
-import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStream;
 import java.net.URL;
@@ -44,6 +43,7 @@ public class PropertiesUtil {
     public static File findConfigFile(String path) {
         ClassLoader cl = PropertiesUtil.class.getClassLoader();
         URL url = cl.getResource(path);
+
         if (url != null && "file".equals(url.getProtocol())) {
             return new File(url.getFile());
         }
@@ -124,6 +124,15 @@ public class PropertiesUtil {
         return null;
     }
 
+    public static void loadFromJar(Properties properties, String configFile) throws IOException {
+        InputStream stream = PropertiesUtil.openStreamFromURL(configFile);
+        if (stream != null) {
+            properties.load(stream);
+        } else {
+            s_logger.error("Unable to find properties file: " + configFile);
+        }
+    }
+
     // Returns key=value pairs by parsing a commands.properties/config file
     // with syntax; key=cmd;value (with this syntax cmd is stripped) and key=value
     public static Map<String, String> processConfigFile(String[] configFiles) {
@@ -134,22 +143,18 @@ public class PropertiesUtil {
             if (commandsFile != null) {
                 try {
                     loadFromFile(preProcessedCommands, commandsFile);
-                } catch (FileNotFoundException fnfex) {
-                    // in case of a file within a jar in classpath, try to open stream using url
-                    InputStream stream = PropertiesUtil.openStreamFromURL(configFile);
-                    if (stream != null) {
-                        try {
-                            preProcessedCommands.load(stream);
-                        } catch (IOException e) {
-                            s_logger.error("IO Exception, unable to find properties file:", fnfex);
-                        }
-                    } else {
-                        s_logger.error("Unable to find properites file", fnfex);
-                    }
                 } catch (IOException ioe) {
                     s_logger.error("IO Exception loading properties file", ioe);
                 }
             }
+            else {
+                // in case of a file within a jar in classpath, try to open stream using url
+                try {
+                    loadFromJar(preProcessedCommands, configFile);
+                } catch (IOException e) {
+                    s_logger.error("IO Exception loading properties file from jar", e);
+                }
+            }
         }
 
         for (Object key : preProcessedCommands.keySet()) {
@@ -158,6 +163,7 @@ public class PropertiesUtil {
             String value = preProcessedCommand.substring(splitIndex + 1);
             configMap.put((String)key, value);
         }
+
         return configMap;
     }
 


[32/50] [abbrv] git commit: updated refs/heads/feature/vpc-ipv6 to 6140db5

Posted by ek...@apache.org.
Formatting the code - Adding final modifier and indenting the code

Signed-off-by: Daan Hoogland <da...@gmail.com>


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/62716636
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/62716636
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/62716636

Branch: refs/heads/feature/vpc-ipv6
Commit: 6271663682ad0cb08a29720f5e4310e79cd894e3
Parents: c3b4c7a
Author: wilderrodrigues <wr...@schubergphilis.com>
Authored: Thu Jun 4 08:30:00 2015 +0200
Committer: Daan Hoogland <da...@gmail.com>
Committed: Thu Jun 4 12:34:04 2015 +0200

----------------------------------------------------------------------
 .../kvm/storage/KVMStorageProcessor.java        | 628 +++++++++----------
 1 file changed, 313 insertions(+), 315 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/62716636/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java
index 3b806e9..201659d 100644
--- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java
+++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java
@@ -37,20 +37,6 @@ import java.util.UUID;
 
 import javax.naming.ConfigurationException;
 
-import com.cloud.hypervisor.Hypervisor;
-import org.apache.commons.io.FileUtils;
-import org.apache.log4j.Logger;
-import org.libvirt.Connect;
-import org.libvirt.Domain;
-import org.libvirt.DomainInfo;
-import org.libvirt.DomainSnapshot;
-import org.libvirt.LibvirtException;
-
-import com.ceph.rados.IoCTX;
-import com.ceph.rados.Rados;
-import com.ceph.rbd.Rbd;
-import com.ceph.rbd.RbdImage;
-
 import org.apache.cloudstack.storage.command.AttachAnswer;
 import org.apache.cloudstack.storage.command.AttachCommand;
 import org.apache.cloudstack.storage.command.CopyCmdAnswer;
@@ -72,7 +58,18 @@ import org.apache.cloudstack.utils.qemu.QemuImg;
 import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
 import org.apache.cloudstack.utils.qemu.QemuImgException;
 import org.apache.cloudstack.utils.qemu.QemuImgFile;
+import org.apache.commons.io.FileUtils;
+import org.apache.log4j.Logger;
+import org.libvirt.Connect;
+import org.libvirt.Domain;
+import org.libvirt.DomainInfo;
+import org.libvirt.DomainSnapshot;
+import org.libvirt.LibvirtException;
 
+import com.ceph.rados.IoCTX;
+import com.ceph.rados.Rados;
+import com.ceph.rbd.Rbd;
+import com.ceph.rbd.RbdImage;
 import com.cloud.agent.api.Answer;
 import com.cloud.agent.api.storage.PrimaryStorageDownloadAnswer;
 import com.cloud.agent.api.to.DataObjectType;
@@ -82,6 +79,7 @@ import com.cloud.agent.api.to.DiskTO;
 import com.cloud.agent.api.to.NfsTO;
 import com.cloud.agent.api.to.S3TO;
 import com.cloud.exception.InternalErrorException;
+import com.cloud.hypervisor.Hypervisor;
 import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
 import com.cloud.hypervisor.kvm.resource.LibvirtConnection;
 import com.cloud.hypervisor.kvm.resource.LibvirtDomainXMLParser;
@@ -110,7 +108,7 @@ public class KVMStorageProcessor implements StorageProcessor {
     private String _manageSnapshotPath;
     private int _cmdsTimeout;
 
-    public KVMStorageProcessor(KVMStoragePoolManager storagePoolMgr, LibvirtComputingResource resource) {
+    public KVMStorageProcessor(final KVMStoragePoolManager storagePoolMgr, final LibvirtComputingResource resource) {
         this.storagePoolMgr = storagePoolMgr;
         this.resource = resource;
     }
@@ -119,7 +117,7 @@ public class KVMStorageProcessor implements StorageProcessor {
         return "scripts/storage/qcow2";
     }
 
-    public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
+    public boolean configure(final String name, final Map<String, Object> params) throws ConfigurationException {
         storageLayer = new JavaStorageLayer();
         storageLayer.configure("StorageLayer", params);
 
@@ -138,34 +136,34 @@ public class KVMStorageProcessor implements StorageProcessor {
             throw new ConfigurationException("Unable to find the managesnapshot.sh");
         }
 
-        String value = (String)params.get("cmds.timeout");
+        final String value = (String)params.get("cmds.timeout");
         _cmdsTimeout = NumbersUtil.parseInt(value, 7200) * 1000;
         return true;
     }
 
     @Override
-    public SnapshotAndCopyAnswer snapshotAndCopy(SnapshotAndCopyCommand cmd) {
+    public SnapshotAndCopyAnswer snapshotAndCopy(final SnapshotAndCopyCommand cmd) {
         s_logger.info("'SnapshotAndCopyAnswer snapshotAndCopy(SnapshotAndCopyCommand)' not currently used for KVMStorageProcessor");
 
         return new SnapshotAndCopyAnswer();
     }
 
     @Override
-    public Answer copyTemplateToPrimaryStorage(CopyCommand cmd) {
-        DataTO srcData = cmd.getSrcTO();
-        DataTO destData = cmd.getDestTO();
-        TemplateObjectTO template = (TemplateObjectTO)srcData;
-        DataStoreTO imageStore = template.getDataStore();
-        PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO)destData.getDataStore();
+    public Answer copyTemplateToPrimaryStorage(final CopyCommand cmd) {
+        final DataTO srcData = cmd.getSrcTO();
+        final DataTO destData = cmd.getDestTO();
+        final TemplateObjectTO template = (TemplateObjectTO)srcData;
+        final DataStoreTO imageStore = template.getDataStore();
+        final PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO)destData.getDataStore();
 
         if (!(imageStore instanceof NfsTO)) {
             return new CopyCmdAnswer("unsupported protocol");
         }
 
-        NfsTO nfsImageStore = (NfsTO)imageStore;
-        String tmplturl = nfsImageStore.getUrl() + File.separator + template.getPath();
-        int index = tmplturl.lastIndexOf("/");
-        String mountpoint = tmplturl.substring(0, index);
+        final NfsTO nfsImageStore = (NfsTO)imageStore;
+        final String tmplturl = nfsImageStore.getUrl() + File.separator + template.getPath();
+        final int index = tmplturl.lastIndexOf("/");
+        final String mountpoint = tmplturl.substring(0, index);
         String tmpltname = null;
         if (index < tmplturl.length() - 1) {
             tmpltname = tmplturl.substring(index + 1);
@@ -179,11 +177,11 @@ public class KVMStorageProcessor implements StorageProcessor {
             /* Get template vol */
             if (tmpltname == null) {
                 secondaryPool.refresh();
-                List<KVMPhysicalDisk> disks = secondaryPool.listPhysicalDisks();
+                final List<KVMPhysicalDisk> disks = secondaryPool.listPhysicalDisks();
                 if (disks == null || disks.isEmpty()) {
                     return new PrimaryStorageDownloadAnswer("Failed to get volumes from pool: " + secondaryPool.getUuid());
                 }
-                for (KVMPhysicalDisk disk : disks) {
+                for (final KVMPhysicalDisk disk : disks) {
                     if (disk.getName().endsWith("qcow2")) {
                         tmplVol = disk;
                         break;
@@ -199,11 +197,11 @@ public class KVMStorageProcessor implements StorageProcessor {
 
             /* Copy volume to primary storage */
             s_logger.debug("Copying template to primary storage, template format is " + tmplVol.getFormat() );
-            KVMStoragePool primaryPool = storagePoolMgr.getStoragePool(primaryStore.getPoolType(), primaryStore.getUuid());
+            final KVMStoragePool primaryPool = storagePoolMgr.getStoragePool(primaryStore.getPoolType(), primaryStore.getUuid());
 
             KVMPhysicalDisk primaryVol = null;
             if (destData instanceof VolumeObjectTO) {
-                VolumeObjectTO volume = (VolumeObjectTO)destData;
+                final VolumeObjectTO volume = (VolumeObjectTO)destData;
                 // pass along volume's target size if it's bigger than template's size, for storage types that copy template rather than cloning on deploy
                 if (volume.getSize() != null && volume.getSize() > tmplVol.getVirtualSize()) {
                     s_logger.debug("Using configured size of " + volume.getSize());
@@ -214,7 +212,7 @@ public class KVMStorageProcessor implements StorageProcessor {
                 }
                 primaryVol = storagePoolMgr.copyPhysicalDisk(tmplVol, volume.getUuid(), primaryPool, cmd.getWaitInMillSeconds());
             } else if (destData instanceof TemplateObjectTO) {
-                TemplateObjectTO destTempl = (TemplateObjectTO)destData;
+                final TemplateObjectTO destTempl = (TemplateObjectTO)destData;
                 primaryVol = storagePoolMgr.copyPhysicalDisk(tmplVol, destTempl.getUuid(), primaryPool, cmd.getWaitInMillSeconds());
             } else {
                 primaryVol = storagePoolMgr.copyPhysicalDisk(tmplVol, UUID.randomUUID().toString(), primaryPool, cmd.getWaitInMillSeconds());
@@ -226,7 +224,7 @@ public class KVMStorageProcessor implements StorageProcessor {
              *
              */
             if (destData.getObjectType() == DataObjectType.TEMPLATE) {
-                TemplateObjectTO newTemplate = new TemplateObjectTO();
+                final TemplateObjectTO newTemplate = new TemplateObjectTO();
                 newTemplate.setPath(primaryVol.getName());
                 newTemplate.setSize(primaryVol.getSize());
                 if (primaryPool.getType() == StoragePoolType.RBD) {
@@ -236,34 +234,34 @@ public class KVMStorageProcessor implements StorageProcessor {
                 }
                 data = newTemplate;
             } else if (destData.getObjectType() == DataObjectType.VOLUME) {
-                VolumeObjectTO volumeObjectTO = new VolumeObjectTO();
+                final VolumeObjectTO volumeObjectTO = new VolumeObjectTO();
                 volumeObjectTO.setPath(primaryVol.getName());
                 volumeObjectTO.setSize(primaryVol.getSize());
-                if (primaryVol.getFormat() == PhysicalDiskFormat.RAW)
+                if (primaryVol.getFormat() == PhysicalDiskFormat.RAW) {
                     volumeObjectTO.setFormat(ImageFormat.RAW);
-                else if (primaryVol.getFormat() == PhysicalDiskFormat.QCOW2) {
+                } else if (primaryVol.getFormat() == PhysicalDiskFormat.QCOW2) {
                     volumeObjectTO.setFormat(ImageFormat.QCOW2);
                 }
                 data = volumeObjectTO;
             }
             return new CopyCmdAnswer(data);
-        } catch (CloudRuntimeException e) {
+        } catch (final CloudRuntimeException e) {
             return new CopyCmdAnswer(e.toString());
         } finally {
             try {
                 if (secondaryPool != null) {
                     secondaryPool.delete();
                 }
-            } catch(Exception e) {
+            } catch(final Exception e) {
                 s_logger.debug("Failed to clean up secondary storage", e);
             }
         }
     }
 
     // this is much like PrimaryStorageDownloadCommand, but keeping it separate. copies template direct to root disk
-    private KVMPhysicalDisk templateToPrimaryDownload(String templateUrl, KVMStoragePool primaryPool, String volUuid, Long size, int timeout) {
-        int index = templateUrl.lastIndexOf("/");
-        String mountpoint = templateUrl.substring(0, index);
+    private KVMPhysicalDisk templateToPrimaryDownload(final String templateUrl, final KVMStoragePool primaryPool, final String volUuid, final Long size, final int timeout) {
+        final int index = templateUrl.lastIndexOf("/");
+        final String mountpoint = templateUrl.substring(0, index);
         String templateName = null;
         if (index < templateUrl.length() - 1) {
             templateName = templateUrl.substring(index + 1);
@@ -276,12 +274,12 @@ public class KVMStorageProcessor implements StorageProcessor {
             /* Get template vol */
             if (templateName == null) {
                 secondaryPool.refresh();
-                List<KVMPhysicalDisk> disks = secondaryPool.listPhysicalDisks();
+                final List<KVMPhysicalDisk> disks = secondaryPool.listPhysicalDisks();
                 if (disks == null || disks.isEmpty()) {
                     s_logger.error("Failed to get volumes from pool: " + secondaryPool.getUuid());
                     return null;
                 }
-                for (KVMPhysicalDisk disk : disks) {
+                for (final KVMPhysicalDisk disk : disks) {
                     if (disk.getName().endsWith("qcow2")) {
                         templateVol = disk;
                         break;
@@ -305,9 +303,9 @@ public class KVMStorageProcessor implements StorageProcessor {
                 s_logger.debug("Using templates disk size of " + templateVol.getVirtualSize() + "since size passed was " + size);
             }
 
-            KVMPhysicalDisk primaryVol = storagePoolMgr.copyPhysicalDisk(templateVol, volUuid, primaryPool, timeout);
+            final KVMPhysicalDisk primaryVol = storagePoolMgr.copyPhysicalDisk(templateVol, volUuid, primaryPool, timeout);
             return primaryVol;
-        } catch (CloudRuntimeException e) {
+        } catch (final CloudRuntimeException e) {
             s_logger.error("Failed to download template to primary storage", e);
             return null;
         } finally {
@@ -318,13 +316,13 @@ public class KVMStorageProcessor implements StorageProcessor {
     }
 
     @Override
-    public Answer cloneVolumeFromBaseTemplate(CopyCommand cmd) {
-        DataTO srcData = cmd.getSrcTO();
-        DataTO destData = cmd.getDestTO();
-        TemplateObjectTO template = (TemplateObjectTO)srcData;
-        DataStoreTO imageStore = template.getDataStore();
-        VolumeObjectTO volume = (VolumeObjectTO)destData;
-        PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO)volume.getDataStore();
+    public Answer cloneVolumeFromBaseTemplate(final CopyCommand cmd) {
+        final DataTO srcData = cmd.getSrcTO();
+        final DataTO destData = cmd.getDestTO();
+        final TemplateObjectTO template = (TemplateObjectTO)srcData;
+        final DataStoreTO imageStore = template.getDataStore();
+        final VolumeObjectTO volume = (VolumeObjectTO)destData;
+        final PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO)volume.getDataStore();
         KVMPhysicalDisk BaseVol = null;
         KVMStoragePool primaryPool = null;
         KVMPhysicalDisk vol = null;
@@ -350,7 +348,7 @@ public class KVMStorageProcessor implements StorageProcessor {
                 return new CopyCmdAnswer(" Can't create storage volume on storage pool");
             }
 
-            VolumeObjectTO newVol = new VolumeObjectTO();
+            final VolumeObjectTO newVol = new VolumeObjectTO();
             newVol.setPath(vol.getName());
             newVol.setSize(volume.getSize());
 
@@ -363,59 +361,59 @@ public class KVMStorageProcessor implements StorageProcessor {
             }
 
             return new CopyCmdAnswer(newVol);
-        } catch (CloudRuntimeException e) {
+        } catch (final CloudRuntimeException e) {
             s_logger.debug("Failed to create volume: ", e);
             return new CopyCmdAnswer(e.toString());
         }
     }
 
     @Override
-    public Answer copyVolumeFromImageCacheToPrimary(CopyCommand cmd) {
-        DataTO srcData = cmd.getSrcTO();
-        DataTO destData = cmd.getDestTO();
-        DataStoreTO srcStore = srcData.getDataStore();
-        DataStoreTO destStore = destData.getDataStore();
-        VolumeObjectTO srcVol = (VolumeObjectTO)srcData;
-        ImageFormat srcFormat = srcVol.getFormat();
-        PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO)destStore;
+    public Answer copyVolumeFromImageCacheToPrimary(final CopyCommand cmd) {
+        final DataTO srcData = cmd.getSrcTO();
+        final DataTO destData = cmd.getDestTO();
+        final DataStoreTO srcStore = srcData.getDataStore();
+        final DataStoreTO destStore = destData.getDataStore();
+        final VolumeObjectTO srcVol = (VolumeObjectTO)srcData;
+        final ImageFormat srcFormat = srcVol.getFormat();
+        final PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO)destStore;
         if (!(srcStore instanceof NfsTO)) {
             return new CopyCmdAnswer("can only handle nfs storage");
         }
-        NfsTO nfsStore = (NfsTO)srcStore;
-        String srcVolumePath = srcData.getPath();
-        String secondaryStorageUrl = nfsStore.getUrl();
+        final NfsTO nfsStore = (NfsTO)srcStore;
+        final String srcVolumePath = srcData.getPath();
+        final String secondaryStorageUrl = nfsStore.getUrl();
         KVMStoragePool secondaryStoragePool = null;
         KVMStoragePool primaryPool = null;
         try {
             try {
                 primaryPool = storagePoolMgr.getStoragePool(primaryStore.getPoolType(), primaryStore.getUuid());
-            } catch (CloudRuntimeException e) {
+            } catch (final CloudRuntimeException e) {
                 if (e.getMessage().contains("not found")) {
                     primaryPool =
-                        storagePoolMgr.createStoragePool(primaryStore.getUuid(), primaryStore.getHost(), primaryStore.getPort(), primaryStore.getPath(), null,
-                            primaryStore.getPoolType());
+                            storagePoolMgr.createStoragePool(primaryStore.getUuid(), primaryStore.getHost(), primaryStore.getPort(), primaryStore.getPath(), null,
+                                    primaryStore.getPoolType());
                 } else {
                     return new CopyCmdAnswer(e.getMessage());
                 }
             }
 
-            String volumeName = UUID.randomUUID().toString();
+            final String volumeName = UUID.randomUUID().toString();
 
-            int index = srcVolumePath.lastIndexOf(File.separator);
-            String volumeDir = srcVolumePath.substring(0, index);
+            final int index = srcVolumePath.lastIndexOf(File.separator);
+            final String volumeDir = srcVolumePath.substring(0, index);
             String srcVolumeName = srcVolumePath.substring(index + 1);
             secondaryStoragePool = storagePoolMgr.getStoragePoolByURI(secondaryStorageUrl + File.separator + volumeDir);
             if (!srcVolumeName.endsWith(".qcow2") && srcFormat == ImageFormat.QCOW2) {
                 srcVolumeName = srcVolumeName + ".qcow2";
             }
-            KVMPhysicalDisk volume = secondaryStoragePool.getPhysicalDisk(srcVolumeName);
+            final KVMPhysicalDisk volume = secondaryStoragePool.getPhysicalDisk(srcVolumeName);
             volume.setFormat(PhysicalDiskFormat.valueOf(srcFormat.toString()));
-            KVMPhysicalDisk newDisk = storagePoolMgr.copyPhysicalDisk(volume, volumeName, primaryPool, cmd.getWaitInMillSeconds());
-            VolumeObjectTO newVol = new VolumeObjectTO();
+            final KVMPhysicalDisk newDisk = storagePoolMgr.copyPhysicalDisk(volume, volumeName, primaryPool, cmd.getWaitInMillSeconds());
+            final VolumeObjectTO newVol = new VolumeObjectTO();
             newVol.setFormat(ImageFormat.valueOf(newDisk.getFormat().toString().toUpperCase()));
             newVol.setPath(volumeName);
             return new CopyCmdAnswer(newVol);
-        } catch (CloudRuntimeException e) {
+        } catch (final CloudRuntimeException e) {
             s_logger.debug("Failed to ccopyVolumeFromImageCacheToPrimary: ", e);
             return new CopyCmdAnswer(e.toString());
         } finally {
@@ -426,30 +424,30 @@ public class KVMStorageProcessor implements StorageProcessor {
     }
 
     @Override
-    public Answer copyVolumeFromPrimaryToSecondary(CopyCommand cmd) {
-        DataTO srcData = cmd.getSrcTO();
-        DataTO destData = cmd.getDestTO();
-        VolumeObjectTO srcVol = (VolumeObjectTO)srcData;
-        VolumeObjectTO destVol = (VolumeObjectTO)destData;
-        ImageFormat srcFormat = srcVol.getFormat();
-        ImageFormat destFormat = destVol.getFormat();
-        DataStoreTO srcStore = srcData.getDataStore();
-        DataStoreTO destStore = destData.getDataStore();
-        PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO)srcStore;
+    public Answer copyVolumeFromPrimaryToSecondary(final CopyCommand cmd) {
+        final DataTO srcData = cmd.getSrcTO();
+        final DataTO destData = cmd.getDestTO();
+        final VolumeObjectTO srcVol = (VolumeObjectTO)srcData;
+        final VolumeObjectTO destVol = (VolumeObjectTO)destData;
+        final ImageFormat srcFormat = srcVol.getFormat();
+        final ImageFormat destFormat = destVol.getFormat();
+        final DataStoreTO srcStore = srcData.getDataStore();
+        final DataStoreTO destStore = destData.getDataStore();
+        final PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO)srcStore;
         if (!(destStore instanceof NfsTO)) {
             return new CopyCmdAnswer("can only handle nfs storage");
         }
-        NfsTO nfsStore = (NfsTO)destStore;
-        String srcVolumePath = srcData.getPath();
-        String destVolumePath = destData.getPath();
-        String secondaryStorageUrl = nfsStore.getUrl();
+        final NfsTO nfsStore = (NfsTO)destStore;
+        final String srcVolumePath = srcData.getPath();
+        final String destVolumePath = destData.getPath();
+        final String secondaryStorageUrl = nfsStore.getUrl();
         KVMStoragePool secondaryStoragePool = null;
 
         try {
-            String volumeName = UUID.randomUUID().toString();
+            final String volumeName = UUID.randomUUID().toString();
 
-            String destVolumeName = volumeName + "." + destFormat.getFileExtension();
-            KVMPhysicalDisk volume = storagePoolMgr.getPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), srcVolumePath);
+            final String destVolumeName = volumeName + "." + destFormat.getFileExtension();
+            final KVMPhysicalDisk volume = storagePoolMgr.getPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), srcVolumePath);
             volume.setFormat(PhysicalDiskFormat.valueOf(srcFormat.toString()));
 
             secondaryStoragePool = storagePoolMgr.getStoragePoolByURI(secondaryStorageUrl);
@@ -457,11 +455,11 @@ public class KVMStorageProcessor implements StorageProcessor {
             storagePoolMgr.deleteStoragePool(secondaryStoragePool.getType(), secondaryStoragePool.getUuid());
             secondaryStoragePool = storagePoolMgr.getStoragePoolByURI(secondaryStorageUrl + File.separator + destVolumePath);
             storagePoolMgr.copyPhysicalDisk(volume, destVolumeName, secondaryStoragePool, cmd.getWaitInMillSeconds());
-            VolumeObjectTO newVol = new VolumeObjectTO();
+            final VolumeObjectTO newVol = new VolumeObjectTO();
             newVol.setPath(destVolumePath + File.separator + destVolumeName);
             newVol.setFormat(destFormat);
             return new CopyCmdAnswer(newVol);
-        } catch (CloudRuntimeException e) {
+        } catch (final CloudRuntimeException e) {
             s_logger.debug("Failed to copyVolumeFromPrimaryToSecondary: ", e);
             return new CopyCmdAnswer(e.toString());
         } finally {
@@ -472,41 +470,41 @@ public class KVMStorageProcessor implements StorageProcessor {
     }
 
     @Override
-    public Answer createTemplateFromVolume(CopyCommand cmd) {
-        DataTO srcData = cmd.getSrcTO();
-        DataTO destData = cmd.getDestTO();
-        int wait = cmd.getWaitInMillSeconds();
-        TemplateObjectTO template = (TemplateObjectTO)destData;
-        DataStoreTO imageStore = template.getDataStore();
-        VolumeObjectTO volume = (VolumeObjectTO)srcData;
-        PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO)volume.getDataStore();
+    public Answer createTemplateFromVolume(final CopyCommand cmd) {
+        final DataTO srcData = cmd.getSrcTO();
+        final DataTO destData = cmd.getDestTO();
+        final int wait = cmd.getWaitInMillSeconds();
+        final TemplateObjectTO template = (TemplateObjectTO)destData;
+        final DataStoreTO imageStore = template.getDataStore();
+        final VolumeObjectTO volume = (VolumeObjectTO)srcData;
+        final PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO)volume.getDataStore();
 
         if (!(imageStore instanceof NfsTO)) {
             return new CopyCmdAnswer("unsupported protocol");
         }
-        NfsTO nfsImageStore = (NfsTO)imageStore;
+        final NfsTO nfsImageStore = (NfsTO)imageStore;
 
         KVMStoragePool secondaryStorage = null;
         KVMStoragePool primary = null;
         try {
-            String templateFolder = template.getPath();
+            final String templateFolder = template.getPath();
 
             secondaryStorage = storagePoolMgr.getStoragePoolByURI(nfsImageStore.getUrl());
 
             primary = storagePoolMgr.getStoragePool(primaryStore.getPoolType(), primaryStore.getUuid());
 
-            KVMPhysicalDisk disk = storagePoolMgr.getPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), volume.getPath());
-            String tmpltPath = secondaryStorage.getLocalPath() + File.separator + templateFolder;
+            final KVMPhysicalDisk disk = storagePoolMgr.getPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), volume.getPath());
+            final String tmpltPath = secondaryStorage.getLocalPath() + File.separator + templateFolder;
             storageLayer.mkdirs(tmpltPath);
-            String templateName = UUID.randomUUID().toString();
+            final String templateName = UUID.randomUUID().toString();
 
             if (primary.getType() != StoragePoolType.RBD) {
-                Script command = new Script(_createTmplPath, wait, s_logger);
+                final Script command = new Script(_createTmplPath, wait, s_logger);
                 command.add("-f", disk.getPath());
                 command.add("-t", tmpltPath);
                 command.add("-n", templateName + ".qcow2");
 
-                String result = command.execute();
+                final String result = command.execute();
 
                 if (result != null) {
                     s_logger.debug("failed to create template: " + result);
@@ -515,60 +513,60 @@ public class KVMStorageProcessor implements StorageProcessor {
             } else {
                 s_logger.debug("Converting RBD disk " + disk.getPath() + " into template " + templateName);
 
-                QemuImgFile srcFile =
-                    new QemuImgFile(KVMPhysicalDisk.RBDStringBuilder(primary.getSourceHost(), primary.getSourcePort(), primary.getAuthUserName(),
-                        primary.getAuthSecret(), disk.getPath()));
+                final QemuImgFile srcFile =
+                        new QemuImgFile(KVMPhysicalDisk.RBDStringBuilder(primary.getSourceHost(), primary.getSourcePort(), primary.getAuthUserName(),
+                                primary.getAuthSecret(), disk.getPath()));
                 srcFile.setFormat(PhysicalDiskFormat.RAW);
 
-                QemuImgFile destFile = new QemuImgFile(tmpltPath + "/" + templateName + ".qcow2");
+                final QemuImgFile destFile = new QemuImgFile(tmpltPath + "/" + templateName + ".qcow2");
                 destFile.setFormat(PhysicalDiskFormat.QCOW2);
 
-                QemuImg q = new QemuImg(cmd.getWaitInMillSeconds());
+                final QemuImg q = new QemuImg(cmd.getWaitInMillSeconds());
                 try {
                     q.convert(srcFile, destFile);
-                } catch (QemuImgException e) {
+                } catch (final QemuImgException e) {
                     s_logger.error("Failed to create new template while converting " + srcFile.getFileName() + " to " + destFile.getFileName() + " the error was: " +
-                        e.getMessage());
+                            e.getMessage());
                 }
 
-                File templateProp = new File(tmpltPath + "/template.properties");
+                final File templateProp = new File(tmpltPath + "/template.properties");
                 if (!templateProp.exists()) {
                     templateProp.createNewFile();
                 }
 
                 String templateContent = "filename=" + templateName + ".qcow2" + System.getProperty("line.separator");
 
-                DateFormat dateFormat = new SimpleDateFormat("MM_dd_yyyy");
-                Date date = new Date();
+                final DateFormat dateFormat = new SimpleDateFormat("MM_dd_yyyy");
+                final Date date = new Date();
                 templateContent += "snapshot.name=" + dateFormat.format(date) + System.getProperty("line.separator");
 
-                FileOutputStream templFo = new FileOutputStream(templateProp);
+                final FileOutputStream templFo = new FileOutputStream(templateProp);
                 templFo.write(templateContent.getBytes());
                 templFo.flush();
                 templFo.close();
             }
 
-            Map<String, Object> params = new HashMap<String, Object>();
+            final Map<String, Object> params = new HashMap<String, Object>();
             params.put(StorageLayer.InstanceConfigKey, storageLayer);
-            Processor qcow2Processor = new QCOW2Processor();
+            final Processor qcow2Processor = new QCOW2Processor();
 
             qcow2Processor.configure("QCOW2 Processor", params);
 
-            FormatInfo info = qcow2Processor.process(tmpltPath, null, templateName);
+            final FormatInfo info = qcow2Processor.process(tmpltPath, null, templateName);
 
-            TemplateLocation loc = new TemplateLocation(storageLayer, tmpltPath);
+            final TemplateLocation loc = new TemplateLocation(storageLayer, tmpltPath);
             loc.create(1, true, templateName);
             loc.addFormat(info);
             loc.save();
 
-            TemplateObjectTO newTemplate = new TemplateObjectTO();
+            final TemplateObjectTO newTemplate = new TemplateObjectTO();
             newTemplate.setPath(templateFolder + File.separator + templateName + ".qcow2");
             newTemplate.setSize(info.virtualSize);
             newTemplate.setPhysicalSize(info.size);
             newTemplate.setFormat(ImageFormat.QCOW2);
             newTemplate.setName(templateName);
             return new CopyCmdAnswer(newTemplate);
-        } catch (Exception e) {
+        } catch (final Exception e) {
             s_logger.debug("Failed to createTemplateFromVolume: ", e);
             return new CopyCmdAnswer(e.toString());
         } finally {
@@ -579,15 +577,15 @@ public class KVMStorageProcessor implements StorageProcessor {
     }
 
     @Override
-    public Answer createTemplateFromSnapshot(CopyCommand cmd) {
+    public Answer createTemplateFromSnapshot(final CopyCommand cmd) {
         return null;  //To change body of implemented methods use File | Settings | File Templates.
     }
 
-    protected String copyToS3(File srcFile, S3TO destStore, String destPath) throws InterruptedException {
+    protected String copyToS3(final File srcFile, final S3TO destStore, final String destPath) throws InterruptedException {
         final String bucket = destStore.getBucketName();
 
-        long srcSize = srcFile.length();
-        String key = destPath + S3Utils.SEPARATOR + srcFile.getName();
+        final long srcSize = srcFile.length();
+        final String key = destPath + S3Utils.SEPARATOR + srcFile.getName();
         if (!destStore.getSingleUpload(srcSize)) {
             mputFile(destStore, srcFile, bucket, key);
         } else {
@@ -596,15 +594,15 @@ public class KVMStorageProcessor implements StorageProcessor {
         return key;
     }
 
-    protected Answer copyToObjectStore(CopyCommand cmd) {
-        DataTO srcData = cmd.getSrcTO();
-        DataTO destData = cmd.getDestTO();
-        DataStoreTO imageStore = destData.getDataStore();
-        NfsTO srcStore = (NfsTO)srcData.getDataStore();
-        String srcPath = srcData.getPath();
-        int index = srcPath.lastIndexOf(File.separator);
-        String srcSnapshotDir = srcPath.substring(0, index);
-        String srcFileName = srcPath.substring(index + 1);
+    protected Answer copyToObjectStore(final CopyCommand cmd) {
+        final DataTO srcData = cmd.getSrcTO();
+        final DataTO destData = cmd.getDestTO();
+        final DataStoreTO imageStore = destData.getDataStore();
+        final NfsTO srcStore = (NfsTO)srcData.getDataStore();
+        final String srcPath = srcData.getPath();
+        final int index = srcPath.lastIndexOf(File.separator);
+        final String srcSnapshotDir = srcPath.substring(0, index);
+        final String srcFileName = srcPath.substring(index + 1);
         KVMStoragePool srcStorePool = null;
         File srcFile = null;
         try {
@@ -622,10 +620,10 @@ public class KVMStorageProcessor implements StorageProcessor {
             } else {
                 return new CopyCmdAnswer("Unsupported protocol");
             }
-            SnapshotObjectTO newSnapshot = new SnapshotObjectTO();
+            final SnapshotObjectTO newSnapshot = new SnapshotObjectTO();
             newSnapshot.setPath(destPath);
             return new CopyCmdAnswer(newSnapshot);
-        } catch (Exception e) {
+        } catch (final Exception e) {
             s_logger.error("failed to upload" + srcPath, e);
             return new CopyCmdAnswer("failed to upload" + srcPath + e.toString());
         } finally {
@@ -636,55 +634,55 @@ public class KVMStorageProcessor implements StorageProcessor {
                 if (srcStorePool != null) {
                     srcStorePool.delete();
                 }
-            } catch (Exception e) {
+            } catch (final Exception e) {
                 s_logger.debug("Failed to clean up:", e);
             }
         }
     }
 
-    protected Answer backupSnapshotForObjectStore(CopyCommand cmd) {
-        DataTO destData = cmd.getDestTO();
-        DataStoreTO imageStore = destData.getDataStore();
-        DataTO cacheData = cmd.getCacheTO();
+    protected Answer backupSnapshotForObjectStore(final CopyCommand cmd) {
+        final DataTO destData = cmd.getDestTO();
+        final DataStoreTO imageStore = destData.getDataStore();
+        final DataTO cacheData = cmd.getCacheTO();
         if (cacheData == null) {
             return new CopyCmdAnswer("Failed to copy to object store without cache store");
         }
-        DataStoreTO cacheStore = cacheData.getDataStore();
+        final DataStoreTO cacheStore = cacheData.getDataStore();
         ((SnapshotObjectTO)destData).setDataStore(cacheStore);
-        CopyCmdAnswer answer = (CopyCmdAnswer)backupSnapshot(cmd);
+        final CopyCmdAnswer answer = (CopyCmdAnswer)backupSnapshot(cmd);
         if (!answer.getResult()) {
             return answer;
         }
-        SnapshotObjectTO snapshotOnCacheStore = (SnapshotObjectTO)answer.getNewData();
+        final SnapshotObjectTO snapshotOnCacheStore = (SnapshotObjectTO)answer.getNewData();
         snapshotOnCacheStore.setDataStore(cacheStore);
         ((SnapshotObjectTO)destData).setDataStore(imageStore);
-        CopyCommand newCpyCmd = new CopyCommand(snapshotOnCacheStore, destData, cmd.getWaitInMillSeconds(), cmd.executeInSequence());
+        final CopyCommand newCpyCmd = new CopyCommand(snapshotOnCacheStore, destData, cmd.getWaitInMillSeconds(), cmd.executeInSequence());
         return copyToObjectStore(newCpyCmd);
     }
 
     @Override
-    public Answer backupSnapshot(CopyCommand cmd) {
-        DataTO srcData = cmd.getSrcTO();
-        DataTO destData = cmd.getDestTO();
-        SnapshotObjectTO snapshot = (SnapshotObjectTO)srcData;
-        PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO)snapshot.getDataStore();
-        SnapshotObjectTO destSnapshot = (SnapshotObjectTO)destData;
-        DataStoreTO imageStore = destData.getDataStore();
+    public Answer backupSnapshot(final CopyCommand cmd) {
+        final DataTO srcData = cmd.getSrcTO();
+        final DataTO destData = cmd.getDestTO();
+        final SnapshotObjectTO snapshot = (SnapshotObjectTO)srcData;
+        final PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO)snapshot.getDataStore();
+        final SnapshotObjectTO destSnapshot = (SnapshotObjectTO)destData;
+        final DataStoreTO imageStore = destData.getDataStore();
 
         if (!(imageStore instanceof NfsTO)) {
             return backupSnapshotForObjectStore(cmd);
         }
-        NfsTO nfsImageStore = (NfsTO)imageStore;
+        final NfsTO nfsImageStore = (NfsTO)imageStore;
 
-        String secondaryStoragePoolUrl = nfsImageStore.getUrl();
+        final String secondaryStoragePoolUrl = nfsImageStore.getUrl();
         // NOTE: snapshot name is encoded in snapshot path
-        int index = snapshot.getPath().lastIndexOf("/");
+        final int index = snapshot.getPath().lastIndexOf("/");
 
-        String snapshotName = snapshot.getPath().substring(index + 1);
-        String volumePath = snapshot.getVolume().getPath();
+        final String snapshotName = snapshot.getPath().substring(index + 1);
+        final String volumePath = snapshot.getVolume().getPath();
         String snapshotDestPath = null;
         String snapshotRelPath = null;
-        String vmName = snapshot.getVmName();
+        final String vmName = snapshot.getVmName();
         KVMStoragePool secondaryStoragePool = null;
         Connect conn = null;
         KVMPhysicalDisk snapshotDisk = null;
@@ -694,7 +692,7 @@ public class KVMStorageProcessor implements StorageProcessor {
 
             secondaryStoragePool = storagePoolMgr.getStoragePoolByURI(secondaryStoragePoolUrl);
 
-            String ssPmountPath = secondaryStoragePool.getLocalPath();
+            final String ssPmountPath = secondaryStoragePool.getLocalPath();
             snapshotRelPath = destSnapshot.getPath();
 
             snapshotDestPath = ssPmountPath + File.separator + snapshotRelPath;
@@ -709,69 +707,69 @@ public class KVMStorageProcessor implements StorageProcessor {
              * This reduces the amount of time and storage it takes to back up a snapshot dramatically
              */
             if (primaryPool.getType() == StoragePoolType.RBD) {
-                String rbdSnapshot = snapshotDisk.getPath() +  "@" + snapshotName;
-                String snapshotFile = snapshotDestPath + "/" + snapshotName;
+                final String rbdSnapshot = snapshotDisk.getPath() +  "@" + snapshotName;
+                final String snapshotFile = snapshotDestPath + "/" + snapshotName;
                 try {
                     s_logger.debug("Attempting to backup RBD snapshot " + rbdSnapshot);
 
-                    File snapDir = new File(snapshotDestPath);
+                    final File snapDir = new File(snapshotDestPath);
                     s_logger.debug("Attempting to create " + snapDir.getAbsolutePath() + " recursively for snapshot storage");
                     FileUtils.forceMkdir(snapDir);
 
-                    QemuImgFile srcFile =
-                        new QemuImgFile(KVMPhysicalDisk.RBDStringBuilder(primaryPool.getSourceHost(), primaryPool.getSourcePort(), primaryPool.getAuthUserName(),
-                                                                         primaryPool.getAuthSecret(), rbdSnapshot));
+                    final QemuImgFile srcFile =
+                            new QemuImgFile(KVMPhysicalDisk.RBDStringBuilder(primaryPool.getSourceHost(), primaryPool.getSourcePort(), primaryPool.getAuthUserName(),
+                                    primaryPool.getAuthSecret(), rbdSnapshot));
                     srcFile.setFormat(PhysicalDiskFormat.RAW);
 
-                    QemuImgFile destFile = new QemuImgFile(snapshotFile);
+                    final QemuImgFile destFile = new QemuImgFile(snapshotFile);
                     destFile.setFormat(snapshotDisk.getFormat());
 
                     s_logger.debug("Backing up RBD snapshot " + rbdSnapshot + " to " + snapshotFile);
-                    QemuImg q = new QemuImg(cmd.getWaitInMillSeconds());
+                    final QemuImg q = new QemuImg(cmd.getWaitInMillSeconds());
                     q.convert(srcFile, destFile);
 
-                    File snapFile = new File(snapshotFile);
+                    final File snapFile = new File(snapshotFile);
                     if(snapFile.exists()) {
                         size = snapFile.length();
                     }
 
                     s_logger.debug("Finished backing up RBD snapshot " + rbdSnapshot + " to " + snapshotFile + " Snapshot size: " + size);
-                } catch (FileNotFoundException e) {
+                } catch (final FileNotFoundException e) {
                     s_logger.error("Failed to open " + snapshotDestPath + ". The error was: " + e.getMessage());
                     return new CopyCmdAnswer(e.toString());
-                } catch (IOException e) {
+                } catch (final IOException e) {
                     s_logger.error("Failed to create " + snapshotDestPath + ". The error was: " + e.getMessage());
                     return new CopyCmdAnswer(e.toString());
-                }  catch (QemuImgException e) {
+                }  catch (final QemuImgException e) {
                     s_logger.error("Failed to backup the RBD snapshot from " + rbdSnapshot +
-                                   " to " + snapshotFile + " the error was: " + e.getMessage());
+                            " to " + snapshotFile + " the error was: " + e.getMessage());
                     return new CopyCmdAnswer(e.toString());
                 }
             } else {
-                Script command = new Script(_manageSnapshotPath, cmd.getWaitInMillSeconds(), s_logger);
+                final Script command = new Script(_manageSnapshotPath, cmd.getWaitInMillSeconds(), s_logger);
                 command.add("-b", snapshotDisk.getPath());
                 command.add("-n", snapshotName);
                 command.add("-p", snapshotDestPath);
                 command.add("-t", snapshotName);
-                String result = command.execute();
+                final String result = command.execute();
                 if (result != null) {
                     s_logger.debug("Failed to backup snaptshot: " + result);
                     return new CopyCmdAnswer(result);
                 }
-                File snapFile = new File(snapshotDestPath + "/" + snapshotName);
+                final File snapFile = new File(snapshotDestPath + "/" + snapshotName);
                 if(snapFile.exists()){
                     size = snapFile.length();
                 }
             }
 
-            SnapshotObjectTO newSnapshot = new SnapshotObjectTO();
+            final SnapshotObjectTO newSnapshot = new SnapshotObjectTO();
             newSnapshot.setPath(snapshotRelPath + File.separator + snapshotName);
             newSnapshot.setPhysicalSize(size);
             return new CopyCmdAnswer(newSnapshot);
-        } catch (LibvirtException e) {
+        } catch (final LibvirtException e) {
             s_logger.debug("Failed to backup snapshot: ", e);
             return new CopyCmdAnswer(e.toString());
-        } catch (CloudRuntimeException e) {
+        } catch (final CloudRuntimeException e) {
             s_logger.debug("Failed to backup snapshot: ", e);
             return new CopyCmdAnswer(e.toString());
         } finally {
@@ -783,15 +781,15 @@ public class KVMStorageProcessor implements StorageProcessor {
                     try {
                         vm = resource.getDomain(conn, vmName);
                         state = vm.getInfo().state;
-                    } catch (LibvirtException e) {
+                    } catch (final LibvirtException e) {
                         s_logger.trace("Ignoring libvirt error.", e);
                     }
                 }
 
-                KVMStoragePool primaryStorage = storagePoolMgr.getStoragePool(primaryStore.getPoolType(),
+                final KVMStoragePool primaryStorage = storagePoolMgr.getStoragePool(primaryStore.getPoolType(),
                         primaryStore.getUuid());
                 if (state == DomainInfo.DomainState.VIR_DOMAIN_RUNNING && !primaryStorage.isExternalSnapshot()) {
-                    DomainSnapshot snap = vm.snapshotLookupByName(snapshotName);
+                    final DomainSnapshot snap = vm.snapshotLookupByName(snapshotName);
                     snap.delete(0);
 
                     /*
@@ -805,17 +803,17 @@ public class KVMStorageProcessor implements StorageProcessor {
                     }
                 } else {
                     if (primaryPool.getType() != StoragePoolType.RBD) {
-                        Script command = new Script(_manageSnapshotPath, _cmdsTimeout, s_logger);
+                        final Script command = new Script(_manageSnapshotPath, _cmdsTimeout, s_logger);
                         command.add("-d", snapshotDisk.getPath());
                         command.add("-n", snapshotName);
-                        String result = command.execute();
+                        final String result = command.execute();
                         if (result != null) {
                             s_logger.debug("Failed to delete snapshot on primary: " + result);
                             // return new CopyCmdAnswer("Failed to backup snapshot: " + result);
                         }
                     }
                 }
-            } catch (Exception ex) {
+            } catch (final Exception ex) {
                 s_logger.debug("Failed to delete snapshots on primary", ex);
             }
 
@@ -823,36 +821,36 @@ public class KVMStorageProcessor implements StorageProcessor {
                 if (secondaryStoragePool != null) {
                     secondaryStoragePool.delete();
                 }
-            } catch (Exception ex) {
+            } catch (final Exception ex) {
                 s_logger.debug("Failed to delete secondary storage", ex);
             }
         }
     }
 
-    protected synchronized String attachOrDetachISO(Connect conn, String vmName, String isoPath, boolean isAttach) throws LibvirtException, URISyntaxException,
-        InternalErrorException {
+    protected synchronized String attachOrDetachISO(final Connect conn, final String vmName, String isoPath, final boolean isAttach) throws LibvirtException, URISyntaxException,
+    InternalErrorException {
         String isoXml = null;
         if (isoPath != null && isAttach) {
-            int index = isoPath.lastIndexOf("/");
-            String path = isoPath.substring(0, index);
-            String name = isoPath.substring(index + 1);
-            KVMStoragePool secondaryPool = storagePoolMgr.getStoragePoolByURI(path);
-            KVMPhysicalDisk isoVol = secondaryPool.getPhysicalDisk(name);
+            final int index = isoPath.lastIndexOf("/");
+            final String path = isoPath.substring(0, index);
+            final String name = isoPath.substring(index + 1);
+            final KVMStoragePool secondaryPool = storagePoolMgr.getStoragePoolByURI(path);
+            final KVMPhysicalDisk isoVol = secondaryPool.getPhysicalDisk(name);
             isoPath = isoVol.getPath();
 
-            DiskDef iso = new DiskDef();
+            final DiskDef iso = new DiskDef();
             iso.defISODisk(isoPath);
             isoXml = iso.toString();
         } else {
-            DiskDef iso = new DiskDef();
+            final DiskDef iso = new DiskDef();
             iso.defISODisk(null);
             isoXml = iso.toString();
         }
 
-        List<DiskDef> disks = resource.getDisks(conn, vmName);
-        String result = attachOrDetachDevice(conn, true, vmName, isoXml);
+        final List<DiskDef> disks = resource.getDisks(conn, vmName);
+        final String result = attachOrDetachDevice(conn, true, vmName, isoXml);
         if (result == null && !isAttach) {
-            for (DiskDef disk : disks) {
+            for (final DiskDef disk : disks) {
                 if (disk.getDeviceType() == DiskDef.deviceType.CDROM) {
                     resource.cleanupDisk(disk);
                 }
@@ -863,22 +861,22 @@ public class KVMStorageProcessor implements StorageProcessor {
     }
 
     @Override
-    public Answer attachIso(AttachCommand cmd) {
-        DiskTO disk = cmd.getDisk();
-        TemplateObjectTO isoTO = (TemplateObjectTO)disk.getData();
-        DataStoreTO store = isoTO.getDataStore();
+    public Answer attachIso(final AttachCommand cmd) {
+        final DiskTO disk = cmd.getDisk();
+        final TemplateObjectTO isoTO = (TemplateObjectTO)disk.getData();
+        final DataStoreTO store = isoTO.getDataStore();
         if (!(store instanceof NfsTO)) {
             return new AttachAnswer("unsupported protocol");
         }
-        NfsTO nfsStore = (NfsTO)store;
+        final NfsTO nfsStore = (NfsTO)store;
         try {
-            Connect conn = LibvirtConnection.getConnectionByVmName(cmd.getVmName());
+            final Connect conn = LibvirtConnection.getConnectionByVmName(cmd.getVmName());
             attachOrDetachISO(conn, cmd.getVmName(), nfsStore.getUrl() + File.separator + isoTO.getPath(), true);
-        } catch (LibvirtException e) {
+        } catch (final LibvirtException e) {
             return new Answer(cmd, false, e.toString());
-        } catch (URISyntaxException e) {
+        } catch (final URISyntaxException e) {
             return new Answer(cmd, false, e.toString());
-        } catch (InternalErrorException e) {
+        } catch (final InternalErrorException e) {
             return new Answer(cmd, false, e.toString());
         }
 
@@ -886,29 +884,29 @@ public class KVMStorageProcessor implements StorageProcessor {
     }
 
     @Override
-    public Answer dettachIso(DettachCommand cmd) {
-        DiskTO disk = cmd.getDisk();
-        TemplateObjectTO isoTO = (TemplateObjectTO)disk.getData();
-        DataStoreTO store = isoTO.getDataStore();
+    public Answer dettachIso(final DettachCommand cmd) {
+        final DiskTO disk = cmd.getDisk();
+        final TemplateObjectTO isoTO = (TemplateObjectTO)disk.getData();
+        final DataStoreTO store = isoTO.getDataStore();
         if (!(store instanceof NfsTO)) {
             return new AttachAnswer("unsupported protocol");
         }
-        NfsTO nfsStore = (NfsTO)store;
+        final NfsTO nfsStore = (NfsTO)store;
         try {
-            Connect conn = LibvirtConnection.getConnectionByVmName(cmd.getVmName());
+            final Connect conn = LibvirtConnection.getConnectionByVmName(cmd.getVmName());
             attachOrDetachISO(conn, cmd.getVmName(), nfsStore.getUrl() + File.separator + isoTO.getPath(), false);
-        } catch (LibvirtException e) {
+        } catch (final LibvirtException e) {
             return new Answer(cmd, false, e.toString());
-        } catch (URISyntaxException e) {
+        } catch (final URISyntaxException e) {
             return new Answer(cmd, false, e.toString());
-        } catch (InternalErrorException e) {
+        } catch (final InternalErrorException e) {
             return new Answer(cmd, false, e.toString());
         }
 
         return new Answer(cmd);
     }
 
-    protected synchronized String attachOrDetachDevice(Connect conn, boolean attach, String vmName, String xml) throws LibvirtException, InternalErrorException {
+    protected synchronized String attachOrDetachDevice(final Connect conn, final boolean attach, final String vmName, final String xml) throws LibvirtException, InternalErrorException {
         Domain dm = null;
         try {
             dm = conn.domainLookupByName(vmName);
@@ -920,7 +918,7 @@ public class KVMStorageProcessor implements StorageProcessor {
                 s_logger.debug("Detaching device: " + xml);
                 dm.detachDevice(xml);
             }
-        } catch (LibvirtException e) {
+        } catch (final LibvirtException e) {
             if (attach) {
                 s_logger.warn("Failed to attach device to " + vmName + ": " + e.getMessage());
             } else {
@@ -931,7 +929,7 @@ public class KVMStorageProcessor implements StorageProcessor {
             if (dm != null) {
                 try {
                     dm.free();
-                } catch (LibvirtException l) {
+                } catch (final LibvirtException l) {
                     s_logger.trace("Ignoring libvirt error.", l);
                 }
             }
@@ -940,23 +938,23 @@ public class KVMStorageProcessor implements StorageProcessor {
         return null;
     }
 
-    protected synchronized String attachOrDetachDisk(Connect conn, boolean attach, String vmName, KVMPhysicalDisk attachingDisk, int devId) throws LibvirtException,
-        InternalErrorException {
+    protected synchronized String attachOrDetachDisk(final Connect conn, final boolean attach, final String vmName, final KVMPhysicalDisk attachingDisk, final int devId) throws LibvirtException,
+    InternalErrorException {
         List<DiskDef> disks = null;
         Domain dm = null;
         DiskDef diskdef = null;
-        KVMStoragePool attachingPool = attachingDisk.getPool();
+        final KVMStoragePool attachingPool = attachingDisk.getPool();
         try {
             if (!attach) {
                 dm = conn.domainLookupByName(vmName);
-                LibvirtDomainXMLParser parser = new LibvirtDomainXMLParser();
-                String xml = dm.getXMLDesc(0);
+                final LibvirtDomainXMLParser parser = new LibvirtDomainXMLParser();
+                final String xml = dm.getXMLDesc(0);
                 parser.parseDomainXML(xml);
                 disks = parser.getDisks();
 
                 if (attachingPool.getType() == StoragePoolType.RBD) {
                     if (resource.getHypervisorType() == Hypervisor.HypervisorType.LXC) {
-                        String device = resource.mapRbdDevice(attachingDisk);
+                        final String device = resource.mapRbdDevice(attachingDisk);
                         if (device != null) {
                             s_logger.debug("RBD device on host is: "+device);
                             attachingDisk.setPath(device);
@@ -964,8 +962,8 @@ public class KVMStorageProcessor implements StorageProcessor {
                     }
                 }
 
-                for (DiskDef disk : disks) {
-                    String file = disk.getDiskPath();
+                for (final DiskDef disk : disks) {
+                    final String file = disk.getDiskPath();
                     if (file != null && file.equalsIgnoreCase(attachingDisk.getPath())) {
                         diskdef = disk;
                         break;
@@ -979,7 +977,7 @@ public class KVMStorageProcessor implements StorageProcessor {
                 if (attachingPool.getType() == StoragePoolType.RBD) {
                     if(resource.getHypervisorType() == Hypervisor.HypervisorType.LXC){
                         // For LXC, map image to host and then attach to Vm
-                        String device = resource.mapRbdDevice(attachingDisk);
+                        final String device = resource.mapRbdDevice(attachingDisk);
                         if (device != null) {
                             s_logger.debug("RBD device on host is: "+device);
                             diskdef.defBlockBasedDisk(device, devId, DiskDef.diskBus.VIRTIO);
@@ -991,11 +989,11 @@ public class KVMStorageProcessor implements StorageProcessor {
                                 attachingPool.getUuid(), devId, DiskDef.diskBus.VIRTIO, diskProtocol.RBD, DiskDef.diskFmtType.RAW);
                     }
                 } else if (attachingPool.getType() == StoragePoolType.Gluster) {
-                    String mountpoint = attachingPool.getLocalPath();
-                    String path = attachingDisk.getPath();
-                    String glusterVolume = attachingPool.getSourceDir().replace("/", "");
+                    final String mountpoint = attachingPool.getLocalPath();
+                    final String path = attachingDisk.getPath();
+                    final String glusterVolume = attachingPool.getSourceDir().replace("/", "");
                     diskdef.defNetworkBasedDisk(glusterVolume + path.replace(mountpoint, ""), attachingPool.getSourceHost(), attachingPool.getSourcePort(), null,
-                        null, devId, DiskDef.diskBus.VIRTIO, diskProtocol.GLUSTER, DiskDef.diskFmtType.QCOW2);
+                            null, devId, DiskDef.diskBus.VIRTIO, diskProtocol.GLUSTER, DiskDef.diskFmtType.QCOW2);
                 } else if (attachingDisk.getFormat() == PhysicalDiskFormat.QCOW2) {
                     diskdef.defFileBasedDisk(attachingDisk.getPath(), devId, DiskDef.diskBus.VIRTIO, DiskDef.diskFmtType.QCOW2);
                 } else if (attachingDisk.getFormat() == PhysicalDiskFormat.RAW) {
@@ -1003,7 +1001,7 @@ public class KVMStorageProcessor implements StorageProcessor {
                 }
             }
 
-            String xml = diskdef.toString();
+            final String xml = diskdef.toString();
             return attachOrDetachDevice(conn, attach, vmName, xml);
         } finally {
             if (dm != null) {
@@ -1013,60 +1011,60 @@ public class KVMStorageProcessor implements StorageProcessor {
     }
 
     @Override
-    public Answer attachVolume(AttachCommand cmd) {
-        DiskTO disk = cmd.getDisk();
-        VolumeObjectTO vol = (VolumeObjectTO)disk.getData();
-        PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO)vol.getDataStore();
-        String vmName = cmd.getVmName();
+    public Answer attachVolume(final AttachCommand cmd) {
+        final DiskTO disk = cmd.getDisk();
+        final VolumeObjectTO vol = (VolumeObjectTO)disk.getData();
+        final PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO)vol.getDataStore();
+        final String vmName = cmd.getVmName();
         try {
-            Connect conn = LibvirtConnection.getConnectionByVmName(vmName);
+            final Connect conn = LibvirtConnection.getConnectionByVmName(vmName);
 
             storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), vol.getPath(), disk.getDetails());
 
-            KVMPhysicalDisk phyDisk = storagePoolMgr.getPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), vol.getPath());
+            final KVMPhysicalDisk phyDisk = storagePoolMgr.getPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), vol.getPath());
 
             attachOrDetachDisk(conn, true, vmName, phyDisk, disk.getDiskSeq().intValue());
 
             return new AttachAnswer(disk);
-        } catch (LibvirtException e) {
+        } catch (final LibvirtException e) {
             s_logger.debug("Failed to attach volume: " + vol.getPath() + ", due to ", e);
             storagePoolMgr.disconnectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), vol.getPath());
             return new AttachAnswer(e.toString());
-        } catch (InternalErrorException e) {
+        } catch (final InternalErrorException e) {
             s_logger.debug("Failed to attach volume: " + vol.getPath() + ", due to ", e);
             return new AttachAnswer(e.toString());
         }
     }
 
     @Override
-    public Answer dettachVolume(DettachCommand cmd) {
-        DiskTO disk = cmd.getDisk();
-        VolumeObjectTO vol = (VolumeObjectTO)disk.getData();
-        PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO)vol.getDataStore();
-        String vmName = cmd.getVmName();
+    public Answer dettachVolume(final DettachCommand cmd) {
+        final DiskTO disk = cmd.getDisk();
+        final VolumeObjectTO vol = (VolumeObjectTO)disk.getData();
+        final PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO)vol.getDataStore();
+        final String vmName = cmd.getVmName();
         try {
-            Connect conn = LibvirtConnection.getConnectionByVmName(vmName);
+            final Connect conn = LibvirtConnection.getConnectionByVmName(vmName);
 
-            KVMPhysicalDisk phyDisk = storagePoolMgr.getPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), vol.getPath());
+            final KVMPhysicalDisk phyDisk = storagePoolMgr.getPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), vol.getPath());
 
             attachOrDetachDisk(conn, false, vmName, phyDisk, disk.getDiskSeq().intValue());
 
             storagePoolMgr.disconnectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), vol.getPath());
 
             return new DettachAnswer(disk);
-        } catch (LibvirtException e) {
+        } catch (final LibvirtException e) {
             s_logger.debug("Failed to attach volume: " + vol.getPath() + ", due to ", e);
             return new DettachAnswer(e.toString());
-        } catch (InternalErrorException e) {
+        } catch (final InternalErrorException e) {
             s_logger.debug("Failed to attach volume: " + vol.getPath() + ", due to ", e);
             return new DettachAnswer(e.toString());
         }
     }
 
     @Override
-    public Answer createVolume(CreateObjectCommand cmd) {
-        VolumeObjectTO volume = (VolumeObjectTO)cmd.getData();
-        PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO)volume.getDataStore();
+    public Answer createVolume(final CreateObjectCommand cmd) {
+        final VolumeObjectTO volume = (VolumeObjectTO)cmd.getData();
+        final PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO)volume.getDataStore();
 
         KVMStoragePool primaryPool = null;
         KVMPhysicalDisk vol = null;
@@ -1081,9 +1079,9 @@ public class KVMStorageProcessor implements StorageProcessor {
                 format = PhysicalDiskFormat.valueOf(volume.getFormat().toString().toUpperCase());
             }
             vol = primaryPool.createPhysicalDisk(volume.getUuid(), format,
-                                                 volume.getProvisioningType(), disksize);
+                    volume.getProvisioningType(), disksize);
 
-            VolumeObjectTO newVol = new VolumeObjectTO();
+            final VolumeObjectTO newVol = new VolumeObjectTO();
             if(vol != null) {
                 newVol.setPath(vol.getName());
             }
@@ -1091,46 +1089,46 @@ public class KVMStorageProcessor implements StorageProcessor {
             newVol.setFormat(ImageFormat.valueOf(format.toString().toUpperCase()));
 
             return new CreateObjectAnswer(newVol);
-        } catch (Exception e) {
+        } catch (final Exception e) {
             s_logger.debug("Failed to create volume: ", e);
             return new CreateObjectAnswer(e.toString());
         }
     }
 
     protected static final MessageFormat SnapshotXML = new MessageFormat("   <domainsnapshot>" + "       <name>{0}</name>" + "          <domain>"
-        + "            <uuid>{1}</uuid>" + "        </domain>" + "    </domainsnapshot>");
+            + "            <uuid>{1}</uuid>" + "        </domain>" + "    </domainsnapshot>");
 
     @Override
-    public Answer createSnapshot(CreateObjectCommand cmd) {
-        SnapshotObjectTO snapshotTO = (SnapshotObjectTO)cmd.getData();
-        PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO)snapshotTO.getDataStore();
-        VolumeObjectTO volume = snapshotTO.getVolume();
-        String snapshotName = UUID.randomUUID().toString();
-        String vmName = volume.getVmName();
+    public Answer createSnapshot(final CreateObjectCommand cmd) {
+        final SnapshotObjectTO snapshotTO = (SnapshotObjectTO)cmd.getData();
+        final PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO)snapshotTO.getDataStore();
+        final VolumeObjectTO volume = snapshotTO.getVolume();
+        final String snapshotName = UUID.randomUUID().toString();
+        final String vmName = volume.getVmName();
         try {
-            Connect conn = LibvirtConnection.getConnectionByVmName(vmName);
+            final Connect conn = LibvirtConnection.getConnectionByVmName(vmName);
             DomainInfo.DomainState state = null;
             Domain vm = null;
             if (vmName != null) {
                 try {
                     vm = resource.getDomain(conn, vmName);
                     state = vm.getInfo().state;
-                } catch (LibvirtException e) {
+                } catch (final LibvirtException e) {
                     s_logger.trace("Ignoring libvirt error.", e);
                 }
             }
 
-            KVMStoragePool primaryPool = storagePoolMgr.getStoragePool(primaryStore.getPoolType(), primaryStore.getUuid());
+            final KVMStoragePool primaryPool = storagePoolMgr.getStoragePool(primaryStore.getPoolType(), primaryStore.getUuid());
 
-            KVMPhysicalDisk disk = storagePoolMgr.getPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), volume.getPath());
+            final KVMPhysicalDisk disk = storagePoolMgr.getPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), volume.getPath());
             if (state == DomainInfo.DomainState.VIR_DOMAIN_RUNNING && !primaryPool.isExternalSnapshot()) {
-                String vmUuid = vm.getUUIDString();
-                Object[] args = new Object[] {snapshotName, vmUuid};
-                String snapshot = SnapshotXML.format(args);
+                final String vmUuid = vm.getUUIDString();
+                final Object[] args = new Object[] {snapshotName, vmUuid};
+                final String snapshot = SnapshotXML.format(args);
 
-                long start = System.currentTimeMillis();
+                final long start = System.currentTimeMillis();
                 vm.snapshotCreateXML(snapshot);
-                long total = (System.currentTimeMillis() - start)/1000;
+                final long total = (System.currentTimeMillis() - start)/1000;
                 s_logger.debug("snapshot takes " + total + " seconds to finish");
 
                 /*
@@ -1158,23 +1156,23 @@ public class KVMStorageProcessor implements StorageProcessor {
                  */
                 if (primaryPool.getType() == StoragePoolType.RBD) {
                     try {
-                        Rados r = new Rados(primaryPool.getAuthUserName());
+                        final Rados r = new Rados(primaryPool.getAuthUserName());
                         r.confSet("mon_host", primaryPool.getSourceHost() + ":" + primaryPool.getSourcePort());
                         r.confSet("key", primaryPool.getAuthSecret());
                         r.confSet("client_mount_timeout", "30");
                         r.connect();
                         s_logger.debug("Succesfully connected to Ceph cluster at " + r.confGet("mon_host"));
 
-                        IoCTX io = r.ioCtxCreate(primaryPool.getSourceDir());
-                        Rbd rbd = new Rbd(io);
-                        RbdImage image = rbd.open(disk.getName());
+                        final IoCTX io = r.ioCtxCreate(primaryPool.getSourceDir());
+                        final Rbd rbd = new Rbd(io);
+                        final RbdImage image = rbd.open(disk.getName());
 
                         s_logger.debug("Attempting to create RBD snapshot " + disk.getName() + "@" + snapshotName);
                         image.snapCreate(snapshotName);
 
                         rbd.close(image);
                         r.ioCtxDestroy(io);
-                    } catch (Exception e) {
+                    } catch (final Exception e) {
                         s_logger.error("A RBD snapshot operation on " + disk.getName() + " failed. The error was: " + e.getMessage());
                     }
                 } else {
@@ -1182,7 +1180,7 @@ public class KVMStorageProcessor implements StorageProcessor {
                     final Script command = new Script(_manageSnapshotPath, _cmdsTimeout, s_logger);
                     command.add("-c", disk.getPath());
                     command.add("-n", snapshotName);
-                    String result = command.execute();
+                    final String result = command.execute();
                     if (result != null) {
                         s_logger.debug("Failed to manage snapshot: " + result);
                         return new CreateObjectAnswer("Failed to manage snapshot: " + result);
@@ -1190,58 +1188,58 @@ public class KVMStorageProcessor implements StorageProcessor {
                 }
             }
 
-            SnapshotObjectTO newSnapshot = new SnapshotObjectTO();
+            final SnapshotObjectTO newSnapshot = new SnapshotObjectTO();
             // NOTE: sort of hack, we'd better just put snapshotName
             newSnapshot.setPath(disk.getPath() + File.separator + snapshotName);
             return new CreateObjectAnswer(newSnapshot);
-        } catch (LibvirtException e) {
+        } catch (final LibvirtException e) {
             s_logger.debug("Failed to manage snapshot: ", e);
             return new CreateObjectAnswer("Failed to manage snapshot: " + e.toString());
         }
     }
 
     @Override
-    public Answer deleteVolume(DeleteCommand cmd) {
-        VolumeObjectTO vol = (VolumeObjectTO)cmd.getData();
-        PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO)vol.getDataStore();
+    public Answer deleteVolume(final DeleteCommand cmd) {
+        final VolumeObjectTO vol = (VolumeObjectTO)cmd.getData();
+        final PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO)vol.getDataStore();
         try {
-            KVMStoragePool pool = storagePoolMgr.getStoragePool(primaryStore.getPoolType(), primaryStore.getUuid());
+            final KVMStoragePool pool = storagePoolMgr.getStoragePool(primaryStore.getPoolType(), primaryStore.getUuid());
             try {
                 pool.getPhysicalDisk(vol.getPath());
-            } catch (Exception e) {
+            } catch (final Exception e) {
                 s_logger.debug("can't find volume: " + vol.getPath() + ", return true");
                 return new Answer(null);
             }
             pool.deletePhysicalDisk(vol.getPath(), vol.getFormat());
             return new Answer(null);
-        } catch (CloudRuntimeException e) {
+        } catch (final CloudRuntimeException e) {
             s_logger.debug("Failed to delete volume: ", e);
             return new Answer(null, false, e.toString());
         }
     }
 
     @Override
-    public Answer createVolumeFromSnapshot(CopyCommand cmd) {
+    public Answer createVolumeFromSnapshot(final CopyCommand cmd) {
         try {
-            DataTO srcData = cmd.getSrcTO();
-            SnapshotObjectTO snapshot = (SnapshotObjectTO)srcData;
-            DataTO destData = cmd.getDestTO();
-            PrimaryDataStoreTO pool = (PrimaryDataStoreTO)destData.getDataStore();
-            DataStoreTO imageStore = srcData.getDataStore();
-            VolumeObjectTO volume = snapshot.getVolume();
+            final DataTO srcData = cmd.getSrcTO();
+            final SnapshotObjectTO snapshot = (SnapshotObjectTO)srcData;
+            final DataTO destData = cmd.getDestTO();
+            final PrimaryDataStoreTO pool = (PrimaryDataStoreTO)destData.getDataStore();
+            final DataStoreTO imageStore = srcData.getDataStore();
+            final VolumeObjectTO volume = snapshot.getVolume();
 
             if (!(imageStore instanceof NfsTO)) {
                 return new CopyCmdAnswer("unsupported protocol");
             }
 
-            NfsTO nfsImageStore = (NfsTO)imageStore;
+            final NfsTO nfsImageStore = (NfsTO)imageStore;
 
-            String snapshotFullPath = snapshot.getPath();
-            int index = snapshotFullPath.lastIndexOf("/");
-            String snapshotPath = snapshotFullPath.substring(0, index);
-            String snapshotName = snapshotFullPath.substring(index + 1);
-            KVMStoragePool secondaryPool = storagePoolMgr.getStoragePoolByURI(nfsImageStore.getUrl() + File.separator + snapshotPath);
-            KVMPhysicalDisk snapshotDisk = secondaryPool.getPhysicalDisk(snapshotName);
+            final String snapshotFullPath = snapshot.getPath();
+            final int index = snapshotFullPath.lastIndexOf("/");
+            final String snapshotPath = snapshotFullPath.substring(0, index);
+            final String snapshotName = snapshotFullPath.substring(index + 1);
+            final KVMStoragePool secondaryPool = storagePoolMgr.getStoragePoolByURI(nfsImageStore.getUrl() + File.separator + snapshotPath);
+            final KVMPhysicalDisk snapshotDisk = secondaryPool.getPhysicalDisk(snapshotName);
 
             if (volume.getFormat() == ImageFormat.RAW) {
                 snapshotDisk.setFormat(PhysicalDiskFormat.RAW);
@@ -1249,34 +1247,34 @@ public class KVMStorageProcessor implements StorageProcessor {
                 snapshotDisk.setFormat(PhysicalDiskFormat.QCOW2);
             }
 
-            String primaryUuid = pool.getUuid();
-            KVMStoragePool primaryPool = storagePoolMgr.getStoragePool(pool.getPoolType(), primaryUuid);
-            String volUuid = UUID.randomUUID().toString();
-            KVMPhysicalDisk disk = storagePoolMgr.copyPhysicalDisk(snapshotDisk, volUuid, primaryPool, cmd.getWaitInMillSeconds());
-            VolumeObjectTO newVol = new VolumeObjectTO();
+            final String primaryUuid = pool.getUuid();
+            final KVMStoragePool primaryPool = storagePoolMgr.getStoragePool(pool.getPoolType(), primaryUuid);
+            final String volUuid = UUID.randomUUID().toString();
+            final KVMPhysicalDisk disk = storagePoolMgr.copyPhysicalDisk(snapshotDisk, volUuid, primaryPool, cmd.getWaitInMillSeconds());
+            final VolumeObjectTO newVol = new VolumeObjectTO();
             newVol.setPath(disk.getName());
             newVol.setSize(disk.getVirtualSize());
             newVol.setFormat(ImageFormat.valueOf(disk.getFormat().toString().toUpperCase()));
 
             return new CopyCmdAnswer(newVol);
-        } catch (CloudRuntimeException e) {
+        } catch (final CloudRuntimeException e) {
             s_logger.debug("Failed to createVolumeFromSnapshot: ", e);
             return new CopyCmdAnswer(e.toString());
         }
     }
 
     @Override
-    public Answer deleteSnapshot(DeleteCommand cmd) {
+    public Answer deleteSnapshot(final DeleteCommand cmd) {
         return new Answer(cmd);
     }
 
     @Override
-    public Answer introduceObject(IntroduceObjectCmd cmd) {
+    public Answer introduceObject(final IntroduceObjectCmd cmd) {
         return new Answer(cmd, false, "not implememented yet");
     }
 
     @Override
-    public Answer forgetObject(ForgetObjectCmd cmd) {
+    public Answer forgetObject(final ForgetObjectCmd cmd) {
         return new Answer(cmd, false, "not implememented yet");
     }
 


[24/50] [abbrv] git commit: updated refs/heads/feature/vpc-ipv6 to 6140db5

Posted by ek...@apache.org.
CLOUDSTACK-8525: NPE while updating the state of the volume after deletion

The volume may already have been deleted (for example, by the cleanup
thread), which caused the NPE. Added a null check for the VolumeVO and
return false from the state transition when the volume is no longer
found.

This closes #321


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/b31b8425
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/b31b8425
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/b31b8425

Branch: refs/heads/feature/vpc-ipv6
Commit: b31b8425df9b442a572aec7a2d462b80881fea0f
Parents: bec44bf
Author: Rajani Karuturi <ra...@gmail.com>
Authored: Thu May 28 11:41:33 2015 +0530
Committer: Rajani Karuturi <ra...@gmail.com>
Committed: Wed Jun 3 11:45:02 2015 +0530

----------------------------------------------------------------------
 .../cloudstack/storage/volume/VolumeObject.java |  6 +-
 .../storage/volume/VolumeObjectTest.java        | 77 ++++++++++++++++++++
 2 files changed, 81 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b31b8425/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeObject.java
----------------------------------------------------------------------
diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeObject.java b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeObject.java
index 1f574d5..e851870 100644
--- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeObject.java
+++ b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeObject.java
@@ -178,8 +178,10 @@ public class VolumeObject implements VolumeInfo {
         boolean result = false;
         try {
             volumeVO = volumeDao.findById(volumeVO.getId());
-            result = _volStateMachine.transitTo(volumeVO, event, null, volumeDao);
-            volumeVO = volumeDao.findById(volumeVO.getId());
+            if(volumeVO != null) {
+                result = _volStateMachine.transitTo(volumeVO, event, null, volumeDao);
+                volumeVO = volumeDao.findById(volumeVO.getId());
+            }
         } catch (NoTransitionException e) {
             String errorMessage = "Failed to transit volume: " + getVolumeId() + ", due to: " + e.toString();
             s_logger.debug(errorMessage);
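
For context, the guard added in the hunk above can be read as the following
pattern. This is a minimal sketch with made-up names (Row, RowDao,
GuardedTransition) standing in for VolumeVO, VolumeDao and the volume state
machine; it is not the CloudStack code itself, only an illustration of
"re-read the row first, and treat a missing row as a failed transition
rather than dereferencing null":

    // Hypothetical stand-ins (Row, RowDao) for VolumeVO / VolumeDao; not CloudStack classes.
    class Row {
        long id;
        String state;
        Row(final long id, final String state) { this.id = id; this.state = state; }
    }

    interface RowDao {
        Row findById(long id);    // may return null once a cleanup thread has removed the row
    }

    class GuardedTransition {
        private final RowDao dao;
        GuardedTransition(final RowDao dao) { this.dao = dao; }

        // Same shape as the guarded stateTransit() above: re-read the row, attempt the
        // transition only if it still exists, otherwise report failure instead of an NPE.
        boolean stateTransit(final long id, final String event) {
            boolean result = false;
            final Row row = dao.findById(id);
            if (row != null) {
                row.state = event;    // stands in for _volStateMachine.transitTo(...)
                result = true;
            }
            return result;            // false means "nothing to transition", not an exception
        }
    }

The unit test added in the next diff exercises exactly the null branch: the
mocked VolumeDao returns no row, and stateTransit() is expected to return
false rather than throw.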

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b31b8425/engine/storage/volume/test/org/apache/cloudstack/storage/volume/VolumeObjectTest.java
----------------------------------------------------------------------
diff --git a/engine/storage/volume/test/org/apache/cloudstack/storage/volume/VolumeObjectTest.java b/engine/storage/volume/test/org/apache/cloudstack/storage/volume/VolumeObjectTest.java
new file mode 100644
index 0000000..8118013
--- /dev/null
+++ b/engine/storage/volume/test/org/apache/cloudstack/storage/volume/VolumeObjectTest.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.volume;
+
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager;
+import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.InjectMocks;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.mockito.runners.MockitoJUnitRunner;
+
+import com.cloud.storage.Storage;
+import com.cloud.storage.Volume;
+import com.cloud.storage.VolumeVO;
+import com.cloud.storage.dao.DiskOfferingDao;
+import com.cloud.storage.dao.VolumeDao;
+import com.cloud.vm.dao.VMInstanceDao;
+
+@RunWith(MockitoJUnitRunner.class)
+public class VolumeObjectTest {
+
+    @Mock
+    VolumeDao volumeDao;
+
+    @Mock
+    VolumeDataStoreDao volumeStoreDao;
+
+    @Mock
+    ObjectInDataStoreManager objectInStoreMgr;
+
+    @Mock
+    VMInstanceDao vmInstanceDao;
+
+    @Mock
+    DiskOfferingDao diskOfferingDao;
+
+    @InjectMocks
+    VolumeObject volumeObject;
+
+    @Before
+    public void setUp() throws Exception {
+        volumeObject.configure(Mockito.mock(DataStore.class), new VolumeVO("name", 1l, 1l, 1l, 1l, 1l, "folder", "path", Storage.ProvisioningType.THIN, 1l, Volume.Type.DATADISK));
+    }
+
+    /**
+     * Tests the following scenario:
+     * If the volume gets deleted by another thread (cleanup) and the cleanup is attempted again, the volume isn't found in the DB and hence an NPE occurs
+     * during transition
+     */
+    @Test
+    public void testStateTransit() {
+        boolean result = volumeObject.stateTransit(Volume.Event.OperationFailed);
+        Assert.assertFalse("since the volume doesnt exist in the db, the operation should fail but, should not throw any exception", result);
+    }
+}
\ No newline at end of file


[14/50] [abbrv] git commit: updated refs/heads/feature/vpc-ipv6 to 6140db5

Posted by ek...@apache.org.
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/108a74a6/test/integration/component/maint/testpath_vMotion_vmware.py
----------------------------------------------------------------------
diff --git a/test/integration/component/maint/testpath_vMotion_vmware.py b/test/integration/component/maint/testpath_vMotion_vmware.py
new file mode 100644
index 0000000..bc5dbd7
--- /dev/null
+++ b/test/integration/component/maint/testpath_vMotion_vmware.py
@@ -0,0 +1,2983 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+""" Test cases for Test Paths Storage Migration
+"""
+from nose.plugins.attrib import attr
+from marvin.cloudstackTestCase import cloudstackTestCase, unittest
+from marvin.lib.utils import (cleanup_resources,
+                              random_gen,
+                              format_volume_to_ext3,
+                              validateList,
+                              is_server_ssh_ready
+                              )
+from marvin.lib.base import (Account,
+                             ServiceOffering,
+                             DiskOffering,
+                             Volume,
+                             Template,
+                             VirtualMachine,
+                             StoragePool,
+                             Snapshot,
+                             VmSnapshot,
+                             Configurations,
+                             Host,
+                             NATRule,
+                             PublicIPAddress,
+                             StaticNATRule,
+                             FireWallRule,
+                             Network
+                             )
+from marvin.lib.common import (get_domain,
+                               get_zone,
+                               get_template,
+                               list_volumes,
+                               list_virtual_machines,
+                               list_clusters,
+                               list_storage_pools,
+                               list_hosts,
+                               get_windows_template,
+                               list_publicIP,
+                               list_nat_rules,
+                               list_ssvms
+                               )
+
+from marvin.cloudstackAPI import (deleteVolume,
+                                  enableStorageMaintenance,
+                                  cancelStorageMaintenance
+                                  )
+import hashlib
+from marvin.sshClient import SshClient
+from marvin.codes import FAILED, PASS, FAIL
+from ddt import ddt, data, unpack
+import time
+from threading import Thread
+
+
+def MigrateDataVolume(self,
+                      volume,
+                      destinationPool,
+                      islive=False,
+                      expectexception=False
+                      ):
+    """ Migrate given volume to type of storage pool mentioned in migrateto:
+
+        Inputs:
+            1. volume:           Volume to be migrated
+            2. migrate_to:       Scope of desired Storage pool to which volume
+                                 is to be migrated
+            3. expectexception:  If exception is expected while migration
+    """
+
+    if expectexception:
+        with self.assertRaises(Exception):
+            Volume.migrate(
+                self.apiclient,
+                volumeid=volume.id,
+                storageid=destinationPool.id,
+                livemigrate=islive
+            )
+    else:
+        Volume.migrate(
+            self.apiclient,
+            volumeid=volume.id,
+            storageid=destinationPool.id,
+            livemigrate=islive
+        )
+
+        migrated_volume_response = list_volumes(
+            self.apiclient,
+            id=volume.id
+        )
+
+        self.assertEqual(
+            isinstance(migrated_volume_response, list),
+            True,
+            "Check list volumes response for valid list"
+        )
+
+        self.assertNotEqual(
+            migrated_volume_response,
+            None,
+            "Check if volume exists in ListVolumes"
+        )
+
+        migrated_volume = migrated_volume_response[0]
+
+        self.assertEqual(
+            str(migrated_volume.state).lower(),
+            'ready',
+            "Check migrated volume is in Ready state"
+        )
+
+        self.assertEqual(
+            migrated_volume.storage,
+            destinationPool.name,
+            "Check volume is on migrated pool"
+        )
+    return
+
+
+def VmSnapshotToCheckDataIntegrity(self, vm):
+    """
+    This method takes VMSnapshot of the VM post migration
+    to check data integrity.
+    VM snapshot is not possible if VM's volumes have snapshots.
+    So, first we will check if there are any volume
+    snapshots after migration and delete them if
+    there are any. Once the VM snapshot is successful,
+    delete the VM snapshot.
+    """
+    volumes = list_volumes(self.apiclient, virtualmachineid=vm.id,
+                listall=True)
+    for vol in volumes:
+        snapshot = Snapshot.list(self.apiclient, volumeid=vol.id,
+                     listall=True)
+        if(snapshot):
+            for snap in snapshot:
+                try:
+                    Snapshot.deletesnap(self.apiclient, snapid=snap.id)
+                except Exception as e:
+                    raise Exception("Warning: Exception during Volume snapshot deletion : %s" % e)
+    #Take VM snapshot to check data integrity
+    try:
+        vm_snapshot = VmSnapshot.create(self.apiclient, vmid=vm.id)
+    except Exception as e:
+        raise Exception("Warning: Exception during VM snapshot creation : %s" % e)
+
+    #Delete the snapshot
+    try:
+        VmSnapshot.deleteVMSnapshot(self.apiclient, vmsnapshotid=vm_snapshot.id)
+    except Exception as e:
+        raise Exception("Warning: Exception during VM snapshot deletion : %s" % e)
+
+    return
+
+
+def MigrateVmWithVolume(self, vm, destinationHost, volumes, pools):
+    """
+        This method is used to migrate a vm and its volumes using migrate virtual machine with volume API
+        INPUTS:
+               1. vm -> virtual machine object
+               2. destinationHost -> the host to which VM will be migrated
+               3. volumes -> list of volumes which are to be migrated
+               4. pools -> list of destination pools
+    """
+    if not destinationHost:
+        self.debug("Destination host is NULL so migration can't be performed")
+        return
+    vol_pool_map = {}
+    for vol, pool in zip(volumes, pools):
+        vol_pool_map.update({vol.id: pool.id})
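+    # The resulting map (uuids illustrative) looks like
+    # {"<root-volume-uuid>": "<dest-pool-1-uuid>", "<data-volume-uuid>": "<dest-pool-2-uuid>"},
+    # i.e. each volume id is keyed to the id of the pool it should be migrated to.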
+
+    vm.migrate_vm_with_volume(
+                                  self.apiclient,
+                                  hostid=destinationHost.id,
+                                  migrateto=vol_pool_map
+                                  )
+    vm.getState(
+                    self.apiclient,
+                    "Running"
+                )
+    #check for the VM's host and volume's storage post migration
+    migrated_vm_response = list_virtual_machines(self.apiclient, id=vm.id)
+    self.assertEqual(
+                         isinstance(migrated_vm_response, list),
+                         True,
+                         "Check list virtual machines response for valid list"
+                    )
+    self.assertEqual(
+                         migrated_vm_response[0].hostid,
+                         destinationHost.id,
+                         "VM did not migrate to a specified host"
+                    )
+
+    for vol, pool in zip(volumes, pools):
+        migrated_volume_response = list_volumes(self.apiclient, virtualmachineid=migrated_vm_response[0].id, name=vol.name, listall=True)
+        self.assertEqual(
+                         isinstance(migrated_volume_response, list),
+                         True,
+                         "Check list virtual machines response for valid list"
+                        )
+        self.assertEqual(
+                         migrated_volume_response[0].storageid,
+                         pool.id,
+                         "Volume did not migrate to a specified pool"
+                        )
+
+        self.assertEqual(
+                         str(migrated_volume_response[0].state).lower(),
+                         'ready',
+                         "Check migrated volume is in Ready state"
+                        )
+
+    return migrated_vm_response[0]
+
+
+def MigrateVm(self, vm, destinationHost):
+    """
+    This method is to migrate a VM using migrate virtual machine API
+    """
+    if not destinationHost:
+        self.debug("Destination host is NULL so migration can't be performed")
+        return
+    vm.migrate(
+                    self.apiclient,
+                    hostid=destinationHost.id,
+                 )
+    vm.getState(
+                    self.apiclient,
+                    "Running"
+                    )
+    #check for the VM's host and volume's storage post migration
+    migrated_vm_response = list_virtual_machines(self.apiclient, id=vm.id)
+    self.assertEqual(
+                         isinstance(migrated_vm_response, list),
+                         True,
+                         "Check list virtual machines response for valid list"
+                        )
+    self.assertEqual(
+                         migrated_vm_response[0].hostid,
+                         destinationHost.id,
+                         "VM did not migrate to a specified host"
+                        )
+    return migrated_vm_response[0]
+
+
+def get_destination_pools_hosts(self, vm, storage_scope, storage_type):
+    """
+    Get destination pools for all volumes and a destination host for the VM.
+    This method is used when migrating with the migrate virtual machine with volume API.
+    """
+
+    destinationPools = []
+    vol_list = list_volumes(self.apiclient, virtualmachineid=vm.id, listall=True)
+    # For each volume get destination pool
+    for vol in vol_list:
+        pool = GetDestinationStoragePool(self, vol.storage, storage_scope, storage_type)
+        destinationPools.append(pool)
+        #Get destination host
+    destinationHost = self.GetDestinationHost(vm.hostid, vm, storage_scope)
+    return destinationHost, destinationPools, vol_list
+
+
+def check_files(self, vm, destinationHost):
+    """
+    Check for VMX and VMDK files
+            INPUTS :
+                      1. vm -> The Virtual Machine object
+                      2. destinationHost -> The host to which we want to migrate the VM
+    """
+    # list volumes and their pools
+    # Here we list all the volumes of the VM , then login to the destination host
+    # and check for vmx and vmdk files in the storage
+
+    vm_volumes = list_volumes(self.apiclient, virtualmachineid=vm.id, listall=True)
+    for vol in vm_volumes:
+        spool = list_storage_pools(self.apiclient, id=vol.storageid)
+        split_path = spool[0].path.split("/")
+        pool_path = split_path[2]
+        if spool[0].type == "NetworkFilesystem":
+            pool_path = spool[0].id.replace("-", "")
+        sshclient = SshClient(
+                              host=destinationHost.ipaddress,
+                              port=self.testdata['configurableData']['host']["publicport"],
+                              user=self.testdata['configurableData']['host']["username"],
+                              passwd=self.testdata['configurableData']['host']["password"],
+                              )
+        pool_data_vmdk = sshclient.execute("ls /vmfs/volumes/" + pool_path + "/" + vm.instancename + "| grep vmdk")
+        pool_data_vmx = sshclient.execute("ls /vmfs/volumes/" + pool_path + "/" + vm.instancename + "| grep vmx")
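+        # For illustration (actual datastore naming depends on how the pool is mounted
+        # on ESXi): an NFS pool ends up as /vmfs/volumes/<pool-uuid-without-dashes>/<instancename>/,
+        # and the listings above are grepped for the .vmx/.vmdk entries asserted below.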
+        self.debug("------------------------volume's actual path is: %s" % vol.path)
+        vol_path_db = self.dbclient.execute("select path from volumes where uuid='%s';" % vol.id)
+        self.debug("-----------------------volume's  path in DB is: %s" % vol_path_db)
+        vol_name_db = self.dbclient.execute("select name from volumes where uuid='%s';" % vol.id)
+        self.debug("-----------------------volume's  name in DB is: %s" % vol_name_db)
+        if(pool_data_vmx):
+            vmx_file = vm.instancename + ".vmx"
+            if vol.type == "ROOT":
+                self.assertIn(
+                      vmx_file,
+                      pool_data_vmx,
+                      "The VMX files are missing"
+                      )
+        if(pool_data_vmdk):
+            vmdk_file1 = vol.path + ".vmdk"
+            vmdk_file2 = vol.path + "-flat.vmdk"
+
+            self.assertIn(
+                      vmdk_file1,
+                      pool_data_vmdk,
+                      "The VMDK files are missing"
+                      )
+            self.assertIn(
+                      vmdk_file2,
+                      pool_data_vmdk,
+                      "The VMDK flat files are missing"
+                      )
+    return
+
+
+def GetDestinationStoragePool(self, poolsToavoid, storage_scope, storage_type):
+    """ Get destination pool which has scope same as migrateto
+        and which is not in avoid set
+    """
+
+    destinationPool = None
+    destinationCluster = None
+    if storage_scope == "within_cluster" or storage_scope == "across_cluster":
+        scope = "CLUSTER"
+    else:
+        scope = "ZONE"
+
+    pool = list_storage_pools(self.apiclient, name=poolsToavoid)
+    clusters = list_clusters(self.apiclient, listall=True)
+    if storage_scope == "across_cluster":
+        for cluster in clusters:
+            if cluster.id not in pool[0].clusterid:
+                if len(list_storage_pools(self.apiclient, clusterid=cluster.id)) > 0:
+                    destinationCluster = cluster
+                    break
+        pools_in_cluster = list_storage_pools(self.apiclient, clusterid=destinationCluster.id, scope=scope)
+        for pool in pools_in_cluster:
+            if pool.type == storage_type:
+                destinationPool = pool
+                break
+        return destinationPool
+    elif storage_scope == "within_cluster":
+        destinationCluster = list_clusters(self.apiclient, id=pool[0].clusterid, listall=True)[0]
+        storagepools = list_storage_pools(self.apiclient, clusterid=destinationCluster.id, scope=scope)
+        for pool in storagepools:
+            if pool.name not in poolsToavoid and pool.type == storage_type:
+                destinationPool = pool
+        return destinationPool
+    elif storage_scope == "ZONE":
+        storagepools = list_storage_pools(self.apiclient, scope=scope)
+        for pool in storagepools:
+            if pool.name not in poolsToavoid and pool.type == storage_type:
+                destinationPool = pool
+        return destinationPool
+
+
+def restart_mgmt_server(self, hostip, port, username, password):
+    """Restarts the management server"""
+
+    try:
+        # Get the SSH client
+        ssh = is_server_ssh_ready(
+            hostip,
+            port,
+            username,
+            password,
+        )
+        result = ssh.execute("/etc/init.d/cloudstack-management restart")
+        res = str(result)
+        # Server Stop - OK
+        # Server Start - OK
+        if res.count("OK") != 2:
+            raise ("ErrorInReboot!")
+    except Exception as e:
+        raise e
+    return
+
+
+def check_host_capacity(self, hostid, vm):
+    """Checks whether host has enough capacity to migrate the VM
+    """
+    host = list_hosts(self.apiclient, id=hostid, listall=True)[0]
+    # memorytotal/memoryallocated are assumed to be reported in bytes, vm.memory in MB
+    host_memory_available_in_MB = (host.memorytotal - host.memoryallocated) / (1024 * 1024) * 0.8
+    memory_of_vm = vm.memory
+    host_cpu_available_in_MHz = (host.cpuspeed - host.cpuspeed * float(host.cpuallocated.replace("%", "")) / 100) * 0.8
+    cpu_of_vm = vm.cpuspeed
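+    # Rough illustration, assuming memorytotal/memoryallocated are in bytes and
+    # vm.memory is in MB: with 16 GB total and 8 GB allocated the host offers
+    # 0.8 * (16384 - 8192) = 6553 MB, so a 4096 MB VM passes the memory check.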
+    if host_memory_available_in_MB > memory_of_vm and host_cpu_available_in_MHz > cpu_of_vm:
+        return PASS
+    else:
+        return FAILED
+
+
+def check_for_vm_access_by_ssh_using_nat(self, virtual_machine_1, ostype=None):
+    """
+    This function allocates a public IP and creates a NAT rule for the VM,
+    then tries to SSH into the VM using that public IP.
+    It is used to check VM accessibility post migration.
+    """
+    if ostype == "windows":
+        self.debug("SSH check on the VM can't be done as it is a windows VM")
+        return
+
+    src_nat_ip_addrs = list_publicIP(
+            self.apiclient,
+            account=self.account.name,
+            domainid=self.account.domainid
+        )
+    self.assertEqual(
+            isinstance(src_nat_ip_addrs, list),
+            True,
+            "Check list response returns a valid list"
+        )
+    src_nat_ip_addr = src_nat_ip_addrs[0]
+        # Open up firewall port for SSH
+    firewall_rule = FireWallRule.create(
+                    self.apiclient,
+                    ipaddressid=src_nat_ip_addr.id,
+                    protocol=self.testdata["natrule"]["protocol"],
+                    cidrlist=['0.0.0.0/0'],
+                    startport=self.testdata["natrule"]["publicport"],
+                    endport=self.testdata["natrule"]["publicport"]
+                 )
+
+        # Create NAT rule
+    nat_rule = NATRule.create(
+            self.apiclient,
+            virtual_machine_1,
+            self.testdata["natrule"],
+            src_nat_ip_addr.id
+         )
+
+    list_nat_rule_response = list_nat_rules(
+            self.apiclient,
+            id=nat_rule.id
+         )
+    self.assertEqual(
+            isinstance(list_nat_rule_response, list),
+            True,
+            "Check list response returns a valid list"
+         )
+
+    self.assertNotEqual(
+            len(list_nat_rule_response),
+            0,
+            "Check Port Forwarding Rule is created"
+         )
+    self.assertEqual(
+            list_nat_rule_response[0].id,
+            nat_rule.id,
+            "Check Correct Port forwarding Rule is returned"
+        )
+        # SSH virtual machine to test port forwarding
+    try:
+        self.debug("SSHing into VM with IP address %s with NAT IP %s" %
+                       (
+                           virtual_machine_1.ipaddress,
+                           src_nat_ip_addr.ipaddress
+                       ))
+
+        virtual_machine_1.get_ssh_client(src_nat_ip_addr.ipaddress)
+        vm_response = VirtualMachine.list(
+                self.apiclient,
+                id=virtual_machine_1.id
+            )
+        if vm_response[0].state != 'Running':
+            self.fail(
+                    "State of VM : %s is not found to be Running" % str(
+                        virtual_machine_1.ipaddress))
+    except Exception as e:
+        self.fail(
+                "SSH Access failed for %s: %s" %
+                (virtual_machine_1.ipaddress, e)
+            )
+
+    try:
+        nat_rule.delete(self.apiclient)
+    except Exception as e:
+        self.fail("NAT Rule Deletion Failed: %s" % e)
+
+    try:
+        firewall_rule.delete(self.apiclient)
+    except Exception as e:
+        self.fail("Firewall Rule Deletion Failed: %s" % e)
+
+    return
+
+
+@ddt
+class TestStorageLiveMigrationVmware(cloudstackTestCase):
+
+    @classmethod
+    def setUpClass(cls):
+        testClient = super(TestStorageLiveMigrationVmware, cls).getClsTestClient()
+        cls.apiclient = testClient.getApiClient()
+        cls.testdata = testClient.getParsedTestDataConfig()
+        cls.hypervisor = cls.testClient.getHypervisorInfo()
+        cls.dbclient = cls.testClient.getDbConnection()
+        cls.exceptionList = []
+        # Get Zone, Domain and templates
+        cls.domain = get_domain(cls.apiclient)
+        cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
+
+        cls.template = get_template(
+            cls.apiclient,
+            cls.zone.id,
+            cls.testdata["ostype"])
+
+        cls._cleanup = []
+
+        if cls.hypervisor.lower() not in [
+                "vmware",
+                "kvm",
+                "xenserver",
+                "hyper-v"]:
+            raise unittest.SkipTest(
+                "Storage migration not supported on %s" %
+                cls.hypervisor)
+        # Get Hosts in the cluster and iscsi/vmfs storages for that cluster
+        iscsi_pools = []
+        nfs_pools = []
+        try:
+            cls.list_vmware_clusters = list_clusters(cls.apiclient, hypervisor="vmware")
+        except Exception as e:
+            raise unittest.SkipTest(e)
+
+        if len(cls.list_vmware_clusters) < 1:
+            raise cls.skipTest("There is no cluster available in the setup")
+        else:
+            for cluster in cls.list_vmware_clusters:
+                try:
+                    list_esx_hosts = list_hosts(cls.apiclient, clusterid=cluster.id)
+                except Exception as e:
+                    raise unittest.SkipTest(e)
+                if len(list_esx_hosts) > 1:
+                    try:
+                        list_storage = list_storage_pools(cls.apiclient, clusterid=cluster.id)
+                    except Exception as e:
+                        raise unittest.SkipTest(e)
+                    for storage in list_storage:
+                        if storage.type == "VMFS":
+                            iscsi_pools.append(storage)
+                    if len(iscsi_pools) > 1:
+                        break
+                    else:
+                        iscsi_pools = []
+                    for storage in list_storage:
+                        if storage.type == "NetworkFilesystem":
+                            nfs_pools.append(storage)
+                    if len(nfs_pools) > 1:
+                        break
+                    else:
+                        nfs_pools = []
+        if len(iscsi_pools) < 2 and len(nfs_pools) < 2:
+            raise unittest.SkipTest("Not enough storage pools available in the setup")
+        cls.hosts = list_esx_hosts
+        cls.pools = list_storage
+
+        # Create an account
+        cls.account = Account.create(
+            cls.apiclient,
+            cls.testdata["account"],
+            domainid=cls.domain.id
+        )
+        cls._cleanup.append(cls.account)
+
+        # Create Service offering
+        cls.service_offering = ServiceOffering.create(
+            cls.apiclient,
+            cls.testdata["service_offering"]
+        )
+        cls._cleanup.append(cls.service_offering)
+
+        if cls.zone.localstorageenabled:
+            cls.testdata["service_offering"]["storagetype"] = 'local'
+            cls.service_offering_local1 = ServiceOffering.create(
+            cls.apiclient,
+            cls.testdata["service_offering"]
+            )
+            cls._cleanup.append(cls.service_offering_local1)
+
+        # Create Disk offering
+        cls.disk_offering = DiskOffering.create(
+            cls.apiclient,
+            cls.testdata["disk_offering"]
+        )
+        cls._cleanup.append(cls.disk_offering)
+        # Create disk offering for resize
+        cls.resized_disk_offering = DiskOffering.create(
+            cls.apiclient,
+            cls.testdata["resized_disk_offering"]
+        )
+        cls._cleanup.append(cls.resized_disk_offering)
+
+        if cls.zone.localstorageenabled:
+            cls.testdata["disk_offering"]["storagetype"] = 'local'
+            cls.disk_offering_local1 = DiskOffering.create(
+            cls.apiclient,
+            cls.testdata["disk_offering"]
+            )
+            cls._cleanup.append(cls.disk_offering_local1)
+
+        # Register windows 2012 server Template if it is not present
+        cls.windows_template = get_windows_template(
+            cls.apiclient,
+            cls.zone.id,
+            ostype_desc="Windows Server 2012 (64-bit)",
+            template_type="USER",
+            hypervisor="VMware",
+            template_filter="all"
+            )
+
+        #cls.template = get_windows_template(cls.apiclient, cls.zone.id ,ostype_desc="Windows Server 2012 (64-bit)")
+        cls.testdata["vgpu"]["Windows Server 2012 (64-bit)"]["url"] = "http://10.147.28.7/templates/Windows2012/WindowsServer2012.ova"
+        cls.testdata["vgpu"]["Windows Server 2012 (64-bit)"]["format"] = "OVA"
+
+        if cls.windows_template == FAILED:
+            if "http://pleaseupdateURL/dummy.vhd" in cls.testdata[
+                    "vgpu"]["Windows Server 2012 (64-bit)"]["url"]:
+                raise unittest.SkipTest(
+                    "Check Test Data file if it has the valid template URL")
+            cls.windows_template = Template.register(
+                cls.apiclient,
+                cls.testdata["vgpu"]["Windows Server 2012 (64-bit)"],
+                hypervisor="VMware",
+                zoneid=cls.zone.id,
+            )
+            timeout = cls.testdata["vgpu"]["timeout"]
+
+            while True:
+                time.sleep(cls.testdata["vgpu"]["sleep"])
+                list_template_response = Template.list(
+                    cls.apiclient,
+                    templatefilter=cls.testdata["templatefilter"],
+                    id=cls.windows_template.id
+                )
+                if (isinstance(list_template_response, list)) is not True:
+                    raise unittest.SkipTest(
+                        "Check list template api response returns a valid list")
+
+                if len(list_template_response) == 0:
+                    raise unittest.SkipTest(
+                        "Check template registered is in List Templates")
+                template_response = list_template_response[0]
+                if template_response.isready:
+                    break
+                if timeout == 0:
+                    cls.debug("Failed to download windows template, we will be skipping windows related tests below")
+
+                timeout = timeout - 1
+
+        return
+
+    @classmethod
+    def tearDownClass(cls):
+        try:
+            cleanup_resources(cls.apiclient, cls._cleanup)
+        except Exception as e:
+            raise Exception("Warning: Exception during cleanup : %s" % e)
+
+    def setUp(self):
+        self.apiclient = self.testClient.getApiClient()
+        self.dbclient = self.testClient.getDbConnection()
+        self.cleanup = []
+
+    def tearDown(self):
+
+        try:
+            cleanup_resources(self.apiclient, self.cleanup)
+        except Exception as e:
+            raise Exception("Warning: Exception during cleanup : %s" % e)
+       # Cancel maintenance state of all hosts
+        list_host = list_hosts(self.apiclient, listall=True)
+        for host in list_host:
+            if host.resourcestate == "Maintenance":
+                Host.cancelMaintenance(self.apiclient, id=host.id)
+                Host.getState(
+                              self.apiclient,
+                              host.id,
+                              "Up",
+                              "Enabled"
+                             )
+        # Cancel maintenance state of all storage pools
+        list_pools = list_storage_pools(self.apiclient, listall=True)
+        for pool in list_pools:
+            if pool.state == "Maintenance":
+                cmd = cancelStorageMaintenance.cancelStorageMaintenanceCmd()
+                cmd.id = pool.id
+                self.apiclient.cancelStorageMaintenance(cmd)
+                StoragePool.getState(
+                                     self.apiclient,
+                                     pool.id,
+                                     "Up"
+                                    )
+
+    def get_ssvm_state(self, apiclient, vmid, state, timeout=600):
+        """List VM and check if its state is as expected
+        @returnValue - List[Result, Reason]
+                       1) Result - FAIL if there is any exception
+                       in the operation or VM state does not change
+                       to expected state in given time else PASS
+                       2) Reason - Reason for failure"""
+
+        returnValue = [FAIL, "VM state not trasited to %s,\
+                        operation timed out" % state]
+
+        while timeout > 0:
+            try:
+                projectid = None
+                if hasattr(self, "projectid"):
+                    projectid = self.projectid
+                vms = list_ssvms(self.apiclient, projectid=projectid,
+                          id=vmid, listAll=True)
+                validationresult = validateList(vms)
+                if validationresult[0] == FAIL:
+                    raise Exception("VM list validation failed: %s" % validationresult[2])
+                elif str(vms[0].state).lower().decode("string_escape") == str(state).lower():
+                    returnValue = [PASS, None]
+                    break
+            except Exception as e:
+                returnValue = [FAIL, e]
+                break
+            time.sleep(60)
+            timeout -= 60
+        return returnValue
+
+    def deploy_virtual_machine(self, service_offering_id, vm, template_id):
+        """
+        Function to Deploy VMs
+        """
+        virtual_machine = VirtualMachine.create(
+        self.apiclient,
+        self.testdata[vm],
+        accountid=self.account.name,
+        zoneid=self.zone.id,
+        domainid=self.account.domainid,
+        serviceofferingid=service_offering_id,
+        templateid=template_id,
+        hostid=self.hosts[0].id
+        )
+        virtual_machine.getState(
+                                 self.apiclient,
+                                 "Running"
+                                 )
+
+        return virtual_machine
+
+    def GetDestinationHost(self, hostsToavoid, vm, scope):
+        """
+        This method gives us the destination host to which VM will be migrated
+        It takes the source host i.e. hostsToavoid as input
+        """
+        destinationHost = None
+        destinationCluster = None
+        host = list_hosts(self.apiclient, id=hostsToavoid)
+        clusters = list_clusters(self.apiclient, listall=True)
+        if scope == "across_cluster":
+            for cluster in clusters:
+                if cluster.id not in host[0].clusterid:
+                    hosts_in_cluster = list_hosts(self.apiclient, clusterid=cluster.id)
+                    if len(hosts_in_cluster) != 0:
+                        destinationCluster = cluster
+                        break
+            hosts = list_hosts(self.apiclient, clusterid=destinationCluster.id)
+            for host in hosts:
+                response = check_host_capacity(self, host.id, vm)
+                if response == PASS:
+                    destinationHost = host
+            return destinationHost
+        elif scope == "within_cluster":
+            hosts = list_hosts(self.apiclient, clusterid=host[0].clusterid)
+            for host in hosts:
+                response = check_host_capacity(self, host.id, vm)
+                if host.id not in hostsToavoid and response == PASS:
+                    destinationHost = host
+                    break
+            return destinationHost
+
+    def GetDestinationHostLocal(self, hostsToavoid, vm, scope):
+        """
+        This method gives us the destination host to which VM will be migrated
+        It takes the source host i.e. hostsToavoid as input
+        """
+        destinationHost = None
+        destinationCluster = None
+        if scope == "across_cluster":
+            host = list_hosts(self.apiclient, id=hostsToavoid)
+            clusters = list_clusters(self.apiclient, listall=True)
+            for cluster in clusters:
+                if cluster.id not in host[0].clusterid:
+                    hosts_in_cluster = list_hosts(self.apiclient, clusterid=cluster.id)
+                    if len(hosts_in_cluster) != 0:
+                        destinationCluster = cluster
+                        break
+            hosts = list_hosts(self.apiclient, clusterid=destinationCluster.id)
+            for host in hosts:
+                response = check_host_capacity(self, host.id, vm)
+                if host.id not in hostsToavoid and response == PASS:
+                    pool = list_storage_pools(self.apiclient, scope="Host", name=host.name + " Local Storage")
+                    if pool:
+                        destinationHost = host
+                        break
+            return destinationHost
+
+        for host in self.hosts:
+            response = check_host_capacity(self, host.id, vm)
+            if host.id not in hostsToavoid and response == PASS:
+                pool = list_storage_pools(self.apiclient, scope="Host", name=host.name + " Local Storage")
+                if pool:
+                    destinationHost = host
+                    break
+        return destinationHost
+
+    def takeVmSnapshotNegative(self, vm_id):
+        """
+        This method takes VM snapshots and stores the exception
+        To be used in the negative scenario where we take snapshot when
+        migration is in progress
+        """
+        try:
+            with self.assertRaises(Exception):
+                VmSnapshot.create(self.apiclient, vmid=vm_id)
+
+        except Exception as e:
+            self.exceptionList.append(e)
+
+    def resizeVolumeNegative(self, volume):
+        """
+        This method resizes volume and stores the exception
+        To be used in the negative scenario where we resize a volume when
+        migration is in progress
+        """
+        try:
+            with self.assertRaises(Exception):
+                volume.resize(self.apiclient, diskofferingid=self.resized_disk_offering.id)
+
+        except Exception as e:
+            self.exceptionList.append(e)
+
+    def takeVolumeSnapshotNegative(self, volumeid):
+        """
+        This method takes volume snapshots and stores the exception
+        To be used in the negative scenario where we take snapshot when
+        migration is in progress
+        """
+        try:
+            with self.assertRaises(Exception):
+                Snapshot.create(self.apiclient, volume_id=volumeid)
+
+        except Exception as e:
+            self.exceptionList.append(e)
+
+    def stopVmNegative(self, vm):
+        """
+        This method tries to stop a VM and stores the exception.
+        To be used in the negative scenario where we stop a VM while
+        migration is in progress
+        """
+        try:
+            with self.assertRaises(Exception):
+                vm.stop(self.apiclient)
+        except Exception as e:
+            self.exceptionList.append(e)
+
+    @data(('VMFS', 'within_cluster', 'linux'), ('VMFS', 'within_cluster', 'windows'), ('VMFS', 'across_cluster', 'linux'), ('VMFS', 'across_cluster', 'windows'),
+          ('NetworkFilesystem', 'within_cluster', 'linux'), ('NetworkFilesystem', 'within_cluster', 'windows'), ('NetworkFilesystem', 'across_cluster', 'linux'),
+          ('NetworkFilesystem', 'across_cluster', 'windows'))
+    @unpack
+    @attr(tags=["advanced", "basic", "vmware", "vmfs", "shared"], required_hardware="true")
+    def test_01_vm_and_volumes_live_migration_for_vmware_vmfs(self, first_value, second_value, third_value):
+        """
+        This Test Path tests vMotion for NFS as well as VMFS within cluster,
+        across cluster and for both windows and linux VMs using DATA DRIVEN TESTING.
+        This test will run once for each of the 8 configurations given as @data
+        1. Migrate VM from one host to another
+        2. Migrate VMs ROOT volume from one storage to another
+        3. Migrate VM to another Host and ROOT volume to another storage
+        4. Attach a data disk to VM, migrate VM to a different host and its volumes to different pools.
+        5. Upload a volume, attach it to VM, migrate VM to a different host and its volumes to different pools.
+        6. Create volume snapshots on all volumes , migrate VM to a different host and its volumes to different pools.
+        7. Resize the data disk, migrate VM to a different host and its volumes to different pools.
+        8. Restore the VM, migrate VM to a different host and its volumes to different pools.
+        9. Detach the data disk, create another VM, attach the data disk to that VM and then migrate that VM and its volumes.
+        10. Detach upload volume, attach it to the 2nd VM, and then migrate that VM and its volumes.
+        11. Create snapshots for all volumes of 2nd VM, then migrate VM and its volumes.
+
+        After each storage migration step, the following validation is done
+        a) Create VM snapshots to check data integrity - @method used : VmSnapshotToCheckDataIntegrity(self, vm)
+        b) Login to the Host/storage pool and check for the VMDK and VMX files for VM and its volumes - @method used : check_files(self, vm, destinationHost)
+        c) Check for VM accessibility by sshing to the VM - @method used : check_for_vm_access_by_ssh_using_nat(self, virtual_machine_1)
+        """
+        storage_type = first_value
+        storage_scope = second_value
+        ostype = third_value
+
+        if ostype == 'windows' and not self.windows_template:
+            self.skipTest("Windows template is not present, so skipping this test")
+        elif ostype == 'windows':
+            template_id = self.windows_template.id
+        else:
+            template_id = self.template.id
+
+        count_host = 0
+        count_pool = 0
+        storage_pool = []
+        if len(self.list_vmware_clusters) < 2:
+            if (storage_scope == "across_cluster"):
+                raise self.skipTest("The setup doesn't have more than one cluster, so can't execute these set of tests")
+        if len(self.list_vmware_clusters) >= 2:
+            for cluster in self.list_vmware_clusters:
+                if len(list_hosts(self.apiclient, clusterid=cluster.id)) >= 1:
+                    count_host += 1
+                pools = list_storage_pools(self.apiclient, clusterid=cluster.id)
+                for pool in pools:
+                    if pool.storage == storage_type:
+                        storage_pool.append(pool)
+                if len(storage_pool) >= 1:
+                    count_pool += 1
+                storage_pool = []
+        #if storage_scope == "across_cluster":
+        if count_host < 2 or count_pool < 2:
+            raise self.skipTest("The setup doesn't have enough pools or enough hosts. To run these tests the setup must have atleast 2 clusters, \
+            each having min 1 host and 1 vmfs storage pools")
+
+        self.debug("---------------This is the test no 1--------------")
+        """
+        Create a VM, live migrate the VM
+        """
+        vm = "virtual_machine2"
+        virtual_machine_1 = self.deploy_virtual_machine(self.service_offering.id, vm, template_id)
+        self.cleanup.append(virtual_machine_1)
+        vm = list_virtual_machines(self.apiclient, id=virtual_machine_1.id, listall=True)[0]
+        #Get destination host
+        destinationHost = self.GetDestinationHost(vm.hostid, virtual_machine_1, storage_scope)
+        #Migrate the VM
+        if storage_scope == "across_cluster":
+            vol_list = []
+            destinationPools = []
+            vm = MigrateVmWithVolume(self, virtual_machine_1, destinationHost, vol_list, destinationPools)
+            VmSnapshotToCheckDataIntegrity(self, vm)
+            check_files(self, vm, destinationHost)
+            check_for_vm_access_by_ssh_using_nat(self, virtual_machine_1, ostype)
+        else:
+            vm = MigrateVm(self, virtual_machine_1, destinationHost)
+            check_for_vm_access_by_ssh_using_nat(self, virtual_machine_1, ostype)
+
+        self.debug("---------------This is the test no 2--------------")
+        """
+        Migrate the ROOT Volume
+        Can't migrate a volume to another cluster, so won't run this test in that case
+        """
+        # Get ROOT volume and destination pool
+        if storage_scope != "across_cluster":
+            vol_list = list_volumes(self.apiclient, virtualmachineid=vm.id, type="ROOT", listall=True)
+            root_vol = vol_list[0]
+            destinationPool = GetDestinationStoragePool(self, root_vol.storage, storage_scope, storage_type)
+            #Migrate ROOT volume
+            islive = True
+            MigrateDataVolume(self, root_vol, destinationPool, islive)
+            VmSnapshotToCheckDataIntegrity(self, vm)
+            check_files(self, vm, destinationHost)
+            check_for_vm_access_by_ssh_using_nat(self, virtual_machine_1, ostype)
+
+        self.debug("---------------This is the test no 3--------------")
+        """
+        Migrate the VM and ROOT volume
+        """
+        #Get all volumes to be migrated
+        vm = list_virtual_machines(self.apiclient, id=virtual_machine_1.id, listall=True)[0]
+        destinationHost, destinationPools, vol_list = get_destination_pools_hosts(self, vm, storage_scope, storage_type)
+        vm = MigrateVmWithVolume(self, virtual_machine_1, destinationHost, vol_list, destinationPools)
+        VmSnapshotToCheckDataIntegrity(self, vm)
+        check_files(self, vm, destinationHost)
+        check_for_vm_access_by_ssh_using_nat(self, virtual_machine_1, ostype)
+
+        self.debug("---------------This is the test no 4--------------")
+        """
+        Add a data disk and migrate vm, data disk and root disk
+        """
+
+        data_disk_1 = Volume.create(
+                                    self.apiclient,
+                                    self.testdata["volume"],
+                                    zoneid=self.zone.id,
+                                    account=self.account.name,
+                                    domainid=self.account.domainid,
+                                    diskofferingid=self.disk_offering.id
+                                  )
+
+        self.debug("Created volume with ID: %s" % data_disk_1.id)
+
+        virtual_machine_1.attach_volume(
+                         self.apiclient,
+                         data_disk_1
+                         )
+
+        destinationHost, destinationPools, vol_list = get_destination_pools_hosts(self, vm, storage_scope, storage_type)
+        vm = MigrateVmWithVolume(self, virtual_machine_1, destinationHost, vol_list, destinationPools)
+        VmSnapshotToCheckDataIntegrity(self, vm)
+        check_files(self, vm, destinationHost)
+        check_for_vm_access_by_ssh_using_nat(self, virtual_machine_1, ostype)
+
+        self.debug("---------------This is the test no 5--------------")
+        """
+        Upload a Volume, Attach it to the VM, Migrate all the volumes and VM.
+        """
+        #upload a volume
+        self.testdata["configurableData"]["upload_volume"]["format"] = "OVA"
+        self.testdata["configurableData"]["upload_volume"]["url"] = "http://nfs1.lab.vmops.com/templates/burbank-systemvm-08012012.ova"
+        upload_volume = Volume.upload(
+                                      self.apiclient,
+                                      self.testdata["configurableData"]["upload_volume"],
+                                      account=self.account.name,
+                                      domainid=self.domain.id,
+                                      zoneid=self.zone.id
+                                      )
+        upload_volume.wait_for_upload(self.apiclient)
+        virtual_machine_1.attach_volume(
+                         self.apiclient,
+                         upload_volume
+                         )
+
+        destinationHost, destinationPools, vol_list = get_destination_pools_hosts(self, vm, storage_scope, storage_type)
+        vm = MigrateVmWithVolume(self, virtual_machine_1, destinationHost, vol_list, destinationPools)
+        VmSnapshotToCheckDataIntegrity(self, vm)
+        self.debug("........................checking for files before taking snapshot ..................................")
+        check_files(self, vm, destinationHost)
+        check_for_vm_access_by_ssh_using_nat(self, virtual_machine_1, ostype)
+
+        self.debug("---------------This is the test no 6--------------")
+        """
+        Create snapshots on all the volumes, Migrate all the volumes and VM.
+        """
+        #Get ROOT Volume
+        vol_for_snap = list_volumes(self.apiclient, virtualmachineid=vm.id, listall=True)
+        for vol in vol_for_snap:
+            snapshot = Snapshot.create(
+                                        self.apiclient,
+                                         volume_id=vol.id
+                                        )
+            snapshot.validateState(
+                                   self.apiclient,
+                                    snapshotstate="backedup",
+                                    )
+        # Migrate all volumes and VMs
+        self.debug("..................................checking for files just after taking snapshot...................................")
+        check_files(self, vm, destinationHost)
+        destinationHost, destinationPools, vol_list = get_destination_pools_hosts(self, vm, storage_scope, storage_type)
+        vm = MigrateVmWithVolume(self, virtual_machine_1, destinationHost, vol_list, destinationPools)
+        VmSnapshotToCheckDataIntegrity(self, vm)
+        self.debug("........................checking for files after taking snapshot and migrating VMs........................")
+        check_files(self, vm, destinationHost)
+        check_for_vm_access_by_ssh_using_nat(self, virtual_machine_1, ostype)
+
+        self.debug("---------------This is the test no 7--------------")
+        """
+        Resize the data volume , Migrate all the volumes and VM.
+        """
+        data_disk_1.resize(
+                           self.apiclient,
+                           diskofferingid=self.resized_disk_offering.id
+                           )
+        # Migrate all volumes and VMs
+        destinationHost, destinationPools, vol_list = get_destination_pools_hosts(self, vm, storage_scope, storage_type)
+        vm = MigrateVmWithVolume(self, virtual_machine_1, destinationHost, vol_list, destinationPools)
+        VmSnapshotToCheckDataIntegrity(self, vm)
+        check_files(self, vm, destinationHost)
+        check_for_vm_access_by_ssh_using_nat(self, virtual_machine_1, ostype)
+
+        self.debug("---------------This is the test no 8--------------")
+        """
+        Restore the VM , Migrate all the volumes and VM.
+        """
+        virtual_machine_1.restore(self.apiclient)
+        virtual_machine_1.getState(
+                                     self.apiclient,
+                                     "Running"
+                                     )
+        # Migrate the VM and its volumes
+        destinationHost, destinationPools, vol_list = get_destination_pools_hosts(self, vm, storage_scope, storage_type)
+        vm = MigrateVmWithVolume(self, virtual_machine_1, destinationHost, vol_list, destinationPools)
+        VmSnapshotToCheckDataIntegrity(self, vm)
+        check_files(self, vm, destinationHost)
+        check_for_vm_access_by_ssh_using_nat(self, virtual_machine_1, ostype)
+
+        self.debug("---------------This is the test no 9--------------")
+        """
+        Detach the Data disk, Deploy another VM, attach the data disk and migrate.
+        """
+
+        virtual_machine_1.detach_volume(
+                         self.apiclient,
+                         data_disk_1
+                         )
+        vm = "virtual_machine3"
+        virtual_machine_2 = self.deploy_virtual_machine(self.service_offering.id, vm, self.template.id)
+        self.cleanup.append(virtual_machine_2)
+        virtual_machine_2.attach_volume(
+                         self.apiclient,
+                         data_disk_1
+                         )
+        vm = list_virtual_machines(self.apiclient, id=virtual_machine_2.id, listall=True)[0]
+        destinationHost, destinationPools, vol_list = get_destination_pools_hosts(self, vm, storage_scope, storage_type)
+        vm = MigrateVmWithVolume(self, virtual_machine_2, destinationHost, vol_list, destinationPools)
+        VmSnapshotToCheckDataIntegrity(self, vm)
+        check_files(self, vm, destinationHost)
+        check_for_vm_access_by_ssh_using_nat(self, virtual_machine_2, ostype)
+
+        self.debug("---------------This is the test no 10--------------")
+        """
+        Detach the uploaded volume, attach it to another vm and migrate.
+        """
+
+        virtual_machine_1.detach_volume(
+                         self.apiclient,
+                         upload_volume
+                         )
+
+        virtual_machine_2.attach_volume(
+                         self.apiclient,
+                         upload_volume
+                         )
+        destinationHost, destinationPools, vol_list = get_destination_pools_hosts(self, vm, storage_scope, storage_type)
+        vm = MigrateVmWithVolume(self, virtual_machine_2, destinationHost, vol_list, destinationPools)
+        VmSnapshotToCheckDataIntegrity(self, vm)
+        check_files(self, vm, destinationHost)
+        check_for_vm_access_by_ssh_using_nat(self, virtual_machine_2, ostype)
+
+        self.debug("---------------This is the test no 11--------------")
+        """
+        Create snapshots on all the volumes, Migrate all the volumes and VM.
+        """
+        #Get ROOT Volume
+        vol_for_snap = list_volumes(self.apiclient, virtualmachineid=vm.id, listall=True)
+        for vol in vol_for_snap:
+            snapshot = Snapshot.create(
+                                        self.apiclient,
+                                        volume_id=vol.id
+                                        )
+            snapshot.validateState(
+                                   self.apiclient,
+                                    snapshotstate="backedup",
+                                    )
+        # Migrate all volumes and VMs
+
+        destinationHost, destinationPools, vol_list = get_destination_pools_hosts(self, vm, storage_scope, storage_type)
+        vm = MigrateVmWithVolume(self, virtual_machine_2, destinationHost, vol_list, destinationPools)
+        VmSnapshotToCheckDataIntegrity(self, vm)
+        check_files(self, vm, destinationHost)
+        check_for_vm_access_by_ssh_using_nat(self, virtual_machine_2, ostype)
+
+    @data(('within_cluster', 'linux'), ('within_cluster', 'windows'), ('across_cluster', 'linux'), ('across_cluster', 'windows'))
+    @unpack
+    @attr(tags=["advanced", "basic", "vmware", "vmfs", "local"], required_hardware="true")
+    def test_02_vm_and_volumes_live_migration_for_vmware_vmfs_local_storage(self, first_value, second_value):
+        """
+        This Test Path tests vMotion for VM in local storage within cluster,
+        across cluster and for both windows and linux VMs using DATA DRIVEN TESTING
+        1. Migrate VM from one host to another
+        2. Migrate VMs ROOT volume from one storage to another
+        3. Migrate VM to another Host and ROOT volume to another storage
+        4. Attach a data disk to VM, migrate VM to a different host and its volumes to different pools.
+        5. Upload a volume, attach it to VM, migrate VM to a different host and its volumes to different pools.
+        6. Create volume snapshots on all volumes , migrate VM to a different host and its volumes to different pools.
+        7. Resize the data disk, migrate VM to a different host and its volumes to different pools.
+        8. Restore the VM, migrate VM to a different host and its volumes to different pools.
+        9. Detach the data disk, create another VM, attach the data disk to that VM and then migrate that VM and its volumes.
+        10. Detach upload volume, attach it to the 2nd VM, and then migrate that VM and its volumes.
+        11. Create snapshots for all volumes of 2nd VM, then migrate VM and its volumes.
+
+        After each storage migration step, the following validation is done
+        a) Create VM snapshots to check data integrity - @method used : VmSnapshotToCheckDataIntegrity(self, vm)
+        c) Check for VM accessibility by sshing to the VM - @method used : check_for_vm_access_by_ssh_using_nat(self, virtual_machine_1)
+        """
+        if not self.zone.localstorageenabled:
+            raise self.skipTest("The setup doesn't have local storage enabled")
+        scope = first_value
+        ostype = second_value
+        if ostype == 'windows' and not self.windows_template:
+            self.skipTest("Windows template is not present, so skipping this test")
+        elif ostype == 'windows':
+            template_id = self.windows_template.id
+        else:
+            template_id = self.template.id
+
+        count_host = 0
+        count_pool = 0
+        pool_local = []
+        if len(self.list_vmware_clusters) < 2:
+            if (scope == "across_cluster"):
+                raise self.skipTest("The setup doesn't have more than one cluster, so can't execute these set of tests")
+        if len(self.list_vmware_clusters) >= 2:
+            for cluster in self.list_vmware_clusters:
+                if len(list_hosts(self.apiclient, clusterid=cluster.id)) >= 1:
+                    count_host += 1
+                pools = list_storage_pools(self.apiclient, clusterid=cluster.id)
+                for pool in pools:
+                    if pool.scope == "HOST":
+                        pool_local.append(pool)
+                if len(pool_local) >= 1:
+                    count_pool += 1
+                pool_local = []
+        if scope == "across_cluster":
+            if count_host < 2:
+                raise self.skipTest("The setup doesn't have enough pools or enough hosts. To run these tests the setup must have atleast 2 clusters, each having min 2 hosts ")
+
+        self.debug("---------------This is the test no 1--------------")
+        """
+        Create a VM, live migrate the VM
+        """
+        vm = "virtual_machine2"
+        virtual_machine_1 = self.deploy_virtual_machine(self.service_offering_local1.id, vm, template_id)
+        self.cleanup.append(virtual_machine_1)
+        vm = list_virtual_machines(self.apiclient, id=virtual_machine_1.id, listall=True)[0]
+
+        #Get destination host
+        destinationHost = self.GetDestinationHostLocal(vm.hostid, virtual_machine_1, scope)
+        #Migrate the VM
+        vol_list = []
+        destinationPools = []
+        vm = MigrateVmWithVolume(self, virtual_machine_1, destinationHost, vol_list, destinationPools)
+        VmSnapshotToCheckDataIntegrity(self, vm)
+        check_for_vm_access_by_ssh_using_nat(self, virtual_machine_1, ostype)
+
+        self.debug("---------------This is the test no 2--------------")
+        """
+        Add a data disk and migrate vm
+        """
+
+        data_disk_1 = Volume.create(
+                                    self.apiclient,
+                                    self.testdata["volume"],
+                                    zoneid=self.zone.id,
+                                    account=self.account.name,
+                                    domainid=self.account.domainid,
+                                    diskofferingid=self.disk_offering_local1.id
+                                  )
+
+        self.debug("Created volume with ID: % s" % data_disk_1.id)
+
+        virtual_machine_1.attach_volume(
+                         self.apiclient,
+                         data_disk_1
+                         )
+
+        destinationHost = self.GetDestinationHostLocal(vm.hostid, vm, scope)
+        vm = MigrateVmWithVolume(self, virtual_machine_1, destinationHost, vol_list, destinationPools)
+        VmSnapshotToCheckDataIntegrity(self, vm)
+        check_for_vm_access_by_ssh_using_nat(self, virtual_machine_1, ostype)
+
+        self.debug("---------------This is the test no 3--------------")
+        """
+        Upload a Volume, Attach it to the VM, Migrate all the volumes and VM.
+        """
+        #upload a volume
+        self.testdata["configurableData"]["upload_volume"]["format"] = "OVA"
+        self.testdata["configurableData"]["upload_volume"]["url"] = "http://nfs1.lab.vmops.com/templates/burbank-systemvm-08012012.ova"
+        upload_volume = Volume.upload(
+                                      self.apiclient,
+                                      self.testdata["configurableData"]["upload_volume"],
+                                      account=self.account.name,
+                                      domainid=self.domain.id,
+                                      zoneid=self.zone.id
+                                      )
+        upload_volume.wait_for_upload(self.apiclient)
+        virtual_machine_1.attach_volume(
+                         self.apiclient,
+                         upload_volume
+                         )
+
+        destinationHost = self.GetDestinationHostLocal(vm.hostid, vm, scope)
+        vm = MigrateVmWithVolume(self, virtual_machine_1, destinationHost, vol_list, destinationPools)
+        VmSnapshotToCheckDataIntegrity(self, vm)
+        check_for_vm_access_by_ssh_using_nat(self, virtual_machine_1, ostype)
+
+        self.debug("---------------This is the test no 6--------------")
+        """
+        Create snapshots on all the volumes, Migrate VM.
+        """
+        #Get ROOT Volume
+        vol_for_snap = list_volumes(self.apiclient, virtualmachineid=vm.id, listall=True)
+        for vol in vol_for_snap:
+            snapshot = Snapshot.create(
+                                        self.apiclient,
+                                        volume_id=vol.id
+                                        )
+            snapshot.validateState(
+                                   self.apiclient,
+                                    snapshotstate="backedup",
+                                    )
+        # Migrate all volumes and VMs
+
+        destinationHost = self.GetDestinationHostLocal(vm.hostid, vm, scope)
+        vm = MigrateVmWithVolume(self, virtual_machine_1, destinationHost, vol_list, destinationPools)
+        VmSnapshotToCheckDataIntegrity(self, vm)
+        check_for_vm_access_by_ssh_using_nat(self, virtual_machine_1, ostype)
+
+        self.debug("---------------This is the test no 7--------------")
+        """
+        Resize the data volume , Migrate all the volumes and VM.
+        """
+        data_disk_1.resize(
+                           self.apiclient,
+                           diskofferingid=self.resized_disk_offering.id
+                           )
+        # Migrate all volumes and VMs
+        destinationHost = self.GetDestinationHostLocal(virtual_machine_1.hostid, vm, scope)
+        vm = MigrateVmWithVolume(self, virtual_machine_1, destinationHost, vol_list, destinationPools)
+        VmSnapshotToCheckDataIntegrity(self, vm)
+        check_for_vm_access_by_ssh_using_nat(self, virtual_machine_1, ostype)
+
+        self.debug("---------------This is the test no 8--------------")
+        """
+        Restore the VM , Migrate all the volumes and VM.
+        """
+        virtual_machine_1.restore(self.apiclient)
+        virtual_machine_1.getState(
+                                     self.apiclient,
+                                     "Running"
+                                     )
+        # Migrate the VM and its volumes
+        destinationHost = self.GetDestinationHostLocal(vm.hostid, vm, scope)
+        vm = MigrateVmWithVolume(self, virtual_machine_1, destinationHost, vol_list, destinationPools)
+        VmSnapshotToCheckDataIntegrity(self, vm)
+        check_for_vm_access_by_ssh_using_nat(self, virtual_machine_1, ostype)
+
+        self.debug("---------------This is the test no 9--------------")
+        """
+        Detach the Data disk, Deploy another VM, attach the data disk and migrate.
+        """
+
+        virtual_machine_1.detach_volume(
+                         self.apiclient,
+                         data_disk_1
+                         )
+        vm = "virtual_machine3"
+        virtual_machine_2 = self.deploy_virtual_machine(self.service_offering_local1.id, vm, self.template.id)
+        self.cleanup.append(virtual_machine_2)
+        virtual_machine_2.attach_volume(
+                         self.apiclient,
+                         data_disk_1
+                         )
+        vm = list_virtual_machines(self.apiclient, id=virtual_machine_2.id, listall=True)[0]
+        destinationHost = self.GetDestinationHostLocal(vm.hostid, vm, scope)
+        vm = MigrateVmWithVolume(self, virtual_machine_2, destinationHost, vol_list, destinationPools)
+        VmSnapshotToCheckDataIntegrity(self, vm)
+        check_for_vm_access_by_ssh_using_nat(self, virtual_machine_2, ostype)
+
+        self.debug("---------------This is the test no 10--------------")
+        """
+        Detach the uploaded volume, attach it to another vm and migrate.
+        """
+
+        virtual_machine_1.detach_volume(
+                         self.apiclient,
+                         upload_volume
+                         )
+
+        virtual_machine_2.attach_volume(
+                         self.apiclient,
+                         upload_volume
+                         )
+        destinationHost = self.GetDestinationHostLocal(vm.hostid, vm, scope)
+        vm = MigrateVmWithVolume(self, virtual_machine_2, destinationHost, vol_list, destinationPools)
+        VmSnapshotToCheckDataIntegrity(self, vm)
+        check_for_vm_access_by_ssh_using_nat(self, virtual_machine_2, ostype)
+
+        self.debug("---------------This is the test no 11--------------")
+        """
+        Create snapshots on all the volumes, Migrate all the volumes and VM.
+        """
+        #Get ROOT Volume
+        vol_for_snap = list_volumes(self.apiclient, virtualmachineid=vm.id, listall=True)
+        for vol in vol_for_snap:
+            snapshot = Snapshot.create(
+                                        self.apiclient,
+                                        volume_id=vol.id
+                                        )
+            snapshot.validateState(
+                                   self.apiclient,
+                                    snapshotstate="backedup",
+                                    )
+        # Migrate all volumes and VMs
+
+        destinationHost = self.GetDestinationHostLocal(vm.hostid, vm, scope)
+        vm = MigrateVmWithVolume(self, virtual_machine_2, destinationHost, vol_list, destinationPools)
+        VmSnapshotToCheckDataIntegrity(self, vm)
+        check_for_vm_access_by_ssh_using_nat(self, virtual_machine_2, ostype)
+
+    @data(('VMFS', 'within_cluster', 'linux'), ('VMFS', 'within_cluster', 'windows'), ('VMFS', 'across_cluster', 'linux'), ('VMFS', 'across_cluster', 'windows'),
+          ('NetworkFilesystem', 'within_cluster', 'linux'), ('NetworkFilesystem', 'within_cluster', 'windows'), ('NetworkFilesystem', 'across_cluster', 'linux'),
+          ('NetworkFilesystem', 'across_cluster', 'windows'))
+    @unpack
+    @attr(tags=["advanced", "basic", "vmware", "vmfs", "zwps"], required_hardware="true")
+    def test_03_vm_and_volumes_live_migration_for_vmware_vmfs_across_zwps_cwps(self, first_value, second_value, third_value):
+        """
+        This test path tests vMotion for NFS as well as VMFS storage within a cluster,
+        across clusters and between zone-wide (ZWPS) and cluster-wide (CWPS) primary storage, for both Windows and Linux VMs using DATA DRIVEN TESTING
+        1. Migrate VM from one host to another
+        2. Migrate VMs ROOT volume from one storage to another
+        3. Migrate VM to another Host and ROOT volume to another storage
+        4. Attach a data disk to VM, migrate VM to a different host and its volumes to different pools.
+        5. Upload a volume, attach it to VM, migrate VM to a different host and its volumes to different pools.
+        6. Create volume snapshots on all volumes , migrate VM to a different host and its volumes to different pools.
+        7. Resize the data disk, migrate VM to a different host and its volumes to different pools.
+        8. Restore the VM, migrate VM to a different host and its volumes to different pools.
+        9. Detach the data disk, create another VM, attach the data disk to that VM and then migrate that VM and its volumes.
+        10. Detach the uploaded volume, attach it to the 2nd VM, and then migrate that VM and its volumes.
+        11. Create snapshots for all volumes of the 2nd VM, then migrate the VM and its volumes.
+
+        After each storage migration step, the following validations are done:
+        a) Create VM snapshots to check data integrity - @method used : VmSnapshotToCheckDataIntegrity(self, vm)
+        b) Log in to the host/storage pool and check for the VMDK and VMX files of the VM and its volumes - @method used : check_files(self, vm, destinationHost)
+        c) Check for VM accessibility by SSHing to the VM - @method used : check_for_vm_access_by_ssh_using_nat(self, virtual_machine_1)
+        """
+        storage_type = first_value
+        storage_scope = second_value
+        ostype = third_value
+
+        if ostype == 'windows' and not self.windows_template:
+            self.skipTest("Windows template is not present, so skipping this test")
+        elif ostype == 'windows':
+            template_id = self.windows_template.id
+        else:
+            template_id = self.template.id
+
+        scope = "ZONE"
+        list_zwps_pools = list_storage_pools(self.apiclient, scope="ZONE", listall=True)
+        zwps_pools = []
+        for pool in list_zwps_pools:
+            if pool.type == storage_type:
+                zwps_pools.append(pool)
+        if len(zwps_pools) < 2:
+            raise self.skipTest("The setup doesn't have enough zone wide primary storages of %s type, we need atleast 2" % storage_type)
+
+        count_host = 0
+        count_pool = 0
+        pool_vmfs = []
+        if len(self.list_vmware_clusters) < 2:
+            if storage_scope == "across_cluster":
+                raise self.skipTest("The setup doesn't have more than one cluster, so can't execute these set of tests")
+        if len(self.list_vmware_clusters) >= 2:
+            for cluster in self.list_vmware_clusters:
+                if len(list_hosts(self.apiclient, clusterid=cluster.id)) >= 1:
+                    count_host += 1
+                pools = list_storage_pools(self.apiclient, clusterid=cluster.id)
+                for pool in pools:
+                    if pool.type == storage_type:
+                        pool_vmfs.append(pool)
+                if len(pool_vmfs) >= 1:
+                    count_pool += 1
+                pool_vmfs = []
+        #if storage_scope == "across_cluster":
+        if count_host < 2 | count_pool < 2:
+            raise self.skipTest("The setup doesn't have enough pools or enough hosts. To run these tests the setup must have atleast 2 clusters, each having min 1 host and 1 vmfs storage pools")
+
+        self.debug("---------------This is the test no 1--------------")
+        """
+        Create a VM, live migrate the VM
+        """
+        vm = "virtual_machine2"
+        virtual_machine_1 = self.deploy_virtual_machine(self.service_offering.id, vm, template_id)
+        self.cleanup.append(virtual_machine_1)
+        #Get destination host
+        vm = list_virtual_machines(self.apiclient, id=virtual_machine_1.id, listall=True)[0]
+        destinationHost = self.GetDestinationHost(vm.hostid, virtual_machine_1, storage_scope)
+        #Migrate the VM
+        if storage_scope == "different_cluster":
+            vol_list = []
+            destinationPools = []
+            vm = MigrateVmWithVolume(self, virtual_machine_1, destinationHost, vol_list, destinationPools)
+            VmSnapshotToCheckDataIntegrity(self, vm)
+            check_files(self, vm, destinationHost)
+            check_for_vm_access_by_ssh_using_nat(self, virtual_machine_1, ostype)
+        else:
+            vm = MigrateVm(self, virtual_machine_1, destinationHost)
+            check_for_vm_access_by_ssh_using_nat(self, virtual_machine_1, ostype)
+
+        self.debug("---------------This is the test no 2--------------")
+        """
+        Migrate the ROOT Volume to zwps
+        A volume can't be live migrated to another cluster, so this step is skipped in that case
+        """
+        # Get ROOT volume and destination pool
+        if storage_scope != "across_cluster":
+            vol_list = list_volumes(self.apiclient, virtualmachineid=vm.id, type="ROOT", listall=True)
+            root_vol = vol_list[0]
+            destinationPool = GetDestinationStoragePool(self, root_vol.storage, storage_scope, storage_type)
+            #Migrate ROOT volume
+            islive = True
+            MigrateDataVolume(self, root_vol, destinationPool, islive)
+            VmSnapshotToCheckDataIntegrity(self, vm)
+            check_files(self, vm, destinationHost)
+            check_for_vm_access_by_ssh_using_nat(self, virtual_machine_1, ostype)
+
+        self.debug("---------------This is the test no 3--------------")
+        """
+        Migrate the VM and ROOT volume to zwps
+        """
+        #Get all volumes to be migrated
+        destinationHost = self.GetDestinationHost(vm.hostid, vm, storage_scope)
+        vol_list = []
+        destinationPools = []
+        #list ROOT volume
+        root_vol = list_volumes(self.apiclient, virtualmachineid=vm.id, type="ROOT", listall=True)[0]
+        vol_list.append(root_vol)
+        #get destination Pool for ROOT volume
+        destinationPool = GetDestinationStoragePool(self, root_vol.storage, scope, storage_type)
+        destinationPools.append(destinationPool)
+        vm = MigrateVmWithVolume(self, virtual_machine_1, destinationHost, vol_list, destinationPools)
+        VmSnapshotToCheckDataIntegrity(self, vm)
+        check_files(self, vm, destinationHost)
+        check_for_vm_access_by_ssh_using_nat(self, virtual_machine_1, ostype)
+
+        self.debug("---------------This is the test no 4--------------")
+        """
+        Add a data disk and migrate vm, data disk to zwps and root disk to cwps
+        """
+
+        data_disk_1 = Volume.create(
+                                    self.apiclient,
+                                    self.testdata["volume"],
+                                    zoneid=self.zone.id,
+                                    account=self.account.name,
+                                    domainid=self.account.domainid,
+                                    diskofferingid=self.disk_offering.id
+                                  )
+
+        self.debug("Created volume with ID: %s" % data_disk_1.id)
+
+        virtual_machine_1.attach_volume(
+                         self.apiclient,
+                         data_disk_1
+                         )
+
+        destinationHost = self.GetDestinationHost(vm.hostid, vm, storage_scope)
+        vol_list = []
+        destinationPools = []
+        #list ROOT volume
+        root_vol = list_volumes(self.apiclient, virtualmachineid=vm.id, type="ROOT", listall=True)[0]
+        vol_list.append(root_vol)
+        #get destination Pool for ROOT volume
+        destinationPool = GetDestinationStoragePool(self, root_vol.storage, storage_scope, storage_type)
+        destinationPools.append(destinationPool)
+        #list DATA volume
+        data_vol = list_volumes(self.apiclient, virtualmachineid=vm.id, type="DATADISK", listall=True)[0]
+        vol_list.append(data_vol)
+        #get destination Pool for DATA volume
+        destinationPool = GetDestinationStoragePool(self, data_vol.storage, scope, storage_type)
+        destinationPools.append(destinationPool)
+        # Migrate and verify
+        vm = MigrateVmWithVolume(self, virtual_machine_1, destinationHost, vol_list, destinationPools)
+        VmSnapshotToCheckDataIntegrity(self, vm)
+        check_files(self, vm, destinationHost)
+        check_for_vm_access_by_ssh_using_nat(self, virtual_machine_1, ostype)
+
+        self.debug("---------------This is the test no 5--------------")
+        """
+        Upload a Volume, Attach it to the VM, Migrate all the volumes and VM.
+        1st data disk to zwps
+        2nd data disk to cwps
+        root disk to zwps
+        """
+        #upload a volume
+        self.testdata["configurableData"]["upload_volume"]["format"] = "OVA"
+        self.testdata["configurableData"]["upload_volume"]["url"] = "http://nfs1.lab.vmops.com/templates/burbank-systemvm-08012012.ova"
+        upload_volume = Volume.upload(
+                                      self.apiclient,
+                                      self.testdata["configurableData"]["upload_volume"],
+                                      account=self.account.name,
+                                      domainid=self.domain.id,
+                                      zoneid=self.zone.id
+                                      )
+        upload_volume.wait_for_upload(self.apiclient)
+        virtual_machine_1.attach_volume(
+                         self.apiclient,
+                         upload_volume
+                         )
+
+        destinationHost = self.GetDestinationHost(vm.hostid, vm, storage_scope)
+        vol_list = []
+        destinationPools = []
+        #list ROOT volume
+        root_vol = list_volumes(self.apiclient, virtualmachineid=vm.id, type="ROOT", listall=True)[0]
+        vol_list.append(root_vol)
+        #get destination Pool for ROOT volume
+        destinationPool = GetDestinationStoragePool(self, root_vol.storage, scope, storage_type)
+        destinationPools.append(destinationPool)
+        #list DATA volume
+        data_vol = list_volumes(self.apiclient, virtualmachineid=vm.id, type="DATADISK", listall=True)
+        #vol_list.append(data_vol)
+        #get destination Pool for 1st DATA volume
+        vol_list.append(data_vol[0])
+        destinationPool = GetDestinationStoragePool(self, data_vol[0].storage, scope, storage_type)
+        destinationPools.append(destinationPool)
+        #get destination Pool for 2nd DATA volume
+        vol_list.append(data_vol[1])
+        destinationPool = GetDestinationStoragePool(self, data_vol[1].storage, storage_scope, storage_type)
+        destinationPools.append(destinationPool)
+        self.debug("..............these are the volumes %s " % vol_list)
+        self.debug("..............these are the pools %s " % destinationPools)
+        # Migrate and verify
+        vm = MigrateVmWithVolume(self, virtual_machine_1, destinationHost, vol_list, destinationPools)
+        VmSnapshotToCheckDataIntegrity(self, vm)
+        check_files(self, vm, destinationHost)
+        check_for_vm_access_by_ssh_using_nat(self, virtual_machine_1, ostype)
+
+        self.debug("........................checking for files before taking snapshot ..................................")
+        check_files(self, vm, destinationHost)
+
+        self.debug("---------------This is the test no 6--------------")
+        """
+        Create snapshots on all the volumes, Migrate all the volumes and VM.
+        root disk to cwps
+        data1 to cwps
+        data2 to zwps
+        """
+        #Get ROOT Volume
+        vol_for_snap = list_volumes(self.apiclient, virtualmachineid=vm.id, listall=True)
+        for vol in vol_for_snap:
+            snapshot = Snapshot.create(
+                                        self.apiclient,
+                                        volume_id=vol.id
+                                        )
+            snapshot.validateState(
+                                   self.apiclient,
+                                    snapshotstate="backedup",
+                                    )
+        # Migrate all volumes and VMs
+        self.debug("..................................checking for files just after taking snapshot...................................")
+        check_files(self, vm, destinationHost)
+        # Get destination Host
+        destinationHost = self.GetDestinationHost(vm.hostid, vm, storage_scope)
+        vol_list = []
+        destinationPools = []
+        #list ROOT volume
+        root_vol = list_volumes(self.apiclient, virtualmachineid=vm.id, type="ROOT", listall=True)[0]
+        vol_list.append(root_vol)
+        #get destination Pool for ROOT volume
+        destinationPool = GetDestinationStoragePool(self, root_vol.storage, storage_scope, storage_type)
+        destinationPools.append(destinationPool)
+        #list DATA volume
+        data_vol = list_volumes(self.apiclient, virtualmachineid=vm.id, type="DATADISK", listall=True)
+        #vol_list.append(data_vol)
+        #get destination Pool for 1st DATA volume
+        vol_list.append(data_vol[0])
+        destinationPool = GetDestinationStoragePool(self, data_vol[0].storage, storage_scope, storage_type)
+        destinationPools.append(destinationPool)
+        #get destination Pool for 2nd DATA volume
+        vol_list.append(data_vol[1])
+        destinationPool = GetDestinationStoragePool(self, data_vol[1].storage, scope, storage_type)
+        destinationPools.append(destinationPool)
+        # Migrate and verify
+        vm = MigrateVmWithVolume(self, virtual_machine_1, destinationHost, vol_list, destinationPools)
+        VmSnapshotToCheckDataIntegrity(self, vm)
+        check_files(self, vm, destinationHost)
+        check_for_vm_access_by_ssh_using_nat(self, virtual_machine_1, ostype)
+
+        self.debug("........................checking for files after taking snapshot and migrating VMs........................")
+        check_files(self, vm, destinationHost)
+
+        self.debug("---------------This is the test no 7--------------")
+        """
+        Resize the data volume , Migrate all the volumes and VM.
+        root disk to zwps
+        data1 to zwps
+        data2 to zwps
+        """
+        data_disk_1.resize(
+                           self.apiclient,
+                           diskofferingid=self.resized_disk_offering.id
+                           )
+        # Migrate all volumes and VMs
+        # Get destination Host
+        destinationHost = self.GetDestinationHost(vm.hostid, vm, storage_scope)
+        vol_list = []
+        destinationPools = []
+        #list ROOT volume
+        root_vol = list_volumes(self.apiclient, virtualmachineid=vm.id, type="ROOT", listall=True)[0]
+        vol_list.append(root_vol)
+        #get destination Pool for ROOT volume
+        destinationPool = GetDestinationStoragePool(self, root_vol.storage, scope, storage_type)
+        destinationPools.append(destinationPool)
+        #list DATA volume
+        data_vol = list_volumes(self.apiclient, virtualmachineid=vm.id, type="DATADISK", listall=True)
+        #vol_list.append(data_vol)
+        #get destination Pool for 1st DATA volume
+        vol_list.append(data_vol[0])
+        destinationPool = GetDestinationStoragePool(self, data_vol[0].storage, scope, storage_type)
+        destinationPools.append(destinationPool)
+        #get destination Pool for 2nd DATA volume
+        vol_list.append(data_vol[1])
+        destinationPool = GetDestinationStoragePool(self, data_vol[1].storage, scope, storage_type)
+        destinationPools.append(destinationPool)
+        # Migrate and verify
+        vm = MigrateVmWithVolume(self, virtual_machine_1, destinationHost, vol_list, destinationPools)
+        VmSnapshotToCheckDataIntegrity(self, vm)
+        check_files(self, vm, destinationHost)
+        check_for_vm_access_by_ssh_using_nat(self, virtual_machine_1, ostype)
+
+        self.debug("---------------This is the test no 8--------------")
+        """
+        Restore the VM , Migrate all the volumes and VM.
+        root to cwps
+        data1 to zwps
+        data2 to cwps
+        """
+        virtual_machine_1.restore(self.apiclient)
+        virtual_machine_1.getState(
+                                     self.apiclient,
+                                     "Running"
+                                     )
+
+        # Get destination Host
+        destinationHost = self.GetDestinationHost(vm.hostid, vm, storage_scope)
+        vol_list = []
+        destinationPools = []
+        #list ROOT volume
+        root_vol = list_volumes(self.apiclient, virtualmachineid=vm.id, type="ROOT", listall=True)[0]
+        vol_list.append(root_vol)
+        #get destination Pool for ROOT volume
+        destinationPool = GetDestinationStoragePool(self, root_vol.storage, storage_scope, storage_type)
+        destinationPools.append(destinationPool)
+        #list DATA volume
+        data_vol = list_volumes(self.apiclient, virtualmachineid=vm.id, type="DATADISK", listall=True)
+        #vol_list.append(data_vol)
+        #get destination Pool for 1st DATA volume
+        vol_list.append(data_vol[0])
+        destinationPool = GetDestinationStoragePool(self, data_vol[0].storage, scope, storage_type)
+        destinationPools.append(destinationPool)
+        #get destination Pool for 2nd DATA volume
+        vol_list.append(data_vol[1])
+        destinationPool = GetDestinationStoragePool(self, data_vol[1].storage, storage_scope, storage_type)
+        destinationPools.append(destinationPool)
+        # Migrate and verify
+        vm = MigrateVmWithVolume(self, virtual_machine_1, destinationHost, vol_list, destinationPools)
+        VmSnapshotToCheckDataIntegrity(self, vm)
+        check_files(self, vm, destinationHost)
+        check_for_vm_access_by_ssh_using_nat(self, virtual_machine_1, ostype)
+
+        self.debug("---------------This is the test no 9--------------")
+        """
+        Detach the Data disk, Deploy another VM, attach the data disk and migrate.
+        root to zwps
+        data to cwps
+        """
+
+        virtual_machine_1.detach_volume(
+                         self.apiclient,
+                         data_disk_1
+                         )
+        vm = "virtual_machine3"
+        virtual_machine_2 = self.deploy_virtual_machine(self.service_offering.id, vm, self.template.id)
+        self.cleanup.append(virtual_machine_2)
+        virtual_machine_2.attach_volume(
+                         self.apiclient,
+                         data_disk_1
+                         )
+        # Get destination Host
+        vm = list_virtual_machines(self.apiclient, id=virtual_machine_2.id, listall=True)[0]
+        destinationHost = self.GetDestinationHost(vm.hostid, virtual_machine_2, storage_scope)
+        vol_list = []
+        destinationPools = []
+        #list ROOT volume
+        root_vol = list_volumes(self.apiclient, virtualmachineid=virtual_machine_2.id, type="ROOT", listall=True)[0]
+        vol_list.append(root_vol)
+        #get destination Pool for ROOT volume
+        destinationPool = GetDestinationStoragePool(self, root_vol.storage, scope, storage_type)
+        destinationPools.append(destinationPool)
+        #list DATA volume
+        data_vol = list_volumes(self.apiclient, virtualmachineid=virtual_machine_2.id, type="DATADISK", listall=True)[0]
+        vol_list.append(data_vol)
+        #get destination Pool for DATA volume
+        destinationPool = GetDestinationStoragePool(self, data_vol.storage, storage_scope, storage_type)
+        destinationPools.append(destinationPool)
+        # Migrate and verify
+        vm = MigrateVmWithVolume(self, virtual_machine_2, destinationHost, vol_list, destinationPools)
+        VmSnapshotToCheckDataIntegrity(self, vm)
+        check_files(self, vm, destinationHost)
+        check_for_vm_access_by_ssh_using_nat(self, virtual_machine_2, ostype)
+
+        self.debug("---------------This is the test no 10--------------")
+        """
+        Detach the uploaded volume, attach it to another vm and migrate.
+        root to cwps
+        data1 to zwps
+        data2 to zwps
+        """
+
+        virtual_machine_1.detach_volume(
+                         self.apiclient,
+                         upload_volume
+                         )
+
+        virtual_machine_2.attach_volume(
+                         self.apiclient,
+                         upload_volume
+                         )
+        # Get destination Host
+        destinationHost = self.GetDestinationHost(vm.hostid, vm, storage_scope)
+        vol_list = []
+        destinationPools = []
+        #list ROOT volume
+        root_vol = list_volumes(self.apiclient, virtualmachineid=vm.id, type="ROOT", listall=True)[0]
+        vol_list.append(root_vol)
+        #get destination Pool for ROOT volume
+        destinationPool = GetDestinationStoragePool(self, root_vol.storage, storage_scope, storage_type)
+        destinationPools.append(destinationPool)
+        #list DATA volume
+        data_vol = list_volumes(self.apiclient, virtualmachineid=vm.id, type="DATADISK", listall=True)
+        #vol_list.append(data_vol)
+        #get destination Pool for 1st DATA volume
+        vol_list.append(data_vol[0])
+        destinationPool = GetDestinationStoragePool(self, data_vol[0].storage, scope, storage_type)
+        destinationPools.append(destinationPool)
+        #get destination Pool for 2nd DATA volume
+        vol_list.append(data_vol[1])
+        destinationPool = GetDestinationStoragePool(self, data_vol[1].storage, scope, storage_type)
+        destinationPools.append(destinationPool)
+        # Migrate and verify
+        vm = MigrateVmWithVolume(self, virtual_machine_2, destinationHost, vol_list, destinationPools)
+        VmSnapshotToCheckDataIntegrity(self, vm)
+        check_files(self, vm, destinationHost)
+        check_for_vm_access_by_ssh_using_nat(self, virtual_machine_2, ostype)
+
+        self.debug("---------------This is the test no 11--------------")
+        """
+        Create snapshots on all the volumes, Migrate all the volumes and VM.
+        root to zwps
+        data1 to cwps
+        data2 to zwps
+        """
+        #Get ROOT Volume
+        vol_for_snap = list_volumes(self.apiclient, virtualmachineid=vm.id, listall=True)
+        for vol in vol_for_snap:
+            snapshot = Snapshot.create(
+                                        self.apiclient,
+                                        volume_id=vol.id
+                                        )
+            snapshot.validateState(
+                                   self.apiclient,
+                                    snapshotstate="backedup",
+                                    )
+
+        # Get destination Host
+        destinationHost = self.GetDestinationHost(vm.hostid, vm, storage_scope)
+        vol_list = []
+        destinationPools = []
+        #list ROOT volume
+        root_vol = list_volumes(self.apiclient, virtualmachineid=vm.id, type="ROOT", listall=True)[0]
+        vol_list.append(root_vol)
+        #get destination Pool for ROOT volume
+        destinationPool = GetDestinationStoragePool(self, root_vol.storage, scope, storage_type)
+        destinationPools.append(destinationPool)
+        #list DATA volume
+        data_vol = list_volumes(self.apiclient, virtualmachineid=vm.id, type="DATADISK", listall=True)
+        #vol_list.append(data_vol)
+        #get destination Pool for 1st DATA volume
+        vol_list.append(data_vol[0])
+        destinationPool = GetDestinationStoragePool(self, data_vol[0].storage, storage_scope, storage_type)
+        destinationPools.append(destinationPool)
+        #get destination Pool for 2nd DATA volume
+        vol_list.append(data_vol[1])
+        destinationPool = GetDestinationStoragePool(self, data_vol[1].storage, scope, storage_type)
+        destinationPools.append(destinationPool)
+        # Migrate and verify
+        vm = MigrateVmWithVolume(self, virtual_machine_2, destinationHost, vol_list, destinationPools)
+        VmSnapshotToCheckDataIntegrity(self, vm)
+        check_files(self, vm, destinationHost)
+        check_for_vm_access_by_ssh_using_nat(self, virtual_machine_2, ostype)
+
+    @data(('within_cluster', 'linux'), ('within_cluster', 'windows'), ('across_cluster', 'linux'), ('across_cluster', 'windows'))
+    @unpack
+    @attr(tags=["advanced", "basic", "vmware", "vmfs", "tagged"], required_hardware="true")
+    def test_04_vm_and_volumes_live_migration_for_vmware_across_nfs_vmfs(self, first_value, second_value):
+        """
+        This test path tests vMotion from NFS <---> VMFS, within a cluster,
+        across clusters and across zones, for both Windows and Linux VMs using DATA DRIVEN TESTING
+        1. Migr

<TRUNCATED>

[46/50] [abbrv] git commit: updated refs/heads/feature/vpc-ipv6 to 6140db5

Posted by ek...@apache.org.
Fix 3 more findbugs performance alerts, creating Long and Integer unnecessarily:

F5BigIpResource.java:297, DM_BOXED_PRIMITIVE_FOR_PARSING, Priority: High
F5BigIpResource.java:703, DM_BOXED_PRIMITIVE_FOR_PARSING, Priority: High
F5BigIpResource.java:889, DM_BOXED_PRIMITIVE_FOR_PARSING, Priority: High

Boxing/unboxing to parse a primitive in com.cloud.network.resource.F5BigIpResource
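
For context, a minimal standalone sketch of what the DM_BOXED_PRIMITIVE_FOR_PARSING alert is about (illustrative only, the values below are made up and this is not part of the commit): assigning Long.valueOf()/Integer.valueOf() to a primitive first creates a boxed wrapper object and then auto-unboxes it, whereas parseLong()/parseInt() return the primitive directly.

    // Illustrative sketch, not committed code; the values are hypothetical.
    public class ParseVsValueOfSketch {
        public static void main(String[] args) {
            String guestVlan = "204";
            long viaValueOf = Long.valueOf(guestVlan);   // allocates a java.lang.Long, then auto-unboxes it
            long viaParse = Long.parseLong(guestVlan);   // parses straight to the primitive, no wrapper object

            String port = "8080";
            int portViaValueOf = Integer.valueOf(port);  // same boxing/unboxing pattern for Integer
            int portViaParse = Integer.parseInt(port);   // preferred: returns the primitive directly
            System.out.println(viaValueOf + " " + viaParse + " " + portViaValueOf + " " + portViaParse);
        }
    }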

Signed-off-by: Daan Hoogland <da...@gmail.com>

This closes #366


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/039d632a
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/039d632a
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/039d632a

Branch: refs/heads/feature/vpc-ipv6
Commit: 039d632ad3143dce5b39b0202092f0c6a6d09c44
Parents: e8c7069
Author: Rafael da Fonseca <rs...@gmail.com>
Authored: Sun Jun 7 21:33:49 2015 +0200
Committer: Daan Hoogland <da...@gmail.com>
Committed: Mon Jun 8 09:40:59 2015 +0200

----------------------------------------------------------------------
 .../f5/src/com/cloud/network/resource/F5BigIpResource.java     | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/039d632a/plugins/network-elements/f5/src/com/cloud/network/resource/F5BigIpResource.java
----------------------------------------------------------------------
diff --git a/plugins/network-elements/f5/src/com/cloud/network/resource/F5BigIpResource.java b/plugins/network-elements/f5/src/com/cloud/network/resource/F5BigIpResource.java
index 0f70a9b..c4e0fdf 100644
--- a/plugins/network-elements/f5/src/com/cloud/network/resource/F5BigIpResource.java
+++ b/plugins/network-elements/f5/src/com/cloud/network/resource/F5BigIpResource.java
@@ -294,7 +294,7 @@ public class F5BigIpResource implements ServerResource {
             for (IpAddressTO ip : ips) {
                 // is it saver to use Long.valueOf(BroadcastDomain.getValue(ip.getBroadcastUri())) ???
                 // i.o.w. can this contain vlan:// then change !!!
-                long guestVlanTag = Long.valueOf(ip.getBroadcastUri());
+                long guestVlanTag = Long.parseLong(ip.getBroadcastUri());
                 // It's a hack, using isOneToOneNat field for indicate if it's inline or not
                 boolean inline = ip.isOneToOneNat();
                 String vlanSelfIp = inline ? tagAddressWithRouteDomain(ip.getVlanGateway(), guestVlanTag) : ip.getVlanGateway();
@@ -700,7 +700,7 @@ public class F5BigIpResource implements ServerResource {
                 List<String> poolMembers = getMembers(virtualServerName);
                 for (String poolMember : poolMembers) {
                     String[] destIpAndPort = getIpAndPort(poolMember);
-                    deletePoolMember(virtualServerName, destIpAndPort[0], Integer.valueOf(destIpAndPort[1]));
+                    deletePoolMember(virtualServerName, destIpAndPort[0], Integer.parseInt(destIpAndPort[1]));
                 }
 
                 // Delete the virtual server
@@ -886,7 +886,7 @@ public class F5BigIpResource implements ServerResource {
         for (String member : allPoolMembers) {
             if (!activePoolMembers.contains(member)) {
                 String[] ipAndPort = member.split("-");
-                deletePoolMember(virtualServerName, ipAndPort[0], Integer.valueOf(ipAndPort[1]));
+                deletePoolMember(virtualServerName, ipAndPort[0], Integer.parseInt(ipAndPort[1]));
             }
         }
     }


[44/50] [abbrv] git commit: updated refs/heads/feature/vpc-ipv6 to 6140db5

Posted by ek...@apache.org.
Fix last occurrence of unnecessary boxing detected by findbugs:

CiscoVnmcResource.java:266, DM_BOXED_PRIMITIVE_FOR_PARSING, Priority: High
Boxing/unboxing to parse a primitive in com.cloud.network.resource.CiscoVnmcResource.getIpRangeFromCidr(String)

Signed-off-by: Daan Hoogland <da...@gmail.com>

This closes #368


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/9bac84a3
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/9bac84a3
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/9bac84a3

Branch: refs/heads/feature/vpc-ipv6
Commit: 9bac84a3f7054401588e8eacc28668fe0c6959db
Parents: 38c269d
Author: Rafael da Fonseca <rs...@gmail.com>
Authored: Sun Jun 7 21:41:23 2015 +0200
Committer: Daan Hoogland <da...@gmail.com>
Committed: Mon Jun 8 09:39:02 2015 +0200

----------------------------------------------------------------------
 .../src/com/cloud/network/resource/CiscoVnmcResource.java          | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/9bac84a3/plugins/network-elements/cisco-vnmc/src/com/cloud/network/resource/CiscoVnmcResource.java
----------------------------------------------------------------------
diff --git a/plugins/network-elements/cisco-vnmc/src/com/cloud/network/resource/CiscoVnmcResource.java b/plugins/network-elements/cisco-vnmc/src/com/cloud/network/resource/CiscoVnmcResource.java
index 44260e4..4b8ee6f 100644
--- a/plugins/network-elements/cisco-vnmc/src/com/cloud/network/resource/CiscoVnmcResource.java
+++ b/plugins/network-elements/cisco-vnmc/src/com/cloud/network/resource/CiscoVnmcResource.java
@@ -263,7 +263,7 @@ public class CiscoVnmcResource implements ServerResource {
         String[] result = new String[2];
         String[] cidrData = cidr.split("\\/");
         assert (cidrData.length == 2) : "Something is wrong with source cidr " + cidr;
-        long size = Long.valueOf(cidrData[1]);
+        long size = Long.parseLong(cidrData[1]);
         result[0] = cidrData[0];
         result[1] = cidrData[0];
         if (size < 32) {


[12/50] [abbrv] git commit: updated refs/heads/feature/vpc-ipv6 to 6140db5

Posted by ek...@apache.org.
Mocking the Connect object used in the LibvirtMigrateCommandWrapper

When the tests were executed in an environment where Libvirt was also installed, they
caused errors.
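
As a rough, hypothetical sketch of the seam this change introduces (simplified names, not the actual CloudStack classes): the direct new Connect(...) call is moved behind a helper method, so a unit test can stub that method with Mockito instead of triggering libvirt's native bindings.

    // Hypothetical sketch of the test seam; not the committed CloudStack code.
    import org.libvirt.Connect;
    import org.libvirt.LibvirtException;
    import org.mockito.Mockito;

    public class QemuConnectionSeamSketch {

        // Production code reaches libvirt only through this small, mockable method.
        public Connect retrieveQemuConnection(final String qemuUri) throws LibvirtException {
            return new Connect(qemuUri); // real native call, never executed in unit tests
        }

        public static void main(String[] args) throws LibvirtException {
            // In a test, both the helper and the destination connection are mocked,
            // so no native libvirt code runs even if libvirt is installed locally.
            QemuConnectionSeamSketch helper = Mockito.mock(QemuConnectionSeamSketch.class);
            Connect dconn = Mockito.mock(Connect.class);
            Mockito.when(helper.retrieveQemuConnection("qemu+tcp://192.168.0.2/system")).thenReturn(dconn);
        }
    }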

Signed-off-by: Rohit Yadav <ro...@shapeblue.com>

This closes #342


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/607a63b1
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/607a63b1
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/607a63b1

Branch: refs/heads/feature/vpc-ipv6
Commit: 607a63b12d1ca5d76d31177ecef3806d08628b41
Parents: 6c1dde8
Author: wilderrodrigues <wr...@schubergphilis.com>
Authored: Mon Jun 1 14:45:43 2015 +0200
Committer: Rohit Yadav <ro...@shapeblue.com>
Committed: Mon Jun 1 21:11:25 2015 +0200

----------------------------------------------------------------------
 .../resource/wrapper/LibvirtMigrateCommandWrapper.java  |  2 +-
 .../kvm/resource/wrapper/LibvirtUtilitiesHelper.java    |  4 ++++
 .../kvm/resource/LibvirtComputingResourceTest.java      | 12 ++++++------
 3 files changed, 11 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/607a63b1/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java
index 235793c..1847818 100644
--- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java
+++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java
@@ -86,7 +86,7 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo
              */
             xmlDesc = dm.getXMLDesc(0).replace(libvirtComputingResource.getPrivateIp(), command.getDestinationIp());
 
-            dconn = new Connect("qemu+tcp://" + command.getDestinationIp() + "/system");
+            dconn = libvirtUtilitiesHelper.retrieveQemuConnection("qemu+tcp://" + command.getDestinationIp() + "/system");
 
             //run migration in thread so we can monitor it
             s_logger.info("Live migration of instance " + vmName + " initiated");

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/607a63b1/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtUtilitiesHelper.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtUtilitiesHelper.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtUtilitiesHelper.java
index 98bf642..8c8f6e6 100644
--- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtUtilitiesHelper.java
+++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtUtilitiesHelper.java
@@ -86,4 +86,8 @@ public class LibvirtUtilitiesHelper {
     public String retrieveBashScriptPath() {
         return LibvirtComputingResource.BASH_SCRIPT_PATH;
     }
+
+    public Connect retrieveQemuConnection(final String qemuURI) throws LibvirtException {
+        return new Connect(qemuURI);
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/607a63b1/plugins/hypervisors/kvm/test/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/kvm/test/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java b/plugins/hypervisors/kvm/test/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java
index eaaed39..d3c8824 100644
--- a/plugins/hypervisors/kvm/test/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java
+++ b/plugins/hypervisors/kvm/test/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java
@@ -1205,13 +1205,10 @@ public class LibvirtComputingResourceTest {
         verify(vm, times(1)).getNics();
     }
 
-    @Test(expected = UnsatisfiedLinkError.class)
+    @Test
     public void testMigrateCommand() {
-        // The Connect constructor used inside the LibvirtMigrateCommandWrapper has a call to native methods, which
-        // makes difficult to test it now.
-        // Will keep it expecting the UnsatisfiedLinkError and fix later.
-
         final Connect conn = Mockito.mock(Connect.class);
+        final Connect dconn = Mockito.mock(Connect.class);
         final LibvirtUtilitiesHelper libvirtUtilitiesHelper = Mockito.mock(LibvirtUtilitiesHelper.class);
 
         final String vmName = "Test";
@@ -1225,6 +1222,8 @@ public class LibvirtComputingResourceTest {
         when(libvirtComputingResource.getLibvirtUtilitiesHelper()).thenReturn(libvirtUtilitiesHelper);
         try {
             when(libvirtUtilitiesHelper.getConnectionByVmName(vmName)).thenReturn(conn);
+            when(libvirtUtilitiesHelper.retrieveQemuConnection("qemu+tcp://" + command.getDestinationIp() + "/system")).thenReturn(dconn);
+
         } catch (final LibvirtException e) {
             fail(e.getMessage());
         }
@@ -1259,11 +1258,12 @@ public class LibvirtComputingResourceTest {
         assertNotNull(wrapper);
 
         final Answer answer = wrapper.execute(command, libvirtComputingResource);
-        assertFalse(answer.getResult());
+        assertTrue(answer.getResult());
 
         verify(libvirtComputingResource, times(1)).getLibvirtUtilitiesHelper();
         try {
             verify(libvirtUtilitiesHelper, times(1)).getConnectionByVmName(vmName);
+            verify(libvirtUtilitiesHelper, times(1)).retrieveQemuConnection("qemu+tcp://" + command.getDestinationIp() + "/system");
         } catch (final LibvirtException e) {
             fail(e.getMessage());
         }


[35/50] [abbrv] git commit: updated refs/heads/feature/vpc-ipv6 to 6140db5

Posted by ek...@apache.org.
Coverity issue 1116812 - Replacing concatenation with optionsBuffer.append(option.getKey()).append('=').append(option.getValue()).append(',');
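
For context, a minimal standalone sketch of the pattern (hypothetical option values, not the committed code): concatenating with + inside a loop creates a new intermediate String on every iteration, while a single mutable buffer accumulates the same key=value pairs in place. The commit uses StringBuffer; for this single-threaded path a StringBuilder would presumably work just as well.

    // Illustrative sketch only; the option names/values below are made up.
    import java.util.LinkedHashMap;
    import java.util.Map;

    public class OptionsStringSketch {
        public static void main(String[] args) {
            Map<String, String> options = new LinkedHashMap<String, String>();
            options.put("preallocation", "metadata");
            options.put("cluster_size", "65536");

            // Flagged pattern: each += builds a brand-new String object.
            String concatenated = "";
            for (Map.Entry<String, String> option : options.entrySet()) {
                concatenated += option.getKey() + "=" + option.getValue() + ",";
            }

            // Preferred pattern, as in the commit above: one mutable buffer.
            StringBuffer optionsBuffer = new StringBuffer();
            for (Map.Entry<String, String> option : options.entrySet()) {
                optionsBuffer.append(option.getKey()).append('=').append(option.getValue()).append(',');
            }
            String optionsStr = optionsBuffer.toString().replaceAll(",$", "");
            System.out.println(optionsStr); // preallocation=metadata,cluster_size=65536
        }
    }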

Signed-off-by: Daan Hoogland <da...@gmail.com>


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/6138d9a6
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/6138d9a6
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/6138d9a6

Branch: refs/heads/feature/vpc-ipv6
Commit: 6138d9a69034a893a08602e06daec5d06e6d842f
Parents: 9ff3848
Author: wilderrodrigues <wr...@schubergphilis.com>
Authored: Thu Jun 4 08:45:57 2015 +0200
Committer: Daan Hoogland <da...@gmail.com>
Committed: Thu Jun 4 12:34:05 2015 +0200

----------------------------------------------------------------------
 .../apache/cloudstack/utils/qemu/QemuImg.java   | 101 ++++++++++---------
 1 file changed, 52 insertions(+), 49 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/6138d9a6/plugins/hypervisors/kvm/src/org/apache/cloudstack/utils/qemu/QemuImg.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/kvm/src/org/apache/cloudstack/utils/qemu/QemuImg.java b/plugins/hypervisors/kvm/src/org/apache/cloudstack/utils/qemu/QemuImg.java
index 500c2d0..802bc9d 100644
--- a/plugins/hypervisors/kvm/src/org/apache/cloudstack/utils/qemu/QemuImg.java
+++ b/plugins/hypervisors/kvm/src/org/apache/cloudstack/utils/qemu/QemuImg.java
@@ -19,12 +19,13 @@ package org.apache.cloudstack.utils.qemu;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.Map;
-import com.cloud.storage.Storage;
 
-import com.cloud.utils.script.Script;
-import com.cloud.utils.script.OutputInterpreter;
 import sun.reflect.generics.reflectiveObjects.NotImplementedException;
 
+import com.cloud.storage.Storage;
+import com.cloud.utils.script.OutputInterpreter;
+import com.cloud.utils.script.Script;
+
 public class QemuImg {
 
     /* The qemu-img binary. We expect this to be in $PATH */
@@ -36,13 +37,13 @@ public class QemuImg {
         RAW("raw"), QCOW2("qcow2"), VMDK("vmdk"), FILE("file"), RBD("rbd"), SHEEPDOG("sheepdog"), HTTP("http"), HTTPS("https"), TAR("tar"), DIR("dir");
         String format;
 
-        private PhysicalDiskFormat(String format) {
+        private PhysicalDiskFormat(final String format) {
             this.format = format;
         }
 
         @Override
         public String toString() {
-            return this.format;
+            return format;
         }
     }
 
@@ -53,33 +54,34 @@ public class QemuImg {
 
         private final String preallocationType;
 
-        private PreallocationType(String preallocationType){
+        private PreallocationType(final String preallocationType){
             this.preallocationType = preallocationType;
         }
 
+        @Override
         public String toString(){
-            return this.preallocationType;
+            return preallocationType;
         }
 
-        public static PreallocationType getPreallocationType(Storage.ProvisioningType provisioningType){
+        public static PreallocationType getPreallocationType(final Storage.ProvisioningType provisioningType){
             switch (provisioningType){
-                case THIN:
-                    return PreallocationType.Off;
-                case SPARSE:
-                    return PreallocationType.Metadata;
-                case FAT:
-                    return PreallocationType.Full;
-                default:
-                    throw new NotImplementedException();
+            case THIN:
+                return PreallocationType.Off;
+            case SPARSE:
+                return PreallocationType.Metadata;
+            case FAT:
+                return PreallocationType.Full;
+            default:
+                throw new NotImplementedException();
             }
         }
     }
 
-    public QemuImg(int timeout) {
+    public QemuImg(final int timeout) {
         this.timeout = timeout;
     }
 
-    public void setTimeout(int timeout) {
+    public void setTimeout(final int timeout) {
         this.timeout = timeout;
     }
 
@@ -91,14 +93,14 @@ public class QemuImg {
      *            A alternative path to the qemu-img binary
      * @return void
      */
-    public QemuImg(String qemuImgPath) {
-        this._qemuImgPath = qemuImgPath;
+    public QemuImg(final String qemuImgPath) {
+        _qemuImgPath = qemuImgPath;
     }
 
     /* These are all methods supported by the qemu-img tool */
 
     /* Perform a consistency check on the disk image */
-    public void check(QemuImgFile file) {
+    public void check(final QemuImgFile file) {
 
     }
 
@@ -116,16 +118,16 @@ public class QemuImg {
      *            pairs which are passed on to qemu-img without validation.
      * @return void
      */
-    public void create(QemuImgFile file, QemuImgFile backingFile, Map<String, String> options) throws QemuImgException {
-        Script s = new Script(_qemuImgPath, timeout);
+    public void create(final QemuImgFile file, final QemuImgFile backingFile, final Map<String, String> options) throws QemuImgException {
+        final Script s = new Script(_qemuImgPath, timeout);
         s.add("create");
 
         if (options != null && !options.isEmpty()) {
             s.add("-o");
             final StringBuilder optionsStr = new StringBuilder();
-            Iterator<Map.Entry<String, String>> optionsIter = options.entrySet().iterator();
+            final Iterator<Map.Entry<String, String>> optionsIter = options.entrySet().iterator();
             while(optionsIter.hasNext()){
-                Map.Entry option = optionsIter.next();
+                final Map.Entry option = optionsIter.next();
                 optionsStr.append(option.getKey()).append('=').append(option.getValue());
                 if(optionsIter.hasNext()){
                     //Add "," only if there are more options
@@ -138,7 +140,7 @@ public class QemuImg {
         /*
             -b for a backing file does not show up in the docs, but it works.
             Shouldn't this be -o backing_file=filename instead?
-        */
+         */
         s.add("-f");
         if (backingFile != null) {
             s.add(backingFile.getFormat().toString());
@@ -155,7 +157,7 @@ public class QemuImg {
             throw new QemuImgException("No size was passed, and no backing file was passed");
         }
 
-        String result = s.execute();
+        final String result = s.execute();
         if (result != null) {
             throw new QemuImgException(result);
         }
@@ -170,7 +172,7 @@ public class QemuImg {
      *            The file to create
      * @return void
      */
-    public void create(QemuImgFile file) throws QemuImgException {
+    public void create(final QemuImgFile file) throws QemuImgException {
         this.create(file, null, null);
     }
 
@@ -185,7 +187,7 @@ public class QemuImg {
      *            A backing file if used (for example with qcow2)
      * @return void
      */
-    public void create(QemuImgFile file, QemuImgFile backingFile) throws QemuImgException {
+    public void create(final QemuImgFile file, final QemuImgFile backingFile) throws QemuImgException {
         this.create(file, backingFile, null);
     }
 
@@ -201,7 +203,7 @@ public class QemuImg {
      *            pairs which are passed on to qemu-img without validation.
      * @return void
      */
-    public void create(QemuImgFile file, Map<String, String> options) throws QemuImgException {
+    public void create(final QemuImgFile file, final Map<String, String> options) throws QemuImgException {
         this.create(file, null, options);
     }
 
@@ -221,8 +223,8 @@ public class QemuImg {
      *            pairs which are passed on to qemu-img without validation.
      * @return void
      */
-    public void convert(QemuImgFile srcFile, QemuImgFile destFile, Map<String, String> options) throws QemuImgException {
-        Script s = new Script(_qemuImgPath, timeout);
+    public void convert(final QemuImgFile srcFile, final QemuImgFile destFile, final Map<String, String> options) throws QemuImgException {
+        final Script s = new Script(_qemuImgPath, timeout);
         s.add("convert");
         // autodetect source format. Sometime in the future we may teach KVMPhysicalDisk about more formats, then we can explicitly pass them if necessary
         //s.add("-f");
@@ -232,10 +234,11 @@ public class QemuImg {
 
         if (options != null && !options.isEmpty()) {
             s.add("-o");
-            String optionsStr = "";
-            for (Map.Entry<String, String> option : options.entrySet()) {
-                optionsStr += option.getKey() + "=" + option.getValue() + ",";
+            final StringBuffer optionsBuffer = new StringBuffer();
+            for (final Map.Entry<String, String> option : options.entrySet()) {
+                optionsBuffer.append(option.getKey()).append('=').append(option.getValue()).append(',');
             }
+            String optionsStr = optionsBuffer.toString();
             optionsStr = optionsStr.replaceAll(",$", "");
             s.add(optionsStr);
         }
@@ -243,7 +246,7 @@ public class QemuImg {
         s.add(srcFile.getFileName());
         s.add(destFile.getFileName());
 
-        String result = s.execute();
+        final String result = s.execute();
         if (result != null) {
             throw new QemuImgException(result);
         }
@@ -266,7 +269,7 @@ public class QemuImg {
      *            The destination file
      * @return void
      */
-    public void convert(QemuImgFile srcFile, QemuImgFile destFile) throws QemuImgException {
+    public void convert(final QemuImgFile srcFile, final QemuImgFile destFile) throws QemuImgException {
         this.convert(srcFile, destFile, null);
     }
 
@@ -280,7 +283,7 @@ public class QemuImg {
      *            The file of which changes have to be committed
      * @return void
      */
-    public void commit(QemuImgFile file) throws QemuImgException {
+    public void commit(final QemuImgFile file) throws QemuImgException {
 
     }
 
@@ -298,22 +301,22 @@ public class QemuImg {
      *            A QemuImgFile object containing the file to get the information from
      * @return A HashMap with String key-value information as returned by 'qemu-img info'
      */
-    public Map<String, String> info(QemuImgFile file) throws QemuImgException {
-        Script s = new Script(_qemuImgPath);
+    public Map<String, String> info(final QemuImgFile file) throws QemuImgException {
+        final Script s = new Script(_qemuImgPath);
         s.add("info");
         s.add(file.getFileName());
-        OutputInterpreter.AllLinesParser parser = new OutputInterpreter.AllLinesParser();
-        String result = s.execute(parser);
+        final OutputInterpreter.AllLinesParser parser = new OutputInterpreter.AllLinesParser();
+        final String result = s.execute(parser);
         if (result != null) {
             throw new QemuImgException(result);
         }
 
-        HashMap<String, String> info = new HashMap<String, String>();
-        String[] outputBuffer = parser.getLines().trim().split("\n");
+        final HashMap<String, String> info = new HashMap<String, String>();
+        final String[] outputBuffer = parser.getLines().trim().split("\n");
         for (int i = 0; i < outputBuffer.length; i++) {
-            String[] lineBuffer = outputBuffer[i].split(":", 2);
+            final String[] lineBuffer = outputBuffer[i].split(":", 2);
             if (lineBuffer.length == 2) {
-                String key = lineBuffer[0].trim().replace(" ", "_");
+                final String key = lineBuffer[0].trim().replace(" ", "_");
                 String value = null;
 
                 if (key.equals("virtual_size")) {
@@ -353,7 +356,7 @@ public class QemuImg {
      * @param delta
      *            Flag if the new size is a delta
      */
-    public void resize(QemuImgFile file, long size, boolean delta) throws QemuImgException {
+    public void resize(final QemuImgFile file, final long size, final boolean delta) throws QemuImgException {
         String newSize = null;
 
         if (size == 0) {
@@ -373,7 +376,7 @@ public class QemuImg {
             newSize = Long.toString(size);
         }
 
-        Script s = new Script(_qemuImgPath);
+        final Script s = new Script(_qemuImgPath);
         s.add("resize");
         s.add(file.getFileName());
         s.add(newSize);
@@ -393,7 +396,7 @@ public class QemuImg {
      * @param size
      *            The new size
      */
-    public void resize(QemuImgFile file, long size) throws QemuImgException {
+    public void resize(final QemuImgFile file, final long size) throws QemuImgException {
         this.resize(file, size, false);
     }
 }


[45/50] [abbrv] git commit: updated refs/heads/feature/vpc-ipv6 to 6140db5

Posted by ek...@apache.org.
Fix FindBugs high-priority warning at VmwareStorageManagerImpl.java:1023 (RCN_REDUNDANT_NULLCHECK_OF_NONNULL_VALUE): redundant null check of datastoreVolumePath, which is known to be non-null in com.cloud.hypervisor.vmware.manager.VmwareStorageManagerImpl.getVolumePathInDatastore(DatastoreMO, String)

Assertions are not evaluated at runtime by default; the correct approach is to throw and handle an exception without killing the application.
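
As a side note on the reasoning above, here is a minimal standalone sketch (hypothetical class and method names, plain RuntimeException instead of CloudRuntimeException) showing why the assert adds nothing: JVM assertions are only evaluated when the process is started with -ea, while a thrown exception is enforced unconditionally and can be handled by the caller without killing the application.

    public class AssertVsException {
        static String findFileOrFail(final String path, final String fileName) {
            assert path != null : "Virtual disk file missing"; // silently skipped unless the JVM runs with -ea
            if (path == null) {
                // Always enforced, whatever the JVM flags; callers can catch and recover.
                throw new RuntimeException("Unable to find file " + fileName);
            }
            return path;
        }

        public static void main(final String[] args) {
            try {
                findFileOrFail(null, "disk.vmdk");
            } catch (final RuntimeException e) {
                System.out.println("Handled without killing the app: " + e.getMessage());
            }
        }
    }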

Signed-off-by: Daan Hoogland <da...@gmail.com>

This closes #362


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/e8c7069f
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/e8c7069f
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/e8c7069f

Branch: refs/heads/feature/vpc-ipv6
Commit: e8c7069f734b7886cbe0e8dfec4452a4fdfe20b3
Parents: 9bac84a
Author: Rafael da Fonseca <rs...@gmail.com>
Authored: Sun Jun 7 11:38:04 2015 +0200
Committer: Daan Hoogland <da...@gmail.com>
Committed: Mon Jun 8 09:40:50 2015 +0200

----------------------------------------------------------------------
 .../cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java   | 1 -
 1 file changed, 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/e8c7069f/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java
index 3aec7a4..34ede03 100644
--- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java
+++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java
@@ -1019,7 +1019,6 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
 
     private String getVolumePathInDatastore(DatastoreMO dsMo, String volumeFileName) throws Exception {
         String datastoreVolumePath = dsMo.searchFileInSubFolders(volumeFileName, true);
-        assert (datastoreVolumePath != null) : "Virtual disk file missing from datastore.";
         if (datastoreVolumePath == null) {
             throw new CloudRuntimeException("Unable to find file " + volumeFileName + " in datastore " + dsMo.getName());
         }


[20/50] [abbrv] git commit: updated refs/heads/feature/vpc-ipv6 to 6140db5

Posted by ek...@apache.org.
CLOUDSTACK-8530: KVM hosts without active agents should be in Disconnected state

KVM hosts that are actually up, but whose agents are shut down, should be put
in the Disconnected state. This avoids having their VMs HA'd, and operations
such as deploying a VM will exclude that host, saving us from errors.

The improvement is that we first try to contact the KVM host itself. If that fails,
we assume it is disconnected and then ask its KVM neighbours whether they can
check its status. If all of the KVM neighbours report it as Down and we are unable
to reach the KVM host, the host is possibly down. If any of the KVM neighbours
reports it as Up while we are unable to reach the KVM host, we can be sure that
the agent is offline but the host itself is still running.
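
The resulting decision table can be summarised with a small standalone sketch (a simplification, not the patch itself; the real logic lives in KVMInvestigator below, and the local Status enum stands in for com.cloud.host.Status):

    public class KvmInvestigationSketch {
        enum Status { Up, Down, Disconnected }

        // hostStatus: what direct contact with the host returned (Disconnected if unreachable).
        // neighbourStatus: the best report gathered from other KVM/LXC hosts in the same cluster.
        static Status investigate(final Status hostStatus, final Status neighbourStatus) {
            if (neighbourStatus == Status.Up && (hostStatus == Status.Disconnected || hostStatus == Status.Down)) {
                // A neighbour can still see the host, so only the agent is offline.
                return Status.Disconnected;
            }
            if (neighbourStatus == Status.Down && (hostStatus == Status.Disconnected || hostStatus == Status.Down)) {
                // Neither we nor any neighbour can reach it: the host is most likely really down.
                return Status.Down;
            }
            return hostStatus;
        }

        public static void main(final String[] args) {
            System.out.println(investigate(Status.Disconnected, Status.Up));   // Disconnected: agent offline, host up
            System.out.println(investigate(Status.Disconnected, Status.Down)); // Down: host really down
        }
    }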

Signed-off-by: Rohit Yadav <ro...@shapeblue.com>
Signed-off-by: wilderrodrigues <wr...@schubergphilis.com>

This closes #340


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/f3412468
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/f3412468
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/f3412468

Branch: refs/heads/feature/vpc-ipv6
Commit: f3412468881e5c45e494eae4d7f6fb9ccdc665c4
Parents: f2b1ec2
Author: Rohit Yadav <ro...@shapeblue.com>
Authored: Mon Jun 1 14:53:58 2015 +0200
Committer: wilderrodrigues <wr...@schubergphilis.com>
Committed: Tue Jun 2 13:27:16 2015 +0200

----------------------------------------------------------------------
 .../kvm/src/com/cloud/ha/KVMInvestigator.java   | 43 +++++++++++++++-----
 1 file changed, 33 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/f3412468/plugins/hypervisors/kvm/src/com/cloud/ha/KVMInvestigator.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/kvm/src/com/cloud/ha/KVMInvestigator.java b/plugins/hypervisors/kvm/src/com/cloud/ha/KVMInvestigator.java
index 43cc71d..000d37c 100644
--- a/plugins/hypervisors/kvm/src/com/cloud/ha/KVMInvestigator.java
+++ b/plugins/hypervisors/kvm/src/com/cloud/ha/KVMInvestigator.java
@@ -18,13 +18,6 @@
  */
 package com.cloud.ha;
 
-import java.util.List;
-
-import javax.ejb.Local;
-import javax.inject.Inject;
-
-import org.apache.log4j.Logger;
-
 import com.cloud.agent.AgentManager;
 import com.cloud.agent.api.Answer;
 import com.cloud.agent.api.CheckOnHostCommand;
@@ -35,6 +28,11 @@ import com.cloud.host.dao.HostDao;
 import com.cloud.hypervisor.Hypervisor;
 import com.cloud.resource.ResourceManager;
 import com.cloud.utils.component.AdapterBase;
+import org.apache.log4j.Logger;
+
+import javax.ejb.Local;
+import javax.inject.Inject;
+import java.util.List;
 
 @Local(value = Investigator.class)
 public class KVMInvestigator extends AdapterBase implements Investigator {
@@ -64,22 +62,47 @@ public class KVMInvestigator extends AdapterBase implements Investigator {
         if (agent.getHypervisorType() != Hypervisor.HypervisorType.KVM && agent.getHypervisorType() != Hypervisor.HypervisorType.LXC) {
             return null;
         }
+        Status hostStatus = null;
+        Status neighbourStatus = null;
         CheckOnHostCommand cmd = new CheckOnHostCommand(agent);
+
+        try {
+            Answer answer = _agentMgr.easySend(agent.getId(), cmd);
+            if (answer != null) {
+                hostStatus = answer.getResult() ? Status.Down : Status.Up;
+            }
+        } catch (Exception e) {
+            s_logger.debug("Failed to send command to host: " + agent.getId());
+        }
+        if (hostStatus == null) {
+            hostStatus = Status.Disconnected;
+        }
+
         List<HostVO> neighbors = _resourceMgr.listHostsInClusterByStatus(agent.getClusterId(), Status.Up);
         for (HostVO neighbor : neighbors) {
             if (neighbor.getId() == agent.getId() || (neighbor.getHypervisorType() != Hypervisor.HypervisorType.KVM && neighbor.getHypervisorType() != Hypervisor.HypervisorType.LXC)) {
                 continue;
             }
+            s_logger.debug("Investigating host:" + agent.getId() + " via neighbouring host:" + neighbor.getId());
             try {
                 Answer answer = _agentMgr.easySend(neighbor.getId(), cmd);
                 if (answer != null) {
-                    return answer.getResult() ? Status.Down : Status.Up;
+                    neighbourStatus = answer.getResult() ? Status.Down : Status.Up;
+                    s_logger.debug("Neighbouring host:" + neighbor.getId() + " returned status:" + neighbourStatus + " for the investigated host:" + agent.getId());
+                    if (neighbourStatus == Status.Up) {
+                        break;
+                    }
                 }
             } catch (Exception e) {
                 s_logger.debug("Failed to send command to host: " + neighbor.getId());
             }
         }
-
-        return null;
+        if (neighbourStatus == Status.Up && (hostStatus == Status.Disconnected || hostStatus == Status.Down)) {
+            hostStatus = Status.Disconnected;
+        }
+        if (neighbourStatus == Status.Down && (hostStatus == Status.Disconnected || hostStatus == Status.Down)) {
+            hostStatus = Status.Down;
+        }
+        return hostStatus;
     }
 }


[30/50] [abbrv] git commit: updated refs/heads/feature/vpc-ipv6 to 6140db5

Posted by ek...@apache.org.
Formatting the code - adding the final modifier to variables and indenting the code.

Signed-off-by: Daan Hoogland <da...@gmail.com>


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/159d8c2c
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/159d8c2c
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/159d8c2c

Branch: refs/heads/feature/vpc-ipv6
Commit: 159d8c2c977592c3678474a8961954cc3e3682b3
Parents: f92a503
Author: wilderrodrigues <wr...@schubergphilis.com>
Authored: Thu Jun 4 08:12:04 2015 +0200
Committer: Daan Hoogland <da...@gmail.com>
Committed: Thu Jun 4 12:34:03 2015 +0200

----------------------------------------------------------------------
 .../ovm/src/com/cloud/ovm/object/Test.java      | 63 ++++++++++----------
 1 file changed, 32 insertions(+), 31 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/159d8c2c/plugins/hypervisors/ovm/src/com/cloud/ovm/object/Test.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/ovm/src/com/cloud/ovm/object/Test.java b/plugins/hypervisors/ovm/src/com/cloud/ovm/object/Test.java
index 080b3cc..f5cecb1 100644
--- a/plugins/hypervisors/ovm/src/com/cloud/ovm/object/Test.java
+++ b/plugins/hypervisors/ovm/src/com/cloud/ovm/object/Test.java
@@ -21,7 +21,7 @@ import java.util.List;
 import java.util.Map;
 
 public class Test {
-    public static void main(String[] args) {
+    public static void main(final String[] args) {
         try {
             /*Connection c = new Connection("192.168.105.155", "oracle", "password");
             Utils util = new UtilsImpl(c);
@@ -56,16 +56,16 @@ public class Test {
             Pair<Long, Long> spaceInfo = storage.getSrSpaceInfo("192.168.110.232:/export/frank/nfs");
             System.out.println("Total:" + spaceInfo.first());
             System.out.println("Free:" + spaceInfo.second());*/
-            OvmVm.Details vm = new OvmVm.Details();
+            final OvmVm.Details vm = new OvmVm.Details();
             vm.cpuNum = 1;
             vm.memory = 512;
             vm.name = "Test";
             vm.uuid = "This-is-a-test";
-            OvmDisk.Details rootDisk = new OvmDisk.Details();
+            final OvmDisk.Details rootDisk = new OvmDisk.Details();
             rootDisk.path = "/root/root.raw";
             rootDisk.type = OvmDisk.WRITE;
             vm.rootDisk = rootDisk;
-            OvmDisk.Details dataDisk = new OvmDisk.Details();
+            final OvmDisk.Details dataDisk = new OvmDisk.Details();
             dataDisk.path = "/tmp/data.raw";
             dataDisk.type = OvmDisk.SHAREDWRITE;
             vm.disks.add(dataDisk);
@@ -73,7 +73,7 @@ public class Test {
             vm.disks.add(dataDisk);
             vm.disks.add(dataDisk);
             vm.disks.add(dataDisk);
-            OvmVif.Details vif = new OvmVif.Details();
+            final OvmVif.Details vif = new OvmVif.Details();
             vif.mac = "00:ff:ff:ff:ff:ee";
             vif.bridge = "xenbr0";
             vif.type = OvmVif.NETFRONT;
@@ -83,34 +83,35 @@ public class Test {
             vm.vifs.add(vif);
             vm.vifs.add(vif);
             //System.out.println(vm.toJson());
-            Connection c = new Connection("192.168.189.12", "oracle", "password");
+            final Connection c = new Connection("192.168.189.12", "oracle", "password");
             //System.out.println(Coder.toJson(OvmHost.getDetails(c)));
-            String txt =
-                "{\"MasterIp\": \"192.168.189.12\", \"dom0Memory\": 790626304, \"freeMemory\": 16378757120, \"totalMemory\": 17169383424, \"cpuNum\": 4, \"agentVersion\": \"2.3-38\", \"cpuSpeed\": 2261}";
+            final String txt =
+                    "{\"MasterIp\": \"192.168.189.12\", \"dom0Memory\": 790626304, \"freeMemory\": 16378757120, \"totalMemory\": 17169383424, \"cpuNum\": 4, \"agentVersion\": \"2.3-38\", \"cpuSpeed\": 2261}";
+
             //OvmHost.Details d = new GsonBuilder().create().fromJson(txt, OvmHost.Details.class);
             //OvmHost.Details d = Coder.fromJson(txt, OvmHost.Details.class);
             //OvmHost.Details d = OvmHost.getDetails(c);
             //System.out.println(Coder.toJson(d));
-//            OvmStoragePool.Details pool = new OvmStoragePool.Details();
-//            pool.path = "192.168.110.232:/export/frank/ovs";
-//            pool.type = OvmStoragePool.NFS;
-//            pool.uuid = "123";
-//            System.out.println(pool.toJson());
+            //            OvmStoragePool.Details pool = new OvmStoragePool.Details();
+            //            pool.path = "192.168.110.232:/export/frank/ovs";
+            //            pool.type = OvmStoragePool.NFS;
+            //            pool.uuid = "123";
+            //            System.out.println(pool.toJson());
 
             String cmd = null;
             System.out.println(args.length);
             if (args.length >= 1) {
                 cmd = args[0];
-                OvmVm.Details d = new OvmVm.Details();
+                final OvmVm.Details d = new OvmVm.Details();
                 d.cpuNum = 1;
                 d.memory = 512 * 1024 * 1024;
                 d.name = "MyTest";
                 d.uuid = "1-2-3-4-5";
-                OvmDisk.Details r = new OvmDisk.Details();
+                final OvmDisk.Details r = new OvmDisk.Details();
                 r.path = "/var/ovs/mount/60D0985974CA425AAF5D01A1F161CC8B/running_pool/36_systemvm/System.img";
                 r.type = OvmDisk.WRITE;
                 d.rootDisk = r;
-                OvmVif.Details v = new OvmVif.Details();
+                final OvmVif.Details v = new OvmVif.Details();
                 v.mac = "00:16:3E:5C:B1:D1";
                 v.bridge = "xenbr0";
                 v.type = OvmVif.NETFRONT;
@@ -123,55 +124,55 @@ public class Test {
                     OvmVm.create(c, d);
                     // c.call("OvmVm.echo", new Object[]{s});
                 } else if (cmd.equalsIgnoreCase("reboot")) {
-                    Map<String, String> res = OvmVm.reboot(c, "MyTest");
+                    final Map<String, String> res = OvmVm.reboot(c, "MyTest");
                     System.out.println(res.get("vncPort"));
                     //OvmVm.stop(c, "MyTest");
                     //OvmVm.create(c, d);
                 } else if (cmd.equalsIgnoreCase("stop")) {
                     OvmVm.stop(c, "MyTest");
                 } else if (cmd.equalsIgnoreCase("details")) {
-                    OvmVm.Details ddd = OvmVm.getDetails(c, "MyTest");
+                    final OvmVm.Details ddd = OvmVm.getDetails(c, "MyTest");
                     System.out.println(ddd.vifs.size());
                     System.out.println(ddd.rootDisk.path);
                     System.out.println(ddd.powerState);
                 } else if (cmd.equalsIgnoreCase("all")) {
                     System.out.println(OvmHost.getAllVms(c));
                 } else if (cmd.equalsIgnoreCase("createBridge")) {
-                    OvmBridge.Details bd = new OvmBridge.Details();
+                    final OvmBridge.Details bd = new OvmBridge.Details();
                     bd.name = "xenbr10";
                     bd.attach = args[1];
                     OvmBridge.create(c, bd);
                 } else if (cmd.equalsIgnoreCase("createVlan")) {
-                    OvmVlan.Details vd = new OvmVlan.Details();
+                    final OvmVlan.Details vd = new OvmVlan.Details();
                     vd.pif = "eth0";
                     vd.vid = 1000;
-                    String vname = OvmVlan.create(c, vd);
+                    final String vname = OvmVlan.create(c, vd);
                     System.out.println(vname);
                 } else if (cmd.equalsIgnoreCase("delVlan")) {
                     OvmVlan.delete(c, args[1]);
                 } else if (cmd.equalsIgnoreCase("delBr")) {
                     OvmBridge.delete(c, args[1]);
                 } else if (cmd.equalsIgnoreCase("getBrs")) {
-                    List<String> brs = OvmBridge.getAllBridges(c);
+                    final List<String> brs = OvmBridge.getAllBridges(c);
                     System.out.println(brs);
                 } else if (cmd.equalsIgnoreCase("getBrDetails")) {
-                    OvmBridge.Details brd = OvmBridge.getDetails(c, args[1]);
+                    final OvmBridge.Details brd = OvmBridge.getDetails(c, args[1]);
                     System.out.println(brd.interfaces);
                 }
 
             }
 
-            List<String> l = new ArrayList<String>();
+            final List<String> l = new ArrayList<String>();
             l.add("4b4d8951-f0b6-36c5-b4f3-a82ff2611c65");
             System.out.println(Coder.toJson(l));
 
-//            Map<String, String> res = OvmHost.getPerformanceStats(c, "xenbr0");
-//            System.out.println(res.toString());
-//            String stxt = "{\"vifs\": [{\"bridge\": \"xenbr0\", \"mac\": \"00:16:3E:5C:B1:D1\", \"type\": \"netfront\"}], \"powerState\": \"RUNNING\", \"disks\": [], \"cpuNum\": 1, \"memory\": 536870912, \"rootDisk\": {\"path\": \"/var/ovs/mount/60D0985974CA425AAF5D01A1F161CC8B/running_pool/MyTest/System.img\", \"type\": \"w\"}}";
-//            OvmVm.Details ddd = Coder.fromJson(stxt, OvmVm.Details.class);
-//            System.out.println(ddd.vifs.size());
-//            System.out.println(ddd.rootDisk.path);
-        } catch (Exception e) {
+            //            Map<String, String> res = OvmHost.getPerformanceStats(c, "xenbr0");
+            //            System.out.println(res.toString());
+            //            String stxt = "{\"vifs\": [{\"bridge\": \"xenbr0\", \"mac\": \"00:16:3E:5C:B1:D1\", \"type\": \"netfront\"}], \"powerState\": \"RUNNING\", \"disks\": [], \"cpuNum\": 1, \"memory\": 536870912, \"rootDisk\": {\"path\": \"/var/ovs/mount/60D0985974CA425AAF5D01A1F161CC8B/running_pool/MyTest/System.img\", \"type\": \"w\"}}";
+            //            OvmVm.Details ddd = Coder.fromJson(stxt, OvmVm.Details.class);
+            //            System.out.println(ddd.vifs.size());
+            //            System.out.println(ddd.rootDisk.path);
+        } catch (final Exception e) {
             // TODO Auto-generated catch block
             e.printStackTrace();
         }


[50/50] [abbrv] git commit: updated refs/heads/feature/vpc-ipv6 to 6140db5

Posted by ek...@apache.org.
CLOUDSTACK-8542: Correcting test case in test_vpc_on_host_maintenance.py

Signed-off-by: Gaurav Aradhye <ga...@clogeny.com>
This closes #360


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/6140db50
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/6140db50
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/6140db50

Branch: refs/heads/feature/vpc-ipv6
Commit: 6140db50b3d6ca79329cfcaf2f4293da8e38310d
Parents: b272d77
Author: Gaurav Aradhye <ga...@clogeny.com>
Authored: Fri Jun 5 14:35:56 2015 +0530
Committer: Gaurav Aradhye <ga...@clogeny.com>
Committed: Mon Jun 8 13:37:37 2015 +0530

----------------------------------------------------------------------
 .../maint/test_vpc_on_host_maintenance.py       | 121 +++----------------
 1 file changed, 17 insertions(+), 104 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/6140db50/test/integration/component/maint/test_vpc_on_host_maintenance.py
----------------------------------------------------------------------
diff --git a/test/integration/component/maint/test_vpc_on_host_maintenance.py b/test/integration/component/maint/test_vpc_on_host_maintenance.py
index 8ee50bf..3735b68 100644
--- a/test/integration/component/maint/test_vpc_on_host_maintenance.py
+++ b/test/integration/component/maint/test_vpc_on_host_maintenance.py
@@ -24,65 +24,23 @@ from marvin.lib.base import (Account,
                              VpcOffering)
 from marvin.lib.common import (get_domain,
                                get_zone,
-                               get_template,
-                               list_configurations)
+                               get_template)
 import time
 
-
-class Services:
-
-    """Test VPC services
-    """
-
-    def __init__(self):
-        self.services = {
-            "account": {
-                "email": "test@test.com",
-                "firstname": "Test",
-                "lastname": "User",
-                "username": "test",
-                # Random characters are appended for unique
-                # username
-                "password": "password",
-            },
-            "vpc_offering": {
-                "name": 'VPC off',
-                "displaytext": 'VPC off',
-                "supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,Vpn,Lb,\
-UserData,StaticNat,NetworkACL',
-            },
-            "vpc": {
-                "name": "TestVPC",
-                "displaytext": "TestVPC",
-                "cidr": '10.0.0.1/24'
-            },
-            "virtual_machine": {
-                "displayname": "Test VM",
-                "username": "root",
-                "password": "password",
-                "ssh_port": 22,
-                "hypervisor": 'XenServer',
-                # Hypervisor type should be same as
-                # hypervisor type of cluster
-                "privateport": 22,
-                "publicport": 22,
-                "protocol": 'TCP',
-            },
-            "ostype": 'CentOS 5.3 (64-bit)',
-            # Cent OS 5.3 (64 bit)
-            "sleep": 60,
-            "timeout": 10
-        }
-
-
 class TestVPCHostMaintenance(cloudstackTestCase):
 
     @classmethod
     def setUpClass(cls):
         cls.testClient = super(TestVPCHostMaintenance, cls).getClsTestClient()
         cls.api_client = cls.testClient.getApiClient()
-
-        cls.services = Services().services
+        cls._cleanup = []
+        cls.hosts = []
+        cls.vpcSupported = True
+        cls.hypervisor = cls.testClient.getHypervisorInfo()
+        if cls.hypervisor.lower() in ['hyperv']:
+            cls.vpcSupported = False
+            return
+        cls.services = cls.testClient.getParsedTestDataConfig()
         # Get Zone, Domain and templates
         cls.domain = get_domain(cls.api_client)
         cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
@@ -135,9 +93,7 @@ class TestVPCHostMaintenance(cloudstackTestCase):
                             host.name)
                     timeout = timeout - 1
 
-        cls._cleanup = [
-            cls.vpc_off
-        ]
+        cls._cleanup.append(cls.vpc_off)
         return
 
     @classmethod
@@ -166,13 +122,18 @@ class TestVPCHostMaintenance(cloudstackTestCase):
     def setUp(self):
         self.apiclient = self.testClient.getApiClient()
         self.dbclient = self.testClient.getDbConnection()
+        self.cleanup = []
+
+        if not self.vpcSupported:
+            self.skipTest("VPC is not supported on %s" % self.hypervisor)
+
         self.account = Account.create(
             self.apiclient,
             self.services["account"],
             admin=True,
             domainid=self.domain.id
         )
-        self.cleanup = [self.account]
+        self.cleanup.append(self.account)
         return
 
     def tearDown(self):
@@ -255,53 +216,5 @@ class TestVPCHostMaintenance(cloudstackTestCase):
             domainid=self.account.domainid,
             start=False
         )
-        self.validate_vpc_network(vpc, state='inactive')
-        return
-
-    @attr(tags=["advanced", "intervlan"])
-    def test_02_create_vpc_wait_gc(self):
-        """ Test VPC when host is in maintenance mode and wait till nw gc
-        """
-
-        # Validate the following
-        # 1. Put the host in maintenance mode.
-        # 2. Attempt to Create a VPC with cidr - 10.1.1.1/16
-        # 3. Wait for the VPC GC thread to run.
-        # 3. VPC will be created but will be in "Disabled" state and should
-        #    get deleted
-
-        self.debug("creating a VPC network in the account: %s" %
-                   self.account.name)
-        self.services["vpc"]["cidr"] = '10.1.1.1/16'
-        vpc = VPC.create(
-            self.apiclient,
-            self.services["vpc"],
-            vpcofferingid=self.vpc_off.id,
-            zoneid=self.zone.id,
-            account=self.account.name,
-            domainid=self.account.domainid,
-            start=False
-        )
-        self.validate_vpc_network(vpc, state='inactive')
-        interval = list_configurations(
-            self.apiclient,
-            name='network.gc.interval'
-        )
-        wait = list_configurations(
-            self.apiclient,
-            name='network.gc.wait'
-        )
-        self.debug("Sleep till network gc thread runs..")
-        # Sleep to ensure that all resources are deleted
-        time.sleep(int(interval[0].value) + int(wait[0].value))
-        vpcs = VPC.list(
-            self.apiclient,
-            id=vpc.id,
-            listall=True
-        )
-        self.assertEqual(
-            vpcs,
-            None,
-            "List VPC should not return anything after network gc"
-        )
+        self.validate_vpc_network(vpc, state='enabled')
         return


[21/50] [abbrv] git commit: updated refs/heads/feature/vpc-ipv6 to 6140db5

Posted by ek...@apache.org.
Set the url accordingly when installing a system vm template

The script that installs the system VM templates sets the uuid column for the
template being installed; however, it does not set the corresponding url column.
This commit changes that.

Signed-off-by: Remi Bergsma <ap...@remi.nl>

This closes #348


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/e983246c
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/e983246c
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/e983246c

Branch: refs/heads/feature/vpc-ipv6
Commit: e983246cd4caa91f5d6bc86b7f211682694d4972
Parents: f341246
Author: miguelaferreira <mi...@me.com>
Authored: Tue Jun 2 14:23:31 2015 +0200
Committer: Remi Bergsma <ap...@remi.nl>
Committed: Tue Jun 2 16:00:15 2015 +0200

----------------------------------------------------------------------
 scripts/storage/secondary/cloud-install-sys-tmplt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/e983246c/scripts/storage/secondary/cloud-install-sys-tmplt
----------------------------------------------------------------------
diff --git a/scripts/storage/secondary/cloud-install-sys-tmplt b/scripts/storage/secondary/cloud-install-sys-tmplt
index 25a7dae..fe700d3 100755
--- a/scripts/storage/secondary/cloud-install-sys-tmplt
+++ b/scripts/storage/secondary/cloud-install-sys-tmplt
@@ -184,7 +184,7 @@ fi
 _uuid=$(uuidgen)
 localfile=$_uuid.$ext
 
-_res=(`mysql -h $dbHost --user=$dbUser --password=$dbPassword --skip-column-names -U cloud -e "update cloud.vm_template set uuid=\"$_uuid\" where id=\"$templateId\""`)
+_res=(`mysql -h $dbHost --user=$dbUser --password=$dbPassword --skip-column-names -U cloud -e "update cloud.vm_template set uuid=\"$_uuid\", url=\"$url\" where id=\"$templateId\""`)
 
 mntpoint=`echo "$mntpoint" | sed 's|/*$||'`