You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@cloudstack.apache.org by ah...@apache.org on 2013/07/01 23:24:45 UTC

[01/50] [abbrv] git commit: updated refs/heads/vmsync to f737019

Updated Branches:
  refs/heads/vmsync 1f5c67231 -> f7370196d


Cloudstack-3106 Delete all ips except ipAlias. Cloudstack-3119 Shared network removal doesn't clean up corresponding IP ranges

Signed-off-by: Jayapal <ja...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/6b0df256
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/6b0df256
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/6b0df256

Branch: refs/heads/vmsync
Commit: 6b0df2566db34eb87c9603e6effcad2ad2b366c6
Parents: 7f2f25d
Author: Bharat Kumar <bh...@citrix.com>
Authored: Fri Jun 28 14:14:43 2013 +0530
Committer: Jayapal <ja...@apache.org>
Committed: Fri Jun 28 19:05:55 2013 +0530

----------------------------------------------------------------------
 engine/schema/src/com/cloud/dc/VlanVO.java      |   4 +
 .../src/com/cloud/network/dao/IPAddressDao.java |   6 +
 .../com/cloud/network/dao/IPAddressDaoImpl.java |  23 +++
 .../configuration/ConfigurationManagerImpl.java | 153 ++++++-------------
 .../VirtualNetworkApplianceManagerImpl.java     |   2 -
 5 files changed, 79 insertions(+), 109 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/6b0df256/engine/schema/src/com/cloud/dc/VlanVO.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/dc/VlanVO.java b/engine/schema/src/com/cloud/dc/VlanVO.java
index a2f7a9c..d262409 100644
--- a/engine/schema/src/com/cloud/dc/VlanVO.java
+++ b/engine/schema/src/com/cloud/dc/VlanVO.java
@@ -197,4 +197,8 @@ public class VlanVO implements Vlan {
 	public void setIp6Range(String ip6Range) {
 		this.ip6Range = ip6Range;
 	}
+
+    public void setIpRange(String ipRange) {
+        this.ip6Range = ipRange;
+    }
 }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/6b0df256/engine/schema/src/com/cloud/network/dao/IPAddressDao.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/network/dao/IPAddressDao.java b/engine/schema/src/com/cloud/network/dao/IPAddressDao.java
index fecd44a..3eba6d8 100755
--- a/engine/schema/src/com/cloud/network/dao/IPAddressDao.java
+++ b/engine/schema/src/com/cloud/network/dao/IPAddressDao.java
@@ -17,9 +17,11 @@
 package com.cloud.network.dao;
 
 import com.cloud.dc.Vlan.VlanType;
+import com.cloud.utils.db.DB;
 import com.cloud.utils.db.GenericDao;
 import com.cloud.utils.net.Ip;
 
+import java.sql.SQLException;
 import java.util.List;
 
 public interface IPAddressDao extends GenericDao<IPAddressVO, Long> {
@@ -72,4 +74,8 @@ public interface IPAddressDao extends GenericDao<IPAddressVO, Long> {
     IPAddressVO findByIpAndVlanId(String ipAddress, long vlanid);
 
     long countFreeIpsInVlan(long vlanDbId);
+
+    boolean deletePublicIPRangeExceptAliasIP(long vlanDbId, String aliasIp) throws SQLException;
+
+    boolean deletePublicIPRange(long vlanDbId) throws SQLException;
 }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/6b0df256/engine/schema/src/com/cloud/network/dao/IPAddressDaoImpl.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/network/dao/IPAddressDaoImpl.java b/engine/schema/src/com/cloud/network/dao/IPAddressDaoImpl.java
index 886011e..1051b69 100755
--- a/engine/schema/src/com/cloud/network/dao/IPAddressDaoImpl.java
+++ b/engine/schema/src/com/cloud/network/dao/IPAddressDaoImpl.java
@@ -40,6 +40,7 @@ import javax.ejb.Local;
 import javax.inject.Inject;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
+import java.sql.SQLException;
 import java.util.Date;
 import java.util.List;
 
@@ -365,6 +366,28 @@ public class IPAddressDaoImpl extends GenericDaoBase<IPAddressVO, Long> implemen
     }
 
     @Override
+    public boolean deletePublicIPRangeExceptAliasIP(long vlanDbId, String aliasIp) throws SQLException {
+        Transaction txn = Transaction.currentTxn();
+        String deleteSql = "DELETE FROM `cloud`.`user_ip_address` WHERE vlan_db_id = ? and public_ip_address!=?";
+
+        txn.start();
+        PreparedStatement stmt = txn.prepareAutoCloseStatement(deleteSql);
+        stmt.setLong(1, vlanDbId);
+        stmt.setString(2, aliasIp);
+        stmt.executeUpdate();
+        txn.commit();
+        return true;
+    }
+
+    @Override
+    public boolean deletePublicIPRange(long vlanDbId) throws SQLException{
+        SearchCriteria<IPAddressVO> sc = AllFieldsSearch.create();
+        sc.setParameters("vlan", vlanDbId);
+        remove(sc);
+        return true;
+    }
+
+    @Override
     @DB
     public boolean remove(Long id) {
         Transaction txn = Transaction.currentTxn();

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/6b0df256/server/src/com/cloud/configuration/ConfigurationManagerImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/configuration/ConfigurationManagerImpl.java b/server/src/com/cloud/configuration/ConfigurationManagerImpl.java
index 041f29a..51c323d 100755
--- a/server/src/com/cloud/configuration/ConfigurationManagerImpl.java
+++ b/server/src/com/cloud/configuration/ConfigurationManagerImpl.java
@@ -123,7 +123,6 @@ import com.cloud.event.ActionEvent;
 import com.cloud.event.EventTypes;
 import com.cloud.event.UsageEventUtils;
 import com.cloud.exception.ConcurrentOperationException;
-import com.cloud.exception.InsufficientAddressCapacityException;
 import com.cloud.exception.InsufficientCapacityException;
 import com.cloud.exception.InvalidParameterValueException;
 import com.cloud.exception.PermissionDeniedException;
@@ -142,7 +141,6 @@ import com.cloud.network.NetworkService;
 import com.cloud.network.Networks.BroadcastDomainType;
 import com.cloud.network.Networks.TrafficType;
 import com.cloud.network.PhysicalNetwork;
-import com.cloud.network.addr.PublicIp;
 import com.cloud.network.dao.FirewallRulesDao;
 import com.cloud.network.dao.IPAddressDao;
 import com.cloud.network.dao.IPAddressVO;
@@ -3138,11 +3136,13 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
         return vlan;
     }
 
-    public boolean removeFromDb (long  vlanDbId){
-        if (!deletePublicIPRange(vlanDbId)) {
-            return false;
-        }
-        return  _vlanDao.expunge(vlanDbId);
+    @DB
+    public void deleteVLANFromDb(long vlanDbId) throws SQLException {
+        Transaction txn = Transaction.currentTxn();
+        txn.start();
+        _publicIpAddressDao.deletePublicIPRange(vlanDbId);
+        _vlanDao.expunge(vlanDbId);
+        txn.commit();
     }
 
     @Override
@@ -3226,34 +3226,31 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
                             .getVlanType().toString(), ip.getSystem(), ip.getClass().getName(), ip.getUuid());
                 }
             }
-            if (_networkModel.areServicesSupportedInNetwork(vlanRange.getNetworkId(), Service.Dhcp)) {
-                Network network = _networkDao.findById(vlanRange.getNetworkId());
-                DhcpServiceProvider dhcpServiceProvider = _networkMgr.getDhcpServiceProvider(network);
-                if (!dhcpServiceProvider.getProvider().getName().equalsIgnoreCase(Provider.VirtualRouter.getName())) {
-                    Transaction txn = Transaction.currentTxn();
-                    txn.start();
-                    if (!removeFromDb(vlanDbId)) {
-                        txn.rollback();
-                        txn.close();
-                        return false;
-                    }
-
-                    else {
-                        txn.commit();
+            try {
+                if (_networkModel.areServicesSupportedInNetwork(vlanRange.getNetworkId(), Service.Dhcp)) {
+                    Network network = _networkDao.findById(vlanRange.getNetworkId());
+                    DhcpServiceProvider dhcpServiceProvider = _networkMgr.getDhcpServiceProvider(network);
+                    if (!dhcpServiceProvider.getProvider().getName().equalsIgnoreCase(Provider.VirtualRouter.getName())) {
+                        deleteVLANFromDb(vlanDbId);
+                    } else {
+                        return  handleIpAliasDeletion(vlanRange, vlanDbId, dhcpServiceProvider, network);
                     }
-                    txn.close();
                 }
 
                 else {
-                  return  handleIpAliasDeletion(vlanRange, vlanDbId, dhcpServiceProvider, network);
+                    deleteVLANFromDb(vlanDbId);
                 }
             }
+            catch ( SQLException e) {
+               throw  new CloudRuntimeException(e.getMessage());
+            }
+
         }
         return true;
     }
 
-    private boolean handleIpAliasDeletion(VlanVO vlanRange, long vlanDbId, DhcpServiceProvider dhcpServiceProvider, Network network) {
-        boolean result_final = false;
+    @DB
+    private boolean handleIpAliasDeletion(VlanVO vlanRange, long vlanDbId, DhcpServiceProvider dhcpServiceProvider, Network network) throws SQLException {
         Transaction txn = Transaction.currentTxn();
         txn.start();
         IPAddressVO ip = null;
@@ -3263,87 +3260,48 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
             //search if the vlan has any allocated ips.
             allocIpCount = _publicIpAddressDao.countIPs(vlanRange.getDataCenterId(), vlanDbId, true);
             if (allocIpCount > 1) {
-                throw  new InvalidParameterValueException ("cannot delete this range as some of the vlans are in use.");
+                throw  new InvalidParameterValueException ("Cannot delete this range as some of the vlans are in use.");
             }
-            if (allocIpCount == 0){
-                result_final=true;
+            else if (allocIpCount == 0){
+                deleteVLANFromDb(vlanDbId);
             }
             else {
                 ipAlias = _nicIpAliasDao.findByGatewayAndNetworkIdAndState(vlanRange.getVlanGateway(), vlanRange.getNetworkId(),  NicIpAlias.state.active);
-                ipAlias.setState(NicIpAlias.state.revoked);
-                _nicIpAliasDao.update(ipAlias.getId(), ipAlias);
                 //check if this ip belongs to this vlan and is allocated.
                 ip = _publicIpAddressDao.findByIpAndVlanId(ipAlias.getIp4Address(), vlanDbId);
                 if (ip != null && ip.getState() == IpAddress.State.Allocated) {
                     //check if there any other vlan ranges in the same subnet having free ips
                     List<VlanVO> vlanRanges = _vlanDao.listVlansByNetworkIdAndGateway(vlanRange.getNetworkId(), vlanRange.getVlanGateway());
                     //if there is no other vlanrage in this subnet. free the ip and delete the vlan.
-                    if (vlanRanges.size() == 1){
-                        boolean result = dhcpServiceProvider.removeDhcpSupportForSubnet(network);
-                        if (result == false) {
-                            result_final = false;
+                    if (vlanRanges.size() == 1) {
+                        ipAlias.setState(NicIpAlias.state.revoked);
+                        _nicIpAliasDao.update(ipAlias.getId(), ipAlias);
+                        if (!dhcpServiceProvider.removeDhcpSupportForSubnet(network)) {
                             s_logger.debug("Failed to delete the vlan range as we could not free the ip used to provide the dhcp service.");
-                        } else {
+                            //setting the state back to active
+                            ipAlias.setState(NicIpAlias.state.active);
+                            _nicIpAliasDao.update(ipAlias.getId(), ipAlias);
+                        }
+                        else {
                             _publicIpAddressDao.unassignIpAddress(ip.getId());
-                            result_final = true;
+                            deleteVLANFromDb(vlanDbId);
                         }
                     } else {
-                        // if there are more vlans in the subnet check if there
-                        // are free ips.
-                        List<Long> vlanDbIdList = new ArrayList<Long>();
-                        for (VlanVO vlanrange : vlanRanges) {
-                            if (vlanrange.getId() != vlanDbId) {
-                                vlanDbIdList.add(vlanrange.getId());
-                            }
-                        }
-                        s_logger.info("vlan Range"
-                                + vlanRange.getId()
-                                + " id being deleted, one of the Ips in this range is used to provide the dhcp service, trying to free this ip and allocate a new one.");
-                        for (VlanVO vlanrange : vlanRanges) {
-                            if (vlanrange.getId() != vlanDbId) {
-
-                                long freeIpsInsubnet =  _publicIpAddressDao.countFreeIpsInVlan(vlanrange.getId());
-                                if (freeIpsInsubnet > 0){
-                                    //assign one free ip to the router for creating ip Alias. The ipalias is system managed ip so we are using the system account to allocate the ip not the caller.
-                                    boolean result = false;
-                                    PublicIp routerPublicIP = _networkMgr.assignPublicIpAddressFromVlans(network.getDataCenterId(), null, _accountDao.findById(Account.ACCOUNT_ID_SYSTEM), Vlan.VlanType.DirectAttached, vlanDbIdList, network.getId(), null, false);
-                                    s_logger.info("creating a db entry for the new ip alias.");
-                                    NicIpAliasVO newipAlias = new NicIpAliasVO(ipAlias.getNicId(), routerPublicIP.getAddress().addr(), ipAlias.getVmId(), ipAlias.getAccountId(), network.getDomainId(), network.getId(), ipAlias.getGateway(), ipAlias.getNetmask());
-                                    newipAlias.setAliasCount(routerPublicIP.getIpMacAddress());
-                                    _nicIpAliasDao.persist(newipAlias);
-                                    //we revoke all the rules and apply all the rules as a part of the removedhcp config. so the new ip will get configured when we delete the old ip.
-                                    s_logger.info("removing the old ip alias on router");
-                                    result = dhcpServiceProvider.removeDhcpSupportForSubnet(network);
-                                    if (result == false) {
-                                        s_logger.debug("could't delete the ip alias on the router");
-                                        result_final = false;
-                                    }
-                                    else {
-                                        _publicIpAddressDao.unassignIpAddress(ip.getId());
-                                        result_final=true;
-                                    }
-                                }
-                            }
-                        }
+                        // if there are more vlans in the subnet, free all the ips in the range except the ip alias.
+                        s_logger.info("vlan Range"+vlanRange.getId()+" id being deleted, one of the Ips in this range is used to provide the dhcp service, will free the rest of the IPs in range.");
+                        _publicIpAddressDao.deletePublicIPRangeExceptAliasIP(vlanDbId, ipAlias.getIp4Address());
+                        VlanVO vlan = _vlanDao.findById(vlanDbId);
+                        vlan.setIpRange(ipAlias.getIp4Address()+"-"+ipAlias.getIp4Address());
+                        _vlanDao.update(vlan.getId(), vlan);
                     }
                 }
             }
-
-        } catch (InsufficientAddressCapacityException e) {
-            throw new InvalidParameterValueException("cannot delete  vlan range"+ vlanRange.getId()+"one of the ips in this range is benig used to provide dhcp service. Cannot use some other ip as there are no free ips in this subnet");
-        }
-        finally {
-            if (result_final) {
-                if (!removeFromDb(vlanDbId)) {
-                    txn.rollback();
-                }
-                else {
-                    txn.commit();
-                }
-                txn.close();
-            }
+        } catch (CloudRuntimeException e) {
+            txn.rollback();
+            throw e;
         }
-        return result_final;
+        txn.commit();
+        return true;
     }
 
     @Override
@@ -3566,25 +3524,6 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
     }
 
     @DB
-    protected boolean deletePublicIPRange(long vlanDbId) {
-        Transaction txn = Transaction.currentTxn();
-        String deleteSql = "DELETE FROM `cloud`.`user_ip_address` WHERE vlan_db_id = ?";
-
-        txn.start();
-        try {
-            PreparedStatement stmt = txn.prepareAutoCloseStatement(deleteSql);
-            stmt.setLong(1, vlanDbId);
-            stmt.executeUpdate();
-        } catch (Exception ex) {
-            s_logger.error(ex.getMessage());
-            return false;
-        }
-        txn.commit();
-
-        return true;
-    }
-
-    @DB
     protected boolean savePublicIPRange(String startIP, String endIP, long zoneId, long vlanDbId, long sourceNetworkid,
             long physicalNetworkId) {
         long startIPLong = NetUtils.ip2Long(startIP);

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/6b0df256/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java b/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java
index 5327f0b..ddfa998 100755
--- a/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java
+++ b/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java
@@ -2849,8 +2849,6 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V
                     network.getId(), DataCenter.class, network.getDataCenterId());
         }
 
-        boolean agentResults = true;
-
         for (DomainRouterVO router : routers) {
             if (router.getState() != State.Running) {
                 s_logger.warn("Failed to add/remove VPN users: router not in running state");


[13/50] [abbrv] git commit: updated refs/heads/vmsync to f737019

Posted by ah...@apache.org.
CLOUDSTACK-3144: [Automation] Deletion of templates failing. Fails to
find image store housing template.


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/87c401aa
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/87c401aa
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/87c401aa

Branch: refs/heads/vmsync
Commit: 87c401aaafbe786bd699091779a5f2741ab03889
Parents: 2c31f38
Author: Min Chen <mi...@citrix.com>
Authored: Fri Jun 28 17:35:19 2013 -0700
Committer: Min Chen <mi...@citrix.com>
Committed: Fri Jun 28 17:57:42 2013 -0700

----------------------------------------------------------------------
 .../cloud/storage/dao/VMTemplateDaoImpl.java    |  17 +-
 .../template/HypervisorTemplateAdapter.java     | 318 ++++++++++---------
 2 files changed, 169 insertions(+), 166 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/87c401aa/engine/schema/src/com/cloud/storage/dao/VMTemplateDaoImpl.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/storage/dao/VMTemplateDaoImpl.java b/engine/schema/src/com/cloud/storage/dao/VMTemplateDaoImpl.java
index ad33e7a..9e75990 100755
--- a/engine/schema/src/com/cloud/storage/dao/VMTemplateDaoImpl.java
+++ b/engine/schema/src/com/cloud/storage/dao/VMTemplateDaoImpl.java
@@ -28,8 +28,6 @@ import javax.ejb.Local;
 import javax.inject.Inject;
 import javax.naming.ConfigurationException;
 
-import org.apache.cloudstack.engine.subsystem.api.storage.TemplateEvent;
-import org.apache.cloudstack.engine.subsystem.api.storage.TemplateState;
 import org.apache.log4j.Logger;
 import org.springframework.stereotype.Component;
 
@@ -56,9 +54,7 @@ import com.cloud.utils.db.JoinBuilder;
 import com.cloud.utils.db.SearchBuilder;
 import com.cloud.utils.db.SearchCriteria;
 import com.cloud.utils.db.SearchCriteria.Func;
-import com.cloud.utils.db.SearchCriteria.Op;
 import com.cloud.utils.db.Transaction;
-import com.cloud.utils.db.UpdateBuilder;
 import com.cloud.utils.exception.CloudRuntimeException;
 
 @Component
@@ -102,7 +98,7 @@ public class VMTemplateDaoImpl extends GenericDaoBase<VMTemplateVO, Long> implem
     private SearchBuilder<VMTemplateVO> PublicIsoSearch;
     private SearchBuilder<VMTemplateVO> UserIsoSearch;
     private GenericSearchBuilder<VMTemplateVO, Long> CountTemplatesByAccount;
-   // private SearchBuilder<VMTemplateVO> updateStateSearch;
+    // private SearchBuilder<VMTemplateVO> updateStateSearch;
 
     @Inject
     ResourceTagDao _tagsDao;
@@ -344,6 +340,7 @@ public class VMTemplateDaoImpl extends GenericDaoBase<VMTemplateVO, Long> implem
         AccountIdSearch = createSearchBuilder();
         AccountIdSearch.and("accountId", AccountIdSearch.entity().getAccountId(), SearchCriteria.Op.EQ);
         AccountIdSearch.and("publicTemplate", AccountIdSearch.entity().isPublicTemplate(), SearchCriteria.Op.EQ);
+        AccountIdSearch.and("removed", AccountIdSearch.entity().getRemoved(), SearchCriteria.Op.NULL); // only list not removed templates for this account
         AccountIdSearch.done();
 
         SearchBuilder<VMTemplateZoneVO> tmpltZoneSearch = _templateZoneDao.createSearchBuilder();
@@ -369,11 +366,11 @@ public class VMTemplateDaoImpl extends GenericDaoBase<VMTemplateVO, Long> implem
         CountTemplatesByAccount.and("removed", CountTemplatesByAccount.entity().getRemoved(), SearchCriteria.Op.NULL);
         CountTemplatesByAccount.done();
 
-//        updateStateSearch = this.createSearchBuilder();
-//        updateStateSearch.and("id", updateStateSearch.entity().getId(), Op.EQ);
-//        updateStateSearch.and("state", updateStateSearch.entity().getState(), Op.EQ);
-//        updateStateSearch.and("updatedCount", updateStateSearch.entity().getUpdatedCount(), Op.EQ);
-//        updateStateSearch.done();
+        //        updateStateSearch = this.createSearchBuilder();
+        //        updateStateSearch.and("id", updateStateSearch.entity().getId(), Op.EQ);
+        //        updateStateSearch.and("state", updateStateSearch.entity().getState(), Op.EQ);
+        //        updateStateSearch.and("updatedCount", updateStateSearch.entity().getUpdatedCount(), Op.EQ);
+        //        updateStateSearch.done();
 
         return result;
     }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/87c401aa/server/src/com/cloud/template/HypervisorTemplateAdapter.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/template/HypervisorTemplateAdapter.java b/server/src/com/cloud/template/HypervisorTemplateAdapter.java
index 569d947..da8c1c4 100755
--- a/server/src/com/cloud/template/HypervisorTemplateAdapter.java
+++ b/server/src/com/cloud/template/HypervisorTemplateAdapter.java
@@ -41,8 +41,8 @@ import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher;
 import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
 import org.apache.cloudstack.framework.async.AsyncRpcConext;
 import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO;
-import org.apache.log4j.Logger;
 import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity;
+import org.apache.log4j.Logger;
 
 import com.cloud.agent.AgentManager;
 import com.cloud.agent.api.Answer;
@@ -54,14 +54,13 @@ import com.cloud.event.EventTypes;
 import com.cloud.event.UsageEventUtils;
 import com.cloud.exception.InvalidParameterValueException;
 import com.cloud.exception.ResourceAllocationException;
-import com.cloud.host.HostVO;
 import com.cloud.hypervisor.Hypervisor.HypervisorType;
 import com.cloud.storage.Storage.ImageFormat;
 import com.cloud.storage.Storage.TemplateType;
 import com.cloud.storage.TemplateProfile;
-import com.cloud.storage.VMTemplateZoneVO;
 import com.cloud.storage.VMTemplateStorageResourceAssoc.Status;
 import com.cloud.storage.VMTemplateVO;
+import com.cloud.storage.VMTemplateZoneVO;
 import com.cloud.storage.dao.VMTemplateZoneDao;
 import com.cloud.storage.download.DownloadMonitor;
 import com.cloud.user.Account;
@@ -71,9 +70,9 @@ import com.cloud.utils.exception.CloudRuntimeException;
 
 @Local(value=TemplateAdapter.class)
 public class HypervisorTemplateAdapter extends TemplateAdapterBase {
-	private final static Logger s_logger = Logger.getLogger(HypervisorTemplateAdapter.class);
-	@Inject DownloadMonitor _downloadMonitor;
-	@Inject AgentManager _agentMgr;
+    private final static Logger s_logger = Logger.getLogger(HypervisorTemplateAdapter.class);
+    @Inject DownloadMonitor _downloadMonitor;
+    @Inject AgentManager _agentMgr;
 
     @Inject DataStoreManager storeMgr;
     @Inject TemplateService imageService;
@@ -90,92 +89,92 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase {
     }
 
 
-	@Override
-	public TemplateProfile prepare(RegisterIsoCmd cmd) throws ResourceAllocationException {
-		TemplateProfile profile = super.prepare(cmd);
-		String url = profile.getUrl();
+    @Override
+    public TemplateProfile prepare(RegisterIsoCmd cmd) throws ResourceAllocationException {
+        TemplateProfile profile = super.prepare(cmd);
+        String url = profile.getUrl();
+
+        if((!url.toLowerCase().endsWith("iso"))&&(!url.toLowerCase().endsWith("iso.zip"))&&(!url.toLowerCase().endsWith("iso.bz2"))
+                &&(!url.toLowerCase().endsWith("iso.gz"))){
+            throw new InvalidParameterValueException("Please specify a valid iso");
+        }
+
+        UriUtils.validateUrl(url);
+        profile.setUrl(url);
+        // Check that the resource limit for secondary storage won't be exceeded
+        _resourceLimitMgr.checkResourceLimit(_accountMgr.getAccount(cmd.getEntityOwnerId()),
+                ResourceType.secondary_storage, UriUtils.getRemoteSize(url));
+        return profile;
+    }
+
+    @Override
+    public TemplateProfile prepare(RegisterTemplateCmd cmd) throws ResourceAllocationException {
+        TemplateProfile profile = super.prepare(cmd);
+        String url = profile.getUrl();
+
+        if((!url.toLowerCase().endsWith("vhd"))&&(!url.toLowerCase().endsWith("vhd.zip"))
+                &&(!url.toLowerCase().endsWith("vhd.bz2"))&&(!url.toLowerCase().endsWith("vhd.gz"))
+                &&(!url.toLowerCase().endsWith("qcow2"))&&(!url.toLowerCase().endsWith("qcow2.zip"))
+                &&(!url.toLowerCase().endsWith("qcow2.bz2"))&&(!url.toLowerCase().endsWith("qcow2.gz"))
+                &&(!url.toLowerCase().endsWith("ova"))&&(!url.toLowerCase().endsWith("ova.zip"))
+                &&(!url.toLowerCase().endsWith("ova.bz2"))&&(!url.toLowerCase().endsWith("ova.gz"))
+                &&(!url.toLowerCase().endsWith("tar"))&&(!url.toLowerCase().endsWith("tar.zip"))
+                &&(!url.toLowerCase().endsWith("tar.bz2"))&&(!url.toLowerCase().endsWith("tar.gz"))
+                &&(!url.toLowerCase().endsWith("img"))&&(!url.toLowerCase().endsWith("raw"))){
+            throw new InvalidParameterValueException("Please specify a valid "+ cmd.getFormat().toLowerCase());
+        }
+
+        if ((cmd.getFormat().equalsIgnoreCase("vhd") && (!url.toLowerCase().endsWith("vhd") && !url.toLowerCase().endsWith("vhd.zip") && !url.toLowerCase().endsWith("vhd.bz2") && !url.toLowerCase().endsWith("vhd.gz") ))
+                || (cmd.getFormat().equalsIgnoreCase("qcow2") && (!url.toLowerCase().endsWith("qcow2") && !url.toLowerCase().endsWith("qcow2.zip") && !url.toLowerCase().endsWith("qcow2.bz2") && !url.toLowerCase().endsWith("qcow2.gz") ))
+                || (cmd.getFormat().equalsIgnoreCase("ova") && (!url.toLowerCase().endsWith("ova") && !url.toLowerCase().endsWith("ova.zip") && !url.toLowerCase().endsWith("ova.bz2") && !url.toLowerCase().endsWith("ova.gz")))
+                || (cmd.getFormat().equalsIgnoreCase("tar") && (!url.toLowerCase().endsWith("tar") && !url.toLowerCase().endsWith("tar.zip") && !url.toLowerCase().endsWith("tar.bz2") && !url.toLowerCase().endsWith("tar.gz")))
+                || (cmd.getFormat().equalsIgnoreCase("raw") && (!url.toLowerCase().endsWith("img") && !url.toLowerCase().endsWith("raw")))) {
+            throw new InvalidParameterValueException("Please specify a valid URL. URL:" + url + " is an invalid for the format " + cmd.getFormat().toLowerCase());
+        }
+
+        UriUtils.validateUrl(url);
+        profile.setUrl(url);
+        // Check that the resource limit for secondary storage won't be exceeded
+        _resourceLimitMgr.checkResourceLimit(_accountMgr.getAccount(cmd.getEntityOwnerId()),
+                ResourceType.secondary_storage, UriUtils.getRemoteSize(url));
+        return profile;
+    }
+
+    @Override
+    public VMTemplateVO create(TemplateProfile profile) {
+        // persist entry in vm_template, vm_template_details and template_zone_ref tables, not that entry at template_store_ref is not created here, and created in createTemplateAsync.
+        VMTemplateVO template = persistTemplate(profile);
 
-		if((!url.toLowerCase().endsWith("iso"))&&(!url.toLowerCase().endsWith("iso.zip"))&&(!url.toLowerCase().endsWith("iso.bz2"))
-        		&&(!url.toLowerCase().endsWith("iso.gz"))){
-        	throw new InvalidParameterValueException("Please specify a valid iso");
+        if (template == null) {
+            throw new CloudRuntimeException("Unable to persist the template " + profile.getTemplate());
         }
 
-		UriUtils.validateUrl(url);
-		profile.setUrl(url);
-		// Check that the resource limit for secondary storage won't be exceeded
-		_resourceLimitMgr.checkResourceLimit(_accountMgr.getAccount(cmd.getEntityOwnerId()),
-		        ResourceType.secondary_storage, UriUtils.getRemoteSize(url));
-		return profile;
-	}
-
-	@Override
-	public TemplateProfile prepare(RegisterTemplateCmd cmd) throws ResourceAllocationException {
-		TemplateProfile profile = super.prepare(cmd);
-		String url = profile.getUrl();
-
-		if((!url.toLowerCase().endsWith("vhd"))&&(!url.toLowerCase().endsWith("vhd.zip"))
-	        &&(!url.toLowerCase().endsWith("vhd.bz2"))&&(!url.toLowerCase().endsWith("vhd.gz"))
-	        &&(!url.toLowerCase().endsWith("qcow2"))&&(!url.toLowerCase().endsWith("qcow2.zip"))
-	        &&(!url.toLowerCase().endsWith("qcow2.bz2"))&&(!url.toLowerCase().endsWith("qcow2.gz"))
-	        &&(!url.toLowerCase().endsWith("ova"))&&(!url.toLowerCase().endsWith("ova.zip"))
-	        &&(!url.toLowerCase().endsWith("ova.bz2"))&&(!url.toLowerCase().endsWith("ova.gz"))
-	        &&(!url.toLowerCase().endsWith("tar"))&&(!url.toLowerCase().endsWith("tar.zip"))
-	        &&(!url.toLowerCase().endsWith("tar.bz2"))&&(!url.toLowerCase().endsWith("tar.gz"))
-	        &&(!url.toLowerCase().endsWith("img"))&&(!url.toLowerCase().endsWith("raw"))){
-	        throw new InvalidParameterValueException("Please specify a valid "+ cmd.getFormat().toLowerCase());
-	    }
-
-		if ((cmd.getFormat().equalsIgnoreCase("vhd") && (!url.toLowerCase().endsWith("vhd") && !url.toLowerCase().endsWith("vhd.zip") && !url.toLowerCase().endsWith("vhd.bz2") && !url.toLowerCase().endsWith("vhd.gz") ))
-			|| (cmd.getFormat().equalsIgnoreCase("qcow2") && (!url.toLowerCase().endsWith("qcow2") && !url.toLowerCase().endsWith("qcow2.zip") && !url.toLowerCase().endsWith("qcow2.bz2") && !url.toLowerCase().endsWith("qcow2.gz") ))
-			|| (cmd.getFormat().equalsIgnoreCase("ova") && (!url.toLowerCase().endsWith("ova") && !url.toLowerCase().endsWith("ova.zip") && !url.toLowerCase().endsWith("ova.bz2") && !url.toLowerCase().endsWith("ova.gz")))
-			|| (cmd.getFormat().equalsIgnoreCase("tar") && (!url.toLowerCase().endsWith("tar") && !url.toLowerCase().endsWith("tar.zip") && !url.toLowerCase().endsWith("tar.bz2") && !url.toLowerCase().endsWith("tar.gz")))
-			|| (cmd.getFormat().equalsIgnoreCase("raw") && (!url.toLowerCase().endsWith("img") && !url.toLowerCase().endsWith("raw")))) {
-	        throw new InvalidParameterValueException("Please specify a valid URL. URL:" + url + " is an invalid for the format " + cmd.getFormat().toLowerCase());
-		}
-
-		UriUtils.validateUrl(url);
-		profile.setUrl(url);
-		// Check that the resource limit for secondary storage won't be exceeded
-		_resourceLimitMgr.checkResourceLimit(_accountMgr.getAccount(cmd.getEntityOwnerId()),
-		        ResourceType.secondary_storage, UriUtils.getRemoteSize(url));
-		return profile;
-	}
-
-	@Override
-	public VMTemplateVO create(TemplateProfile profile) {
-	    // persist entry in vm_template, vm_template_details and template_zone_ref tables, not that entry at template_store_ref is not created here, and created in createTemplateAsync.
-		VMTemplateVO template = persistTemplate(profile);
-
-		if (template == null) {
-			throw new CloudRuntimeException("Unable to persist the template " + profile.getTemplate());
-		}
-
-		// find all eligible image stores for this zone scope
-		List<DataStore> imageStores = this.storeMgr.getImageStoresByScope(new ZoneScope(profile.getZoneId()));
-		if ( imageStores == null || imageStores.size() == 0 ){
-		    throw new CloudRuntimeException("Unable to find image store to download template "+ profile.getTemplate());
-		}
+        // find all eligible image stores for this zone scope
+        List<DataStore> imageStores = this.storeMgr.getImageStoresByScope(new ZoneScope(profile.getZoneId()));
+        if ( imageStores == null || imageStores.size() == 0 ){
+            throw new CloudRuntimeException("Unable to find image store to download template "+ profile.getTemplate());
+        }
         for (DataStore imageStore : imageStores) {
-        	TemplateInfo tmpl = this.imageFactory.getTemplate(template.getId(), imageStore);
-        	CreateTemplateContext<TemplateApiResult> context = new CreateTemplateContext<TemplateApiResult>(null, tmpl);
-        	AsyncCallbackDispatcher<HypervisorTemplateAdapter, TemplateApiResult> caller = AsyncCallbackDispatcher.create(this);
-        	caller.setCallback(caller.getTarget().createTemplateAsyncCallBack(null, null));
-        	caller.setContext(context);
-           this.imageService
-                    .createTemplateAsync(tmpl, imageStore, caller);
+            TemplateInfo tmpl = this.imageFactory.getTemplate(template.getId(), imageStore);
+            CreateTemplateContext<TemplateApiResult> context = new CreateTemplateContext<TemplateApiResult>(null, tmpl);
+            AsyncCallbackDispatcher<HypervisorTemplateAdapter, TemplateApiResult> caller = AsyncCallbackDispatcher.create(this);
+            caller.setCallback(caller.getTarget().createTemplateAsyncCallBack(null, null));
+            caller.setContext(context);
+            this.imageService
+            .createTemplateAsync(tmpl, imageStore, caller);
         }
         _resourceLimitMgr.incrementResourceCount(profile.getAccountId(), ResourceType.template);
 
         return template;
     }
 
-	private class CreateTemplateContext<T> extends AsyncRpcConext<T> {
-		final TemplateInfo template;
-		public CreateTemplateContext(AsyncCompletionCallback<T> callback, TemplateInfo template) {
-			super(callback);
-			this.template = template;
-		}
-	}
+    private class CreateTemplateContext<T> extends AsyncRpcConext<T> {
+        final TemplateInfo template;
+        public CreateTemplateContext(AsyncCompletionCallback<T> callback, TemplateInfo template) {
+            super(callback);
+            this.template = template;
+        }
+    }
 
     protected Void createTemplateAsyncCallBack(AsyncCallbackDispatcher<HypervisorTemplateAdapter, TemplateApiResult> callback,
             CreateTemplateContext<TemplateApiResult> context) {
@@ -193,73 +192,80 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase {
             }
         }
 
-		return null;
-	}
+        return null;
+    }
 
-	@Override @DB
-	public boolean delete(TemplateProfile profile) {
-		boolean success = true;
+    @Override @DB
+    public boolean delete(TemplateProfile profile) {
+        boolean success = true;
 
-    	VMTemplateVO template = profile.getTemplate();
+        VMTemplateVO template = profile.getTemplate();
 
         // find all eligible image stores for this template
         List<DataStore> imageStores = this.templateMgr.getImageStoreByTemplate(template.getId(), profile.getZoneId());
-        if ( imageStores == null || imageStores.size() == 0 ){
-            throw new CloudRuntimeException("Unable to find image store to delete template "+ profile.getTemplate());
-        }
-
-        // Make sure the template is downloaded to all found image stores
-        for (DataStore store : imageStores) {
-            long storeId = store.getId();
-            List<TemplateDataStoreVO> templateStores = _tmpltStoreDao.listByTemplateStore(template.getId(), storeId);
-            for (TemplateDataStoreVO templateStore : templateStores) {
-                if (templateStore.getDownloadState() == Status.DOWNLOAD_IN_PROGRESS) {
-                    String errorMsg = "Please specify a template that is not currently being downloaded.";
-                    s_logger.debug("Template: " + template.getName() + " is currently being downloaded to secondary storage host: " + store.getName() + "; cant' delete it.");
-                    throw new CloudRuntimeException(errorMsg);
+        if (imageStores == null || imageStores.size() == 0) {
+            // already destroyed on image stores
+            s_logger.info("Unable to find image store still having template: " + template.getName()
+                    + ", so just mark the template removed");
+        } else {
+            // Make sure the template is downloaded to all found image stores
+            for (DataStore store : imageStores) {
+                long storeId = store.getId();
+                List<TemplateDataStoreVO> templateStores = _tmpltStoreDao
+                        .listByTemplateStore(template.getId(), storeId);
+                for (TemplateDataStoreVO templateStore : templateStores) {
+                    if (templateStore.getDownloadState() == Status.DOWNLOAD_IN_PROGRESS) {
+                        String errorMsg = "Please specify a template that is not currently being downloaded.";
+                        s_logger.debug("Template: " + template.getName()
+                                + " is currently being downloaded to secondary storage host: " + store.getName()
+                                + "; cant' delete it.");
+                        throw new CloudRuntimeException(errorMsg);
+                    }
                 }
             }
-        }
 
-        String eventType = "";
-        if (template.getFormat().equals(ImageFormat.ISO)) {
-            eventType = EventTypes.EVENT_ISO_DELETE;
-        } else {
-            eventType = EventTypes.EVENT_TEMPLATE_DELETE;
-        }
-
-        for (DataStore imageStore : imageStores) {
-            // publish zone-wide usage event
-            Long sZoneId = ((ImageStoreEntity)imageStore).getDataCenterId();
-            if (sZoneId != null) {
-                UsageEventUtils.publishUsageEvent(eventType, template.getAccountId(), sZoneId, template.getId(), null, null, null);
+            String eventType = "";
+            if (template.getFormat().equals(ImageFormat.ISO)) {
+                eventType = EventTypes.EVENT_ISO_DELETE;
+            } else {
+                eventType = EventTypes.EVENT_TEMPLATE_DELETE;
             }
 
-            s_logger.info("Delete template from image store: " + imageStore.getName());
-            AsyncCallFuture<TemplateApiResult> future = this.imageService
-                    .deleteTemplateAsync(this.imageFactory.getTemplate(template.getId(), imageStore));
-            try {
-                TemplateApiResult result = future.get();
-                success = result.isSuccess();
-                if ( !success )
-                    break;
-
-                // remove from template_zone_ref
-                List<VMTemplateZoneVO> templateZones = templateZoneDao.listByZoneTemplate(sZoneId, template.getId());
-                if (templateZones != null) {
-                    for (VMTemplateZoneVO templateZone : templateZones) {
-                        templateZoneDao.remove(templateZone.getId());
+            for (DataStore imageStore : imageStores) {
+                // publish zone-wide usage event
+                Long sZoneId = ((ImageStoreEntity) imageStore).getDataCenterId();
+                if (sZoneId != null) {
+                    UsageEventUtils.publishUsageEvent(eventType, template.getAccountId(), sZoneId, template.getId(),
+                            null, null, null);
+                }
+
+                s_logger.info("Delete template from image store: " + imageStore.getName());
+                AsyncCallFuture<TemplateApiResult> future = this.imageService.deleteTemplateAsync(this.imageFactory
+                        .getTemplate(template.getId(), imageStore));
+                try {
+                    TemplateApiResult result = future.get();
+                    success = result.isSuccess();
+                    if (!success) {
+                        break;
+                    }
+
+                    // remove from template_zone_ref
+                    List<VMTemplateZoneVO> templateZones = templateZoneDao
+                            .listByZoneTemplate(sZoneId, template.getId());
+                    if (templateZones != null) {
+                        for (VMTemplateZoneVO templateZone : templateZones) {
+                            templateZoneDao.remove(templateZone.getId());
+                        }
                     }
+                } catch (InterruptedException e) {
+                    s_logger.debug("delete template Failed", e);
+                    throw new CloudRuntimeException("delete template Failed", e);
+                } catch (ExecutionException e) {
+                    s_logger.debug("delete template Failed", e);
+                    throw new CloudRuntimeException("delete template Failed", e);
                 }
-            } catch (InterruptedException e) {
-                s_logger.debug("delete template Failed", e);
-                throw new CloudRuntimeException("delete template Failed", e);
-            } catch (ExecutionException e) {
-                s_logger.debug("delete template Failed", e);
-                throw new CloudRuntimeException("delete template Failed", e);
             }
         }
-
         if (success) {
             s_logger.info("Delete template from template table");
             // remove template from vm_templates table
@@ -275,36 +281,36 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase {
         return success;
 
 
-	}
+    }
 
-	@Override
+    @Override
     public TemplateProfile prepareDelete(DeleteTemplateCmd cmd) {
-		TemplateProfile profile = super.prepareDelete(cmd);
-		VMTemplateVO template = profile.getTemplate();
-		Long zoneId = profile.getZoneId();
+        TemplateProfile profile = super.prepareDelete(cmd);
+        VMTemplateVO template = profile.getTemplate();
+        Long zoneId = profile.getZoneId();
 
-		if (template.getTemplateType() == TemplateType.SYSTEM) {
-			throw new InvalidParameterValueException("The DomR template cannot be deleted.");
-		}
+        if (template.getTemplateType() == TemplateType.SYSTEM) {
+            throw new InvalidParameterValueException("The DomR template cannot be deleted.");
+        }
 
-		if (zoneId != null && (this.storeMgr.getImageStore(zoneId) == null)) {
-			throw new InvalidParameterValueException("Failed to find a secondary storage in the specified zone.");
-		}
+        if (zoneId != null && (this.storeMgr.getImageStore(zoneId) == null)) {
+            throw new InvalidParameterValueException("Failed to find a secondary storage in the specified zone.");
+        }
 
-		return profile;
-	}
+        return profile;
+    }
 
-	@Override
+    @Override
     public TemplateProfile prepareDelete(DeleteIsoCmd cmd) {
-		TemplateProfile profile = super.prepareDelete(cmd);
-		Long zoneId = profile.getZoneId();
+        TemplateProfile profile = super.prepareDelete(cmd);
+        Long zoneId = profile.getZoneId();
 
-		if (zoneId != null && (this.storeMgr.getImageStore(zoneId) == null)) {
-    		throw new InvalidParameterValueException("Failed to find a secondary storage in the specified zone.");
-    	}
+        if (zoneId != null && (this.storeMgr.getImageStore(zoneId) == null)) {
+            throw new InvalidParameterValueException("Failed to find a secondary storage in the specified zone.");
+        }
 
-		return profile;
-	}
+        return profile;
+    }
 
     @Override
     public TemplateProfile prepareExtractTemplate(ExtractTemplateCmd extractcmd) {


[46/50] [abbrv] git commit: updated refs/heads/vmsync to f737019

Posted by ah...@apache.org.
removed CleanupDelegate

It did not have an implementation

Signed-off-by: Hugo Trippaers <ht...@schubergphilis.com>


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/16b7509a
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/16b7509a
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/16b7509a

Branch: refs/heads/vmsync
Commit: 16b7509a234158b536155be9278cfee33b93a5c7
Parents: 639592f
Author: Laszlo Hornyak <la...@gmail.com>
Authored: Sat Jun 29 18:39:44 2013 +0200
Committer: Hugo Trippaers <ht...@schubergphilis.com>
Committed: Mon Jul 1 17:08:15 2013 +0200

----------------------------------------------------------------------
 utils/src/com/cloud/utils/CleanupDelegate.java | 22 ---------------------
 1 file changed, 22 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/16b7509a/utils/src/com/cloud/utils/CleanupDelegate.java
----------------------------------------------------------------------
diff --git a/utils/src/com/cloud/utils/CleanupDelegate.java b/utils/src/com/cloud/utils/CleanupDelegate.java
deleted file mode 100644
index 6c368c3..0000000
--- a/utils/src/com/cloud/utils/CleanupDelegate.java
+++ /dev/null
@@ -1,22 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// the License.  You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-package com.cloud.utils;
-
-public interface CleanupDelegate<T, M> {
-    
-	boolean cleanup(T itemContext, M managerContext);
-}


[17/50] [abbrv] git commit: updated refs/heads/vmsync to f737019

Posted by ah...@apache.org.
Fix typo in class name

AsyncRpcConext -> AsyncRpcContext

Signed-off-by: Prasanna Santhanam <ts...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/de38cd81
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/de38cd81
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/de38cd81

Branch: refs/heads/vmsync
Commit: de38cd815f76e39ddfd2d2cf9354378c677f718a
Parents: 56a001d
Author: Prasanna Santhanam <ts...@apache.org>
Authored: Tue Jun 25 19:05:30 2013 +0530
Committer: Prasanna Santhanam <ts...@apache.org>
Committed: Sat Jun 29 13:50:25 2013 +0530

----------------------------------------------------------------------
 .../storage/image/TemplateServiceImpl.java      |  7 +---
 .../storage/snapshot/SnapshotServiceImpl.java   |  8 ++--
 .../datastore/DataObjectManagerImpl.java        |  8 ++--
 .../storage/image/BaseImageStoreDriverImpl.java | 41 +++++++++-----------
 .../storage/volume/VolumeServiceImpl.java       | 16 ++++----
 .../framework/async/AsyncRpcConext.java         | 30 --------------
 .../framework/async/AsyncRpcContext.java        | 30 ++++++++++++++
 .../AsyncSampleEventDrivenStyleCaller.java      |  4 +-
 .../motion/VmwareStorageMotionStrategyTest.java |  4 +-
 .../CloudStackImageStoreLifeCycleImpl.java      | 34 ++++++++--------
 .../driver/SampleImageStoreDriverImpl.java      |  2 -
 .../SamplePrimaryDataStoreDriverImpl.java       | 14 ++++---
 .../template/HypervisorTemplateAdapter.java     | 16 ++++----
 13 files changed, 104 insertions(+), 110 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/de38cd81/engine/storage/image/src/org/apache/cloudstack/storage/image/TemplateServiceImpl.java
----------------------------------------------------------------------
diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/TemplateServiceImpl.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/TemplateServiceImpl.java
index da62712..22eb010 100644
--- a/engine/storage/image/src/org/apache/cloudstack/storage/image/TemplateServiceImpl.java
+++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/TemplateServiceImpl.java
@@ -40,16 +40,14 @@ import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreState
 import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event;
 import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
 import org.apache.cloudstack.engine.subsystem.api.storage.TemplateDataFactory;
-import org.apache.cloudstack.engine.subsystem.api.storage.TemplateEvent;
 import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
 import org.apache.cloudstack.engine.subsystem.api.storage.TemplateService;
-import org.apache.cloudstack.engine.subsystem.api.storage.TemplateState;
 import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
 import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
 import org.apache.cloudstack.framework.async.AsyncCallFuture;
 import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher;
 import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
-import org.apache.cloudstack.framework.async.AsyncRpcConext;
+import org.apache.cloudstack.framework.async.AsyncRpcContext;
 import org.apache.cloudstack.storage.command.CommandResult;
 import org.apache.cloudstack.storage.command.DeleteCommand;
 import org.apache.cloudstack.storage.datastore.DataObjectManager;
@@ -84,7 +82,6 @@ import com.cloud.template.TemplateManager;
 import com.cloud.user.AccountManager;
 import com.cloud.user.ResourceLimitService;
 import com.cloud.utils.UriUtils;
-import com.cloud.utils.fsm.NoTransitionException;
 
 @Component
 public class TemplateServiceImpl implements TemplateService {
@@ -122,7 +119,7 @@ public class TemplateServiceImpl implements TemplateService {
     @Inject
     TemplateManager _tmpltMgr;
 
-    class TemplateOpContext<T> extends AsyncRpcConext<T> {
+    class TemplateOpContext<T> extends AsyncRpcContext<T> {
         final TemplateObject template;
         final AsyncCallFuture<TemplateApiResult> future;
 

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/de38cd81/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java
----------------------------------------------------------------------
diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java
index 48ec512..3d7d4f2 100644
--- a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java
+++ b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java
@@ -33,7 +33,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreState
 import org.apache.cloudstack.framework.async.AsyncCallFuture;
 import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher;
 import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
-import org.apache.cloudstack.framework.async.AsyncRpcConext;
+import org.apache.cloudstack.framework.async.AsyncRpcContext;
 import org.apache.cloudstack.storage.command.CommandResult;
 import org.apache.cloudstack.storage.command.CopyCmdAnswer;
 import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager;
@@ -79,7 +79,7 @@ public class SnapshotServiceImpl implements SnapshotService {
     @Inject
     VMSnapshotDao _vmSnapshotDao;
 
-    static private class CreateSnapshotContext<T> extends AsyncRpcConext<T> {
+    static private class CreateSnapshotContext<T> extends AsyncRpcContext<T> {
         final SnapshotInfo snapshot;
         final AsyncCallFuture<SnapshotResult> future;
 
@@ -91,7 +91,7 @@ public class SnapshotServiceImpl implements SnapshotService {
         }
     }
 
-    static private class DeleteSnapshotContext<T> extends AsyncRpcConext<T> {
+    static private class DeleteSnapshotContext<T> extends AsyncRpcContext<T> {
         final SnapshotInfo snapshot;
         final AsyncCallFuture<SnapshotResult> future;
 
@@ -104,7 +104,7 @@ public class SnapshotServiceImpl implements SnapshotService {
 
     }
 
-    static private class CopySnapshotContext<T> extends AsyncRpcConext<T> {
+    static private class CopySnapshotContext<T> extends AsyncRpcContext<T> {
         final SnapshotInfo srcSnapshot;
         final SnapshotInfo destSnapshot;
         final AsyncCallFuture<SnapshotResult> future;

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/de38cd81/engine/storage/src/org/apache/cloudstack/storage/datastore/DataObjectManagerImpl.java
----------------------------------------------------------------------
diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/DataObjectManagerImpl.java b/engine/storage/src/org/apache/cloudstack/storage/datastore/DataObjectManagerImpl.java
index 7878d8d..db69c64 100644
--- a/engine/storage/src/org/apache/cloudstack/storage/datastore/DataObjectManagerImpl.java
+++ b/engine/storage/src/org/apache/cloudstack/storage/datastore/DataObjectManagerImpl.java
@@ -30,7 +30,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreState
 import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event;
 import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher;
 import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
-import org.apache.cloudstack.framework.async.AsyncRpcConext;
+import org.apache.cloudstack.framework.async.AsyncRpcContext;
 import org.apache.cloudstack.storage.command.CommandResult;
 import org.apache.log4j.Logger;
 import org.springframework.stereotype.Component;
@@ -78,7 +78,7 @@ public class DataObjectManagerImpl implements DataObjectManager {
         return objectInDataStoreMgr.get(dataObj, dataStore);
     }
 
-    class CreateContext<T> extends AsyncRpcConext<T> {
+    class CreateContext<T> extends AsyncRpcContext<T> {
         final DataObject objInStrore;
 
         public CreateContext(AsyncCompletionCallback<T> callback, DataObject objInStore) {
@@ -205,7 +205,7 @@ public class DataObjectManagerImpl implements DataObjectManager {
         return null;
     }
 
-    class CopyContext<T> extends AsyncRpcConext<T> {
+    class CopyContext<T> extends AsyncRpcContext<T> {
         DataObject destObj;
         DataObject srcObj;
 
@@ -293,7 +293,7 @@ public class DataObjectManagerImpl implements DataObjectManager {
         return null;
     }
 
-    class DeleteContext<T> extends AsyncRpcConext<T> {
+    class DeleteContext<T> extends AsyncRpcContext<T> {
         private final DataObject obj;
 
         public DeleteContext(AsyncCompletionCallback<T> callback, DataObject obj) {

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/de38cd81/engine/storage/src/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java
----------------------------------------------------------------------
diff --git a/engine/storage/src/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java b/engine/storage/src/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java
index 97c1671..9db205b 100644
--- a/engine/storage/src/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java
+++ b/engine/storage/src/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java
@@ -18,10 +18,16 @@
  */
 package org.apache.cloudstack.storage.image;
 
-import java.util.Date;
-import java.util.Set;
-import javax.inject.Inject;
-
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.storage.DownloadAnswer;
+import com.cloud.agent.api.to.DataObjectType;
+import com.cloud.agent.api.to.DataTO;
+import com.cloud.storage.VMTemplateStorageResourceAssoc;
+import com.cloud.storage.VMTemplateVO;
+import com.cloud.storage.VolumeVO;
+import com.cloud.storage.dao.VMTemplateDao;
+import com.cloud.storage.dao.VolumeDao;
+import com.cloud.storage.download.DownloadMonitor;
 import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
 import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
@@ -30,31 +36,22 @@ import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
 import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector;
 import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher;
 import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
-import org.apache.cloudstack.framework.async.AsyncRpcConext;
+import org.apache.cloudstack.framework.async.AsyncRpcContext;
 import org.apache.cloudstack.storage.command.CommandResult;
 import org.apache.cloudstack.storage.command.DeleteCommand;
 import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao;
 import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO;
 import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao;
 import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO;
-import org.apache.cloudstack.storage.image.ImageStoreDriver;
 import org.apache.log4j.Logger;
 
-import com.cloud.agent.api.Answer;
-import com.cloud.agent.api.storage.DownloadAnswer;
-import com.cloud.agent.api.to.DataObjectType;
-import com.cloud.agent.api.to.DataTO;
-import com.cloud.storage.VMTemplateStorageResourceAssoc;
-import com.cloud.storage.VMTemplateVO;
-import com.cloud.storage.VolumeVO;
-import com.cloud.storage.dao.VMTemplateDao;
-import com.cloud.storage.dao.VolumeDao;
-import com.cloud.storage.download.DownloadMonitor;
+import javax.inject.Inject;
+import java.util.Date;
 
 public abstract class BaseImageStoreDriverImpl implements ImageStoreDriver {
     private static final Logger s_logger = Logger.getLogger(BaseImageStoreDriverImpl.class);
     @Inject
-    VMTemplateDao templateDao;
+    VMTemplateDao _templateDao;
     @Inject
     DownloadMonitor _downloadMonitor;
     @Inject
@@ -71,7 +68,7 @@ public abstract class BaseImageStoreDriverImpl implements ImageStoreDriver {
         return null;
     }
 
-    class CreateContext<T> extends AsyncRpcConext<T> {
+    class CreateContext<T> extends AsyncRpcContext<T> {
         final DataObject data;
 
         public CreateContext(AsyncCompletionCallback<T> callback, DataObject data) {
@@ -115,9 +112,9 @@ public abstract class BaseImageStoreDriverImpl implements ImageStoreDriver {
             updateBuilder.setPhysicalSize(answer.getTemplatePhySicalSize());
             _templateStoreDao.update(tmpltStoreVO.getId(), updateBuilder);
             // update size in vm_template table
-            VMTemplateVO tmlptUpdater = templateDao.createForUpdate();
+            VMTemplateVO tmlptUpdater = _templateDao.createForUpdate();
             tmlptUpdater.setSize(answer.getTemplateSize());
-            templateDao.update(obj.getId(), tmlptUpdater);
+            _templateDao.update(obj.getId(), tmlptUpdater);
         }
 
         AsyncCompletionCallback<CreateCmdResult> caller = context.getParentCallback();
@@ -131,9 +128,9 @@ public abstract class BaseImageStoreDriverImpl implements ImageStoreDriver {
             caller.complete(result);
         } else if (answer.getDownloadStatus() == VMTemplateStorageResourceAssoc.Status.DOWNLOADED) {
             if (answer.getCheckSum() != null) {
-                VMTemplateVO templateDaoBuilder = templateDao.createForUpdate();
+                VMTemplateVO templateDaoBuilder = _templateDao.createForUpdate();
                 templateDaoBuilder.setChecksum(answer.getCheckSum());
-                templateDao.update(obj.getId(), templateDaoBuilder);
+                _templateDao.update(obj.getId(), templateDaoBuilder);
             }
 
             CreateCmdResult result = new CreateCmdResult(null, null);

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/de38cd81/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
----------------------------------------------------------------------
diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
index de1e423..89313e4 100644
--- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
+++ b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
@@ -46,7 +46,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService;
 import org.apache.cloudstack.framework.async.AsyncCallFuture;
 import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher;
 import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
-import org.apache.cloudstack.framework.async.AsyncRpcConext;
+import org.apache.cloudstack.framework.async.AsyncRpcContext;
 import org.apache.cloudstack.storage.command.CommandResult;
 import org.apache.cloudstack.storage.command.DeleteCommand;
 import org.apache.cloudstack.storage.datastore.DataObjectManager;
@@ -124,7 +124,7 @@ public class VolumeServiceImpl implements VolumeService {
     public VolumeServiceImpl() {
     }
 
-    private class CreateVolumeContext<T> extends AsyncRpcConext<T> {
+    private class CreateVolumeContext<T> extends AsyncRpcContext<T> {
 
         private final DataObject volume;
         private final AsyncCallFuture<VolumeApiResult> future;
@@ -190,7 +190,7 @@ public class VolumeServiceImpl implements VolumeService {
         return null;
     }
 
-    private class DeleteVolumeContext<T> extends AsyncRpcConext<T> {
+    private class DeleteVolumeContext<T> extends AsyncRpcContext<T> {
         private final VolumeObject volume;
         private final AsyncCallFuture<VolumeApiResult> future;
 
@@ -282,7 +282,7 @@ public class VolumeServiceImpl implements VolumeService {
         return null;
     }
 
-    class CreateBaseImageContext<T> extends AsyncRpcConext<T> {
+    class CreateBaseImageContext<T> extends AsyncRpcContext<T> {
         private final VolumeInfo volume;
         private final PrimaryDataStore dataStore;
         private final TemplateInfo srcTemplate;
@@ -421,7 +421,7 @@ public class VolumeServiceImpl implements VolumeService {
         return null;
     }
 
-    private class CreateVolumeFromBaseImageContext<T> extends AsyncRpcConext<T> {
+    private class CreateVolumeFromBaseImageContext<T> extends AsyncRpcContext<T> {
         private final DataObject vo;
         private final AsyncCallFuture<VolumeApiResult> future;
         private final DataObject templateOnStore;
@@ -576,7 +576,7 @@ public class VolumeServiceImpl implements VolumeService {
         return volDao.persist(newVol);
     }
 
-    private class CopyVolumeContext<T> extends AsyncRpcConext<T> {
+    private class CopyVolumeContext<T> extends AsyncRpcContext<T> {
         final VolumeInfo srcVolume;
         final VolumeInfo destVolume;
         final AsyncCallFuture<VolumeApiResult> future;
@@ -780,7 +780,7 @@ public class VolumeServiceImpl implements VolumeService {
         return null;
     }
 
-    private class MigrateVolumeContext<T> extends AsyncRpcConext<T> {
+    private class MigrateVolumeContext<T> extends AsyncRpcContext<T> {
         final VolumeInfo srcVolume;
         final VolumeInfo destVolume;
         final AsyncCallFuture<VolumeApiResult> future;
@@ -848,7 +848,7 @@ public class VolumeServiceImpl implements VolumeService {
         return null;
     }
 
-    private class MigrateVmWithVolumesContext<T> extends AsyncRpcConext<T> {
+    private class MigrateVmWithVolumesContext<T> extends AsyncRpcContext<T> {
         final Map<VolumeInfo, DataStore> volumeToPool;
         final AsyncCallFuture<CommandResult> future;
 

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/de38cd81/framework/ipc/src/org/apache/cloudstack/framework/async/AsyncRpcConext.java
----------------------------------------------------------------------
diff --git a/framework/ipc/src/org/apache/cloudstack/framework/async/AsyncRpcConext.java b/framework/ipc/src/org/apache/cloudstack/framework/async/AsyncRpcConext.java
deleted file mode 100644
index 102364c..0000000
--- a/framework/ipc/src/org/apache/cloudstack/framework/async/AsyncRpcConext.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.cloudstack.framework.async;
-
-public class AsyncRpcConext<T> {
-    protected final AsyncCompletionCallback<T> parentCallBack;
-    public AsyncRpcConext(AsyncCompletionCallback<T> callback) {
-        this.parentCallBack = callback;
-    }
-    
-    public AsyncCompletionCallback<T> getParentCallback() {
-        return this.parentCallBack;
-    }
-}

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/de38cd81/framework/ipc/src/org/apache/cloudstack/framework/async/AsyncRpcContext.java
----------------------------------------------------------------------
diff --git a/framework/ipc/src/org/apache/cloudstack/framework/async/AsyncRpcContext.java b/framework/ipc/src/org/apache/cloudstack/framework/async/AsyncRpcContext.java
new file mode 100644
index 0000000..cdb2054
--- /dev/null
+++ b/framework/ipc/src/org/apache/cloudstack/framework/async/AsyncRpcContext.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.cloudstack.framework.async;
+
+public class AsyncRpcContext<T> {
+    protected final AsyncCompletionCallback<T> parentCallBack;
+    public AsyncRpcContext(AsyncCompletionCallback<T> callback) {
+        this.parentCallBack = callback;
+    }
+
+    public AsyncCompletionCallback<T> getParentCallback() {
+        return this.parentCallBack;
+    }
+}

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/de38cd81/framework/ipc/test/org/apache/cloudstack/framework/codestyle/AsyncSampleEventDrivenStyleCaller.java
----------------------------------------------------------------------
diff --git a/framework/ipc/test/org/apache/cloudstack/framework/codestyle/AsyncSampleEventDrivenStyleCaller.java b/framework/ipc/test/org/apache/cloudstack/framework/codestyle/AsyncSampleEventDrivenStyleCaller.java
index db39588..d98ba65 100644
--- a/framework/ipc/test/org/apache/cloudstack/framework/codestyle/AsyncSampleEventDrivenStyleCaller.java
+++ b/framework/ipc/test/org/apache/cloudstack/framework/codestyle/AsyncSampleEventDrivenStyleCaller.java
@@ -24,7 +24,7 @@ import org.apache.cloudstack.framework.async.AsyncCallFuture;
 import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher;
 import org.apache.cloudstack.framework.async.AsyncCallbackDriver;
 import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
-import org.apache.cloudstack.framework.async.AsyncRpcConext;
+import org.apache.cloudstack.framework.async.AsyncRpcContext;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
@@ -59,7 +59,7 @@ public class AsyncSampleEventDrivenStyleCaller {
         }
     }
     
-    private class TestContext<T> extends AsyncRpcConext<T> {
+    private class TestContext<T> extends AsyncRpcContext<T> {
         private boolean finished;
         private String result;
         /**

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/de38cd81/plugins/hypervisors/vmware/test/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategyTest.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/vmware/test/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategyTest.java b/plugins/hypervisors/vmware/test/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategyTest.java
index 3d2ad57..b3ea5d5 100644
--- a/plugins/hypervisors/vmware/test/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategyTest.java
+++ b/plugins/hypervisors/vmware/test/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategyTest.java
@@ -37,7 +37,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
 import org.apache.cloudstack.framework.async.AsyncCallFuture;
 import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher;
 import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
-import org.apache.cloudstack.framework.async.AsyncRpcConext;
+import org.apache.cloudstack.framework.async.AsyncRpcContext;
 import org.apache.cloudstack.storage.command.CommandResult;
 import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
 import org.apache.cloudstack.test.utils.SpringUtils;
@@ -209,7 +209,7 @@ public class VmwareStorageMotionStrategyTest {
         assertFalse("Migration across cluster didn't fail.", this.result.isSuccess());
     }
 
-    private class MockContext<T> extends AsyncRpcConext<T> {
+    private class MockContext<T> extends AsyncRpcContext<T> {
         final Map<VolumeInfo, DataStore> volumeToPool;
         final AsyncCallFuture<CommandResult> future;
         /**

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/de38cd81/plugins/storage/image/default/src/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackImageStoreLifeCycleImpl.java
----------------------------------------------------------------------
diff --git a/plugins/storage/image/default/src/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackImageStoreLifeCycleImpl.java b/plugins/storage/image/default/src/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackImageStoreLifeCycleImpl.java
index 7b30575..21a5e0a 100644
--- a/plugins/storage/image/default/src/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackImageStoreLifeCycleImpl.java
+++ b/plugins/storage/image/default/src/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackImageStoreLifeCycleImpl.java
@@ -16,14 +16,14 @@
 // under the License.
 package org.apache.cloudstack.storage.datastore.lifecycle;
 
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import javax.inject.Inject;
-
+import com.cloud.agent.api.StoragePoolInfo;
+import com.cloud.exception.InvalidParameterValueException;
+import com.cloud.hypervisor.Hypervisor.HypervisorType;
+import com.cloud.resource.Discoverer;
+import com.cloud.resource.ResourceManager;
+import com.cloud.storage.DataStoreRole;
+import com.cloud.storage.ScopeType;
+import com.cloud.utils.UriUtils;
 import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
 import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
@@ -35,14 +35,12 @@ import org.apache.cloudstack.storage.image.datastore.ImageStoreProviderManager;
 import org.apache.cloudstack.storage.image.store.lifecycle.ImageStoreLifeCycle;
 import org.apache.log4j.Logger;
 
-import com.cloud.agent.api.StoragePoolInfo;
-import com.cloud.exception.InvalidParameterValueException;
-import com.cloud.hypervisor.Hypervisor.HypervisorType;
-import com.cloud.resource.Discoverer;
-import com.cloud.resource.ResourceManager;
-import com.cloud.storage.DataStoreRole;
-import com.cloud.storage.ScopeType;
-import com.cloud.utils.UriUtils;
+import javax.inject.Inject;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
 
 public class CloudStackImageStoreLifeCycleImpl implements ImageStoreLifeCycle {
 
@@ -83,7 +81,7 @@ public class CloudStackImageStoreLifeCycleImpl implements ImageStoreLifeCycle {
         DataStoreRole role = (DataStoreRole) dsInfos.get("role");
         Map<String, String> details = (Map<String, String>) dsInfos.get("details");
 
-        s_logger.info("Trying to add a new host at " + url + " in data center " + dcId);
+        s_logger.info("Trying to add a new data store at " + url + " to data center " + dcId);
 
         URI uri = null;
         try {
@@ -103,7 +101,7 @@ public class CloudStackImageStoreLifeCycleImpl implements ImageStoreLifeCycle {
 
         if (dcId == null) {
             throw new InvalidParameterValueException(
-                    "DataCenter id is null, and cloudstack default image storehas to be associated with a data center");
+                    "DataCenter id is null, and cloudstack default image store has to be associated with a data center");
         }
 
         Map<String, Object> imageStoreParameters = new HashMap<String, Object>();

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/de38cd81/plugins/storage/image/sample/src/org/apache/cloudstack/storage/datastore/driver/SampleImageStoreDriverImpl.java
----------------------------------------------------------------------
diff --git a/plugins/storage/image/sample/src/org/apache/cloudstack/storage/datastore/driver/SampleImageStoreDriverImpl.java b/plugins/storage/image/sample/src/org/apache/cloudstack/storage/datastore/driver/SampleImageStoreDriverImpl.java
index 44f94f3..66f4d77 100644
--- a/plugins/storage/image/sample/src/org/apache/cloudstack/storage/datastore/driver/SampleImageStoreDriverImpl.java
+++ b/plugins/storage/image/sample/src/org/apache/cloudstack/storage/datastore/driver/SampleImageStoreDriverImpl.java
@@ -39,13 +39,11 @@ public class SampleImageStoreDriverImpl extends BaseImageStoreDriverImpl {
 
     @Override
     public DataStoreTO getStoreTO(DataStore store) {
-        // TODO Auto-generated method stub
         return null;
     }
 
     @Override
     public String createEntityExtractUrl(DataStore store, String installPath, ImageFormat format) {
-        // TODO Auto-generated method stub
         return null;
     }
 

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/de38cd81/plugins/storage/volume/sample/src/org/apache/cloudstack/storage/datastore/driver/SamplePrimaryDataStoreDriverImpl.java
----------------------------------------------------------------------
diff --git a/plugins/storage/volume/sample/src/org/apache/cloudstack/storage/datastore/driver/SamplePrimaryDataStoreDriverImpl.java b/plugins/storage/volume/sample/src/org/apache/cloudstack/storage/datastore/driver/SamplePrimaryDataStoreDriverImpl.java
index 78f2263..1d00c97 100644
--- a/plugins/storage/volume/sample/src/org/apache/cloudstack/storage/datastore/driver/SamplePrimaryDataStoreDriverImpl.java
+++ b/plugins/storage/volume/sample/src/org/apache/cloudstack/storage/datastore/driver/SamplePrimaryDataStoreDriverImpl.java
@@ -23,7 +23,7 @@ import com.cloud.storage.dao.StoragePoolHostDao;
 import org.apache.cloudstack.engine.subsystem.api.storage.*;
 import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher;
 import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
-import org.apache.cloudstack.framework.async.AsyncRpcConext;
+import org.apache.cloudstack.framework.async.AsyncRpcContext;
 import org.apache.cloudstack.storage.command.CommandResult;
 import org.apache.cloudstack.storage.command.CreateObjectCommand;
 import org.apache.cloudstack.storage.datastore.DataObjectManager;
@@ -54,12 +54,16 @@ public class SamplePrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
         return null;
     }
 
+<<<<<<< HEAD
     @Override
     public ChapInfo getChapInfo(VolumeInfo volumeInfo) {
         return null;
     }
 
     private class CreateVolumeContext<T> extends AsyncRpcConext<T> {
+=======
+    private class CreateVolumeContext<T> extends AsyncRpcContext<T> {
+>>>>>>> Fix typo in class name
         private final DataObject volume;
         public CreateVolumeContext(AsyncCompletionCallback<T> callback, DataObject volume) {
             super(callback);
@@ -86,8 +90,8 @@ public class SamplePrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
         /*
          * DeleteCommand cmd = new DeleteCommand(vo.getUri());
          * 
-         * EndPoint ep = selector.select(vo); AsyncRpcConext<CommandResult>
-         * context = new AsyncRpcConext<CommandResult>(callback);
+         * EndPoint ep = selector.select(vo); AsyncRpcContext<CommandResult>
+         * context = new AsyncRpcContext<CommandResult>(callback);
          * AsyncCallbackDispatcher<SamplePrimaryDataStoreDriverImpl, Answer>
          * caller = AsyncCallbackDispatcher.create(this);
          * caller.setCallback(caller.getTarget().deleteCallback(null, null))
@@ -96,7 +100,7 @@ public class SamplePrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
     }
 
     public Void deleteCallback(AsyncCallbackDispatcher<SamplePrimaryDataStoreDriverImpl, Answer> callback,
-            AsyncRpcConext<CommandResult> context) {
+            AsyncRpcContext<CommandResult> context) {
         CommandResult result = new CommandResult();
         Answer answer = callback.getResult();
         if (!answer.getResult()) {
@@ -108,7 +112,7 @@ public class SamplePrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
 
     /*
      * private class CreateVolumeFromBaseImageContext<T> extends
-     * AsyncRpcConext<T> { private final VolumeObject volume;
+     * AsyncRpcContext<T> { private final VolumeObject volume;
      * 
      * public CreateVolumeFromBaseImageContext(AsyncCompletionCallback<T>
      * callback, VolumeObject volume) { super(callback); this.volume = volume; }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/de38cd81/server/src/com/cloud/template/HypervisorTemplateAdapter.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/template/HypervisorTemplateAdapter.java b/server/src/com/cloud/template/HypervisorTemplateAdapter.java
index da8c1c4..869231a 100755
--- a/server/src/com/cloud/template/HypervisorTemplateAdapter.java
+++ b/server/src/com/cloud/template/HypervisorTemplateAdapter.java
@@ -39,7 +39,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
 import org.apache.cloudstack.framework.async.AsyncCallFuture;
 import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher;
 import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
-import org.apache.cloudstack.framework.async.AsyncRpcConext;
+import org.apache.cloudstack.framework.async.AsyncRpcContext;
 import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO;
 import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity;
 import org.apache.log4j.Logger;
@@ -168,13 +168,13 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase {
         return template;
     }
 
-    private class CreateTemplateContext<T> extends AsyncRpcConext<T> {
-        final TemplateInfo template;
-        public CreateTemplateContext(AsyncCompletionCallback<T> callback, TemplateInfo template) {
-            super(callback);
-            this.template = template;
-        }
-    }
+	private class CreateTemplateContext<T> extends AsyncRpcContext<T> {
+		final TemplateInfo template;
+		public CreateTemplateContext(AsyncCompletionCallback<T> callback, TemplateInfo template) {
+			super(callback);
+			this.template = template;
+		}
+	}
 
     protected Void createTemplateAsyncCallBack(AsyncCallbackDispatcher<HypervisorTemplateAdapter, TemplateApiResult> callback,
             CreateTemplateContext<TemplateApiResult> context) {


[50/50] [abbrv] git commit: updated refs/heads/vmsync to f737019

Posted by ah...@apache.org.
Removed useless file


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/f7370196
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/f7370196
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/f7370196

Branch: refs/heads/vmsync
Commit: f7370196d18d6eec39fdac6063f7d4283e843c15
Parents: 57bafc8
Author: Alex Huang <al...@gmail.com>
Authored: Mon Jul 1 14:25:35 2013 -0700
Committer: Alex Huang <al...@gmail.com>
Committed: Mon Jul 1 14:25:35 2013 -0700

----------------------------------------------------------------------
 settings.xml.old | 98 ---------------------------------------------------
 1 file changed, 98 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/f7370196/settings.xml.old
----------------------------------------------------------------------
diff --git a/settings.xml.old b/settings.xml.old
deleted file mode 100644
index 050af0c..0000000
--- a/settings.xml.old
+++ /dev/null
@@ -1,98 +0,0 @@
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements. See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership. The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License. You may obtain a copy of the License at
-
-  http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing,
-  software distributed under the License is distributed on an
-  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-  KIND, either express or implied. See the License for the
-  specific language governing permissions and limitations
-  under the License.
--->
-<settings xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0 http://maven.apache.org/xsd/settings-1.0.0.xsd" xmlns="http://maven.apache.org/SETTINGS/1.0.0"
-    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
-  <!--<localRepository>repo</localRepository>-->
-  <servers>
-    <server>
-      <username>admin</username>
-      <id>central</id>
-    </server>
-    <server>
-      <username>admin</username>
-      <id>snapshots</id>
-    </server>
-  </servers>
-  
-  <!--
-  <mirrors>
-    <mirror>
-      <id>central</id>
-      <name>Central</name>
-      <url>http://10.223.75.59:8081/nexus/content/repositories/central/
-      </url>
-      <mirrorOf>central</mirrorOf>
-    </mirror>
-  </mirrors>
- --> 
-  <profiles>
-    <profile>
-      <id>thirdparty</id>
-      <activation>
-        <property>
-          <name>nonoss</name>
-        </property>
-      </activation>
-      <repositories>
-<!--
-        <repository>
-          <id>thirdparty</id>
-          <name>3rd party</name>
-          <url>http://10.223.75.59:8081/nexus/content/repositories/thirdparty
-          </url>
-          <layout>default</layout>
-          <releases>
-            <enabled>true</enabled>
-            <updatePolicy>never</updatePolicy>
-          </releases>
-          <snapshots>
-            <enabled>false</enabled>
-            <updatePolicy>never</updatePolicy>
-          </snapshots>
-        </repository>
--->
-      </repositories>
-    </profile> 
-    <profile>
-      <repositories>
-        <repository>
-          <snapshots>
-            <enabled>false</enabled>
-          </snapshots>
-          <id>repo1</id>
-          <name>repo1</name>
-          <url>http://repo1.maven.org/maven2</url>
-        </repository>
-        <repository>
-          <snapshots>
-            <enabled>false</enabled>
-          </snapshots>
-          <id>central</id>
-          <name>libs-release</name>
-          <url>http://cs.ibuildthecloud.com/artifactory/libs-release</url>
-        </repository>
-      </repositories>
-      <id>artifactory</id>
-    </profile>
-  </profiles>
-  <activeProfiles>
-    <activeProfile>artifactory</activeProfile>
-  </activeProfiles>
-</settings>
-


[25/50] [abbrv] git commit: updated refs/heads/vmsync to f737019

Posted by ah...@apache.org.
Include license header for solidfire util

Signed-off-by: Prasanna Santhanam <ts...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/bd784fb8
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/bd784fb8
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/bd784fb8

Branch: refs/heads/vmsync
Commit: bd784fb8ec79b0f8d6f63529e5a17af5eda5bfd6
Parents: a24b8d8
Author: Prasanna Santhanam <ts...@apache.org>
Authored: Sat Jun 29 16:55:55 2013 +0530
Committer: Prasanna Santhanam <ts...@apache.org>
Committed: Sat Jun 29 16:55:55 2013 +0530

----------------------------------------------------------------------
 .../storage/datastore/util/SolidFireUtil.java       | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/bd784fb8/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/util/SolidFireUtil.java
----------------------------------------------------------------------
diff --git a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/util/SolidFireUtil.java b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/util/SolidFireUtil.java
index 839c5a5..26766e8 100644
--- a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/util/SolidFireUtil.java
+++ b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/util/SolidFireUtil.java
@@ -1,3 +1,19 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
 package org.apache.cloudstack.storage.datastore.util;
 
 import java.io.BufferedReader;


[05/50] [abbrv] SolidFire plug-in and related changes

Posted by ah...@apache.org.
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/ui/scripts/sharedFunctions.js
----------------------------------------------------------------------
diff --git a/ui/scripts/sharedFunctions.js b/ui/scripts/sharedFunctions.js
index dd9a7d6..bf6464c 100644
--- a/ui/scripts/sharedFunctions.js
+++ b/ui/scripts/sharedFunctions.js
@@ -321,8 +321,8 @@ cloudStack.converters = {
   toBooleanText: function(booleanValue) {
     if(booleanValue == true)
       return "Yes";
-    else if(booleanValue == false)
-      return "No";
+
+    return "No";
   },
   convertHz: function(hz) {
     if (hz == null)

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/ui/scripts/storage.js
----------------------------------------------------------------------
diff --git a/ui/scripts/storage.js b/ui/scripts/storage.js
index 2c03d39..ad0965a 100644
--- a/ui/scripts/storage.js
+++ b/ui/scripts/storage.js
@@ -132,6 +132,16 @@
                         else {
                           $diskSize.hide();
                         }
+                        var $minIops = $form.find('.form-item[rel=minIops]');
+                        var $maxIops = $form.find('.form-item[rel=maxIops]');
+                        if (selectedDiskOfferingObj.iscustomizediops == true) {
+                          $minIops.css('display', 'inline-block');
+                          $maxIops.css('display', 'inline-block');
+                        }
+                        else {
+                          $minIops.hide();
+                          $maxIops.hide();
+                        }
                       });
                     }
                   }
@@ -141,7 +151,19 @@
                     label: 'label.disk.size.gb',
                     validation: { required: true, number: true },
                     isHidden: true
-                  }
+                  },
+                  
+                  minIops: {
+                    label: 'label.disk.iops.min',
+                    validation: { required: false, number: true },
+                    isHidden: true
+                  },
+                  
+                  maxIops: {
+                    label: 'label.disk.iops.max',
+                    validation: { required: false, number: true },
+                    isHidden: true
+                  },
 
                 }
               },
@@ -159,6 +181,20 @@
 									  size: args.data.diskSize
 									});
                 }
+                
+                if (selectedDiskOfferingObj.iscustomizediops == true) {
+                    if (args.data.minIops != "" && args.data.minIops > 0) {
+								  $.extend(data, {
+									  miniops: args.data.minIops
+									});
+					}
+				    
+				    if (args.data.maxIops != "" && args.data.maxIops > 0) {
+								  $.extend(data, {
+									  maxiops: args.data.maxIops
+									});
+					}
+                }
 
                 $.ajax({
                   url: createURL('createVolume'),
@@ -1228,6 +1264,24 @@
                           return cloudStack.converters.convertBytes(args);
                       }
                     },
+                    miniops: {
+                      label: 'label.disk.iops.min',
+                      converter: function(args) {
+                        if(args == null || args == 0)
+                          return "";
+                        else
+                          return args;
+                      }
+                    },
+                    maxiops: {
+                      label: 'label.disk.iops.max',
+                      converter: function(args) {
+                        if(args == null || args == 0)
+                          return "";
+                        else
+                          return args;
+                      }
+                    },
                     virtualmachineid: {
                       label: 'VM ID',
                       converter: function(args) {

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/ui/scripts/system.js
----------------------------------------------------------------------
diff --git a/ui/scripts/system.js b/ui/scripts/system.js
index 92faf5e..135307b 100644
--- a/ui/scripts/system.js
+++ b/ui/scripts/system.js
@@ -12745,11 +12745,11 @@
                   {
                     id: { label: 'label.id' },
                     state: { label: 'label.state' },
-										tags: {
-										  label: 'label.storage.tags',
-											isEditable: true
-										},
-										podname: { label: 'label.pod' },
+					tags: {
+						label: 'label.storage.tags',
+						isEditable: true
+					},
+					podname: { label: 'label.pod' },
                     clustername: { label: 'label.cluster' },
                     type: { label: 'label.type' },
                     ipaddress: { label: 'label.ip.address' },
@@ -12771,6 +12771,15 @@
                         else
                           return cloudStack.converters.convertBytes(args);
                       }
+                    },
+                    capacityiops: {
+                      label: 'label.disk.iops.total',
+                      converter: function(args) {
+                        if (args == null || args == 0)
+                          return "";
+                        else
+                          return args;
+                      }
                     }
                   }
                 ],

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/utils/src/com/cloud/utils/StringUtils.java
----------------------------------------------------------------------
diff --git a/utils/src/com/cloud/utils/StringUtils.java b/utils/src/com/cloud/utils/StringUtils.java
index 359b169..db32dd4 100644
--- a/utils/src/com/cloud/utils/StringUtils.java
+++ b/utils/src/com/cloud/utils/StringUtils.java
@@ -49,6 +49,14 @@ public class StringUtils {
         return org.apache.commons.lang.StringUtils.join(components, delimiter);
     }
 
+    public static boolean isNotBlank(String str) {
+        if (str != null && str.trim().length() > 0) {
+            return true;
+        }
+
+        return false;
+    }
+
     /**
      * @param tags
      * @return List of tags

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/vmware-base/src/com/cloud/hypervisor/vmware/mo/HostDatastoreSystemMO.java
----------------------------------------------------------------------
diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/HostDatastoreSystemMO.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/HostDatastoreSystemMO.java
index 3dcd724..7c548ff 100755
--- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/HostDatastoreSystemMO.java
+++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/HostDatastoreSystemMO.java
@@ -26,14 +26,16 @@ import com.vmware.vim25.CustomFieldStringValue;
 import com.vmware.vim25.DatastoreInfo;
 import com.vmware.vim25.DynamicProperty;
 import com.vmware.vim25.HostNasVolumeSpec;
+import com.vmware.vim25.HostScsiDisk;
 import com.vmware.vim25.ManagedObjectReference;
 import com.vmware.vim25.NasDatastoreInfo;
 import com.vmware.vim25.ObjectContent;
 import com.vmware.vim25.ObjectSpec;
 import com.vmware.vim25.PropertyFilterSpec;
 import com.vmware.vim25.PropertySpec;
-import com.vmware.vim25.SelectionSpec;
 import com.vmware.vim25.TraversalSpec;
+import com.vmware.vim25.VmfsDatastoreCreateSpec;
+import com.vmware.vim25.VmfsDatastoreOption;
 
 public class HostDatastoreSystemMO extends BaseMO {
 
@@ -122,6 +124,22 @@ public class HostDatastoreSystemMO extends BaseMO {
 		return null;
 	}
 
+	public List<HostScsiDisk> queryAvailableDisksForVmfs() throws Exception {
+		return _context.getService().queryAvailableDisksForVmfs(_mor, null);
+	}
+
+	public ManagedObjectReference createVmfsDatastore(String datastoreName, HostScsiDisk hostScsiDisk) throws Exception {
+		// just grab the first instance of VmfsDatastoreOption
+		VmfsDatastoreOption vmfsDatastoreOption = _context.getService().queryVmfsDatastoreCreateOptions(_mor, hostScsiDisk.getDevicePath(), 4).get(0);
+
+		VmfsDatastoreCreateSpec vmfsDatastoreCreateSpec = (VmfsDatastoreCreateSpec)vmfsDatastoreOption.getSpec();
+
+		// set the name of the datastore to be created
+		vmfsDatastoreCreateSpec.getVmfs().setVolumeName(datastoreName);
+
+		return _context.getService().createVmfsDatastore(_mor, vmfsDatastoreCreateSpec);
+	}
+
 	public boolean deleteDatastore(String name) throws Exception {
 		ManagedObjectReference morDatastore = findDatastore(name);
 		if(morDatastore != null) {

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/vmware-base/src/com/cloud/hypervisor/vmware/mo/HostMO.java
----------------------------------------------------------------------
diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/HostMO.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/HostMO.java
index a866fdc..e7fd922 100755
--- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/HostMO.java
+++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/HostMO.java
@@ -149,6 +149,13 @@ public class HostMO extends BaseMO implements VmwareHypervisorHost {
 		throw new Exception("Could not find host default gateway, host is not properly configured?");
 	}
 
+	public HostStorageSystemMO getHostStorageSystemMO() throws Exception {
+		return new HostStorageSystemMO(_context,
+			(ManagedObjectReference)_context.getVimClient().getDynamicProperty(
+				_mor, "configManager.storageSystem")
+		);
+	}
+
 	public HostDatastoreSystemMO getHostDatastoreSystemMO() throws Exception {
 		return new HostDatastoreSystemMO(_context,
 			(ManagedObjectReference)_context.getVimClient().getDynamicProperty(
@@ -797,14 +804,14 @@ public class HostMO extends BaseMO implements VmwareHypervisorHost {
 	}
 
 	@Override
-	public void unmountDatastore(String poolUuid) throws Exception {
+	public void unmountDatastore(String uuid) throws Exception {
 
 	    if(s_logger.isTraceEnabled())
-			s_logger.trace("vCenter API trace - unmountDatastore(). target MOR: " + _mor.getValue() + ", poolUuid: " + poolUuid);
+			s_logger.trace("vCenter API trace - unmountDatastore(). target MOR: " + _mor.getValue() + ", uuid: " + uuid);
 
     	HostDatastoreSystemMO hostDatastoreSystemMo = getHostDatastoreSystemMO();
-    	if(!hostDatastoreSystemMo.deleteDatastore(poolUuid)) {
-    		String msg = "Unable to unmount datastore. uuid: " + poolUuid;
+    	if(!hostDatastoreSystemMo.deleteDatastore(uuid)) {
+    		String msg = "Unable to unmount datastore. uuid: " + uuid;
     		s_logger.error(msg);
 
     		if(s_logger.isTraceEnabled())

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/vmware-base/src/com/cloud/hypervisor/vmware/mo/HostStorageSystemMO.java
----------------------------------------------------------------------
diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/HostStorageSystemMO.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/HostStorageSystemMO.java
new file mode 100644
index 0000000..d400185
--- /dev/null
+++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/HostStorageSystemMO.java
@@ -0,0 +1,51 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.hypervisor.vmware.mo;
+
+import java.util.List;
+
+import com.cloud.hypervisor.vmware.util.VmwareContext;
+
+import com.vmware.vim25.HostInternetScsiHbaStaticTarget;
+import com.vmware.vim25.HostStorageDeviceInfo;
+import com.vmware.vim25.ManagedObjectReference;
+
+public class HostStorageSystemMO extends BaseMO {
+	public HostStorageSystemMO(VmwareContext context, ManagedObjectReference morHostDatastore) {
+		super(context, morHostDatastore);
+	}
+
+	public HostStorageSystemMO(VmwareContext context, String morType, String morValue) {
+		super(context, morType, morValue);
+	}
+	
+	public HostStorageDeviceInfo getStorageDeviceInfo() throws Exception {
+		return (HostStorageDeviceInfo)_context.getVimClient().getDynamicProperty(_mor, "storageDeviceInfo");
+	}
+	
+	public void addInternetScsiStaticTargets(String iScsiHbaDevice, List<HostInternetScsiHbaStaticTarget> lstTargets) throws Exception {
+		_context.getService().addInternetScsiStaticTargets(_mor, iScsiHbaDevice, lstTargets);
+	}
+	
+	public void removeInternetScsiStaticTargets(String iScsiHbaDevice, List<HostInternetScsiHbaStaticTarget> lstTargets) throws Exception {
+		_context.getService().removeInternetScsiStaticTargets(_mor, iScsiHbaDevice, lstTargets);
+	}
+	
+	public void rescanHba(String iScsiHbaDevice) throws Exception {
+		_context.getService().rescanHba(_mor, iScsiHbaDevice);
+	}
+}


[23/50] [abbrv] git commit: updated refs/heads/vmsync to f737019

Posted by ah...@apache.org.
Add UCS DAOs for simulator context

Signed-off-by: Prasanna Santhanam <ts...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/1e0f8337
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/1e0f8337
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/1e0f8337

Branch: refs/heads/vmsync
Commit: 1e0f8337729d8ce81043c6c05add846871b37923
Parents: 49ded7e
Author: Prasanna Santhanam <ts...@apache.org>
Authored: Sat Jun 29 15:07:40 2013 +0530
Committer: Prasanna Santhanam <ts...@apache.org>
Committed: Sat Jun 29 15:09:57 2013 +0530

----------------------------------------------------------------------
 client/tomcatconf/simulatorComponentContext.xml.in | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/1e0f8337/client/tomcatconf/simulatorComponentContext.xml.in
----------------------------------------------------------------------
diff --git a/client/tomcatconf/simulatorComponentContext.xml.in b/client/tomcatconf/simulatorComponentContext.xml.in
index 74312fe..579ae1b 100644
--- a/client/tomcatconf/simulatorComponentContext.xml.in
+++ b/client/tomcatconf/simulatorComponentContext.xml.in
@@ -207,10 +207,10 @@
   </bean>
 
   <!--
-    Image Store
+    UCS support components
   -->
-
-
+  <bean id="ucsBladeDaoImpl" class="com.cloud.ucs.database.UcsBladeDaoImpl"/>
+  <bean id="ucsManagerDaoImpl" class="com.cloud.ucs.database.UcsManagerDaoImpl"/>
 
   <bean id="GlobalLoadBalancingRulesServiceImpl"
         class="org.apache.cloudstack.region.gslb.GlobalLoadBalancingRulesServiceImpl"/>


[37/50] [abbrv] git commit: updated refs/heads/vmsync to f737019

Posted by ah...@apache.org.
Adding axis2.xml to cloudstack-bridge/webapps/awsapi/WEB-INF/conf as part of defect CLOUDSTACK-2927


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/b4f6b57e
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/b4f6b57e
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/b4f6b57e

Branch: refs/heads/vmsync
Commit: b4f6b57ef5242a2faf9df386aaff6e00ad388e03
Parents: 9e62542
Author: Rayeesn <ra...@citrix.com>
Authored: Fri Jun 28 17:26:12 2013 -0700
Committer: Likitha Shetty <li...@citrix.com>
Committed: Mon Jul 1 11:43:09 2013 +0530

----------------------------------------------------------------------
 packaging/centos63/cloud.spec | 1 +
 1 file changed, 1 insertion(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b4f6b57e/packaging/centos63/cloud.spec
----------------------------------------------------------------------
diff --git a/packaging/centos63/cloud.spec b/packaging/centos63/cloud.spec
index 1f112dd..f7eb629 100644
--- a/packaging/centos63/cloud.spec
+++ b/packaging/centos63/cloud.spec
@@ -299,6 +299,7 @@ cp -r awsapi/target/cloud-awsapi-%{_maventag}/* ${RPM_BUILD_ROOT}%{_datadir}/%{n
 install -D awsapi-setup/setup/cloud-setup-bridge ${RPM_BUILD_ROOT}%{_bindir}/cloudstack-setup-bridge
 install -D awsapi-setup/setup/cloudstack-aws-api-register ${RPM_BUILD_ROOT}%{_bindir}/cloudstack-aws-api-register
 cp -r awsapi-setup/db/mysql/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-bridge/setup
+cp awsapi/resource/Axis2/axis2.xml ${RPM_BUILD_ROOT}%{_datadir}/%{name}-bridge/webapps/awsapi/WEB-INF/conf
 
 for name in applicationContext.xml cloud-bridge.properties commons-logging.properties crypto.properties xes.keystore ec2-service.properties ; do
   mv ${RPM_BUILD_ROOT}%{_datadir}/%{name}-bridge/webapps/awsapi/WEB-INF/classes/$name \


[10/50] [abbrv] git commit: updated refs/heads/vmsync to f737019

Posted by ah...@apache.org.
CLOUDSTACK-3215: Cannot Deploy VM when using S3 object store without NFS Cache

Signed-off-by: Edison Su <su...@gmail.com>


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/58f92028
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/58f92028
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/58f92028

Branch: refs/heads/vmsync
Commit: 58f9202818ffe78f3cd6748fc8710433d11bd5a2
Parents: ed00427
Author: Donal Lafferty <do...@citrix.com>
Authored: Fri Jun 28 16:51:44 2013 -0700
Committer: Edison Su <su...@gmail.com>
Committed: Fri Jun 28 16:52:06 2013 -0700

----------------------------------------------------------------------
 .../cache/manager/StorageCacheManagerImpl.java  |  8 ++-
 .../motion/AncientDataMotionStrategy.java       | 53 +++++++++++++-------
 2 files changed, 42 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/58f92028/engine/storage/cache/src/org/apache/cloudstack/storage/cache/manager/StorageCacheManagerImpl.java
----------------------------------------------------------------------
diff --git a/engine/storage/cache/src/org/apache/cloudstack/storage/cache/manager/StorageCacheManagerImpl.java b/engine/storage/cache/src/org/apache/cloudstack/storage/cache/manager/StorageCacheManagerImpl.java
index 4b4e521..a810772 100644
--- a/engine/storage/cache/src/org/apache/cloudstack/storage/cache/manager/StorageCacheManagerImpl.java
+++ b/engine/storage/cache/src/org/apache/cloudstack/storage/cache/manager/StorageCacheManagerImpl.java
@@ -147,7 +147,7 @@ public class StorageCacheManagerImpl implements StorageCacheManager, Manager {
                         object = cacheReplacementAlgorithm.chooseOneToBeReplaced(store);
                         findAStore = store;
                         if (object != null) {
-                              break;
+                            break;
                         }
                     }
 
@@ -230,6 +230,12 @@ public class StorageCacheManagerImpl implements StorageCacheManager, Manager {
     @Override
     public DataObject createCacheObject(DataObject data, Scope scope) {
         DataStore cacheStore = this.getCacheStorage(scope);
+
+        if (cacheStore == null)
+        {
+            String errMsg = "No cache DataStore in scope id " + scope.getScopeId() + " type " + scope.getScopeType().toString();
+            throw new CloudRuntimeException(errMsg);
+        }
         return this.createCacheObject(data, cacheStore);
     }
 

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/58f92028/engine/storage/datamotion/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java
----------------------------------------------------------------------
diff --git a/engine/storage/datamotion/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java b/engine/storage/datamotion/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java
index 631de6a..04d0436 100644
--- a/engine/storage/datamotion/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java
+++ b/engine/storage/datamotion/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java
@@ -35,6 +35,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreState
 import org.apache.cloudstack.engine.subsystem.api.storage.Scope;
 import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
 import org.apache.cloudstack.engine.subsystem.api.storage.StorageCacheManager;
+import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
 import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
 import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
 import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
@@ -57,6 +58,7 @@ import com.cloud.configuration.Config;
 import com.cloud.configuration.dao.ConfigurationDao;
 import com.cloud.host.Host;
 import com.cloud.host.dao.HostDao;
+import com.cloud.hypervisor.Hypervisor.HypervisorType;
 import com.cloud.storage.DataStoreRole;
 import com.cloud.storage.StorageManager;
 import com.cloud.storage.StoragePool;
@@ -135,6 +137,22 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
         if (destStoreTO instanceof NfsTO || destStoreTO.getRole() == DataStoreRole.ImageCache) {
             return false;
         }
+
+        if (srcData.getType() == DataObjectType.TEMPLATE) {
+            TemplateInfo template = (TemplateInfo)srcData;
+            if (template.getHypervisorType() == HypervisorType.Hyperv) {
+                if (s_logger.isDebugEnabled()) {
+                    s_logger.debug("needCacheStorage false due to src TemplateInfo, which is DataObjectType.TEMPLATE of HypervisorType.Hyperv");
+                }
+                return false;
+            }
+        }
+
+        if (s_logger.isDebugEnabled()) {
+            s_logger.debug("needCacheStorage true, dest at " +
+                    destTO.getPath() + " dest role " + destStoreTO.getRole().toString() +
+                    srcTO.getPath() + " src role " + srcStoreTO.getRole().toString() );
+        }
         return true;
     }
 
@@ -157,26 +175,24 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
         int _primaryStorageDownloadWait = NumbersUtil.parseInt(value,
                 Integer.parseInt(Config.PrimaryStorageDownloadWait.getDefaultValue()));
         Answer answer = null;
+        boolean usingCache = false;
         DataObject cacheData = null;
+        DataObject srcForCopy = srcData;
         try {
             if (needCacheStorage(srcData, destData)) {
-                // need to copy it to image cache store
                 Scope destScope = getZoneScope(destData.getDataStore().getScope());
-                cacheData = cacheMgr.createCacheObject(srcData, destScope);
-                CopyCommand cmd = new CopyCommand(cacheData.getTO(), destData.getTO(), _primaryStorageDownloadWait);
-                EndPoint ep = selector.select(cacheData, destData);
-                answer = ep.sendMessage(cmd);
-            } else {
-                // handle copy it to/from cache store
-                CopyCommand cmd = new CopyCommand(srcData.getTO(), destData.getTO(), _primaryStorageDownloadWait);
-                EndPoint ep = selector.select(srcData, destData);
-                answer = ep.sendMessage(cmd);
+                srcForCopy = cacheData = cacheMgr.createCacheObject(srcData, destScope);
             }
+
+            CopyCommand cmd = new CopyCommand(srcForCopy.getTO(), destData.getTO(), _primaryStorageDownloadWait);
+            EndPoint ep = selector.select(srcForCopy, destData);
+            answer = ep.sendMessage(cmd);
+
             if (cacheData != null) {
                 if (answer == null || !answer.getResult()) {
-                    cacheMgr.deleteCacheObject(cacheData);
+                    cacheMgr.deleteCacheObject(srcForCopy);
                 } else {
-                    cacheMgr.releaseCacheObject(cacheData);
+                    cacheMgr.releaseCacheObject(srcForCopy);
                 }
             }
             return answer;
@@ -187,7 +203,6 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
             }
             throw new CloudRuntimeException(e.toString());
         }
-
     }
 
     protected DataObject cacheSnapshotChain(SnapshotInfo snapshot) {
@@ -204,10 +219,10 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
     }
 
     protected void deleteSnapshotCacheChain(SnapshotInfo snapshot) {
-       while (snapshot != null) {
-           cacheMgr.deleteCacheObject(snapshot);
-           snapshot = snapshot.getParent();
-       }
+        while (snapshot != null) {
+            cacheMgr.deleteCacheObject(snapshot);
+            snapshot = snapshot.getParent();
+        }
     }
 
     protected Answer copyVolumeFromSnapshot(DataObject snapObj, DataObject volObj) {
@@ -317,6 +332,8 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
         Answer answer = null;
         String errMsg = null;
         try {
+            s_logger.debug("copyAsync inspecting src type " + srcData.getType().toString() +
+                    " copyAsync inspecting dest type " + destData.getType().toString());
 
             if (srcData.getType() == DataObjectType.SNAPSHOT && destData.getType() == DataObjectType.VOLUME) {
                 answer = copyVolumeFromSnapshot(srcData, destData);
@@ -404,7 +421,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
 
     @Override
     public Void copyAsync(Map<VolumeInfo, DataStore> volumeMap, VirtualMachineTO vmTo, Host srcHost, Host destHost,
-            AsyncCompletionCallback<CopyCommandResult> callback) {
+                          AsyncCompletionCallback<CopyCommandResult> callback) {
         CopyCommandResult result = new CopyCommandResult(null, null);
         result.setResult("Unsupported operation requested for copying data.");
         callback.complete(result);


[33/50] [abbrv] git commit: updated refs/heads/vmsync to f737019

Posted by ah...@apache.org.
CLOUDSTACK-2813 - Some deployment failures do not release the resources.
Applying the short-term fix of force cleaning up if the answer received from StartCommand is not valid
Signed off by : nitin mehta<ni...@citrix.com>


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/4c0425f9
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/4c0425f9
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/4c0425f9

Branch: refs/heads/vmsync
Commit: 4c0425f9189f1dce889d0f1532f7823f9381757b
Parents: 3684baf
Author: Nitin Mehta <ni...@citrix.com>
Authored: Sun Jun 30 14:51:36 2013 +0530
Committer: Nitin Mehta <ni...@citrix.com>
Committed: Sun Jun 30 14:53:32 2013 +0530

----------------------------------------------------------------------
 server/src/com/cloud/vm/VirtualMachineManagerImpl.java | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/4c0425f9/server/src/com/cloud/vm/VirtualMachineManagerImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/vm/VirtualMachineManagerImpl.java b/server/src/com/cloud/vm/VirtualMachineManagerImpl.java
index 5a28e67..3bc0063 100755
--- a/server/src/com/cloud/vm/VirtualMachineManagerImpl.java
+++ b/server/src/com/cloud/vm/VirtualMachineManagerImpl.java
@@ -833,6 +833,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                 ClusterDetailsVO cluster_detail_ram =  _clusterDetailsDao.findDetail(cluster_id,"memoryOvercommitRatio");
                 vmProfile.setCpuOvercommitRatio(Float.parseFloat(cluster_detail_cpu.getValue()));
                 vmProfile.setMemoryOvercommitRatio(Float.parseFloat(cluster_detail_ram.getValue()));
+                StartAnswer startAnswer = null;
 
                 try {
                     if (!changeState(vm, Event.OperationRetry, destHostId, work, Step.Prepare)) {
@@ -877,7 +878,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                     _workDao.updateStep(work, Step.Started);
 
 
-                    StartAnswer startAnswer = cmds.getAnswer(StartAnswer.class);
+                    startAnswer = cmds.getAnswer(StartAnswer.class);
                     if (startAnswer != null && startAnswer.getResult()) {
                         String host_guid = startAnswer.getHost_guid();
                         if( host_guid != null ) {
@@ -891,6 +892,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                             if (!changeState(vm, Event.OperationSucceeded, destHostId, work, Step.Done)) {
                                 throw new ConcurrentOperationException("Unable to transition to a new state.");
                             }
+
                             startedVm = vm;
                             if (s_logger.isDebugEnabled()) {
                                 s_logger.debug("Start completed for VM " + vm);
@@ -946,7 +948,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                     if (startedVm == null && canRetry) {
                         Step prevStep = work.getStep();
                         _workDao.updateStep(work, Step.Release);
-                        if (prevStep == Step.Started || prevStep == Step.Starting) {
+                        // If previous step was started/ing && we got a valid answer
+                        if((prevStep == Step.Started || prevStep == Step.Starting) && (startAnswer != null && startAnswer.getResult())){  //TODO check the response of cleanup and record it in DB for retry
                             cleanup(vmGuru, vmProfile, work, Event.OperationFailed, false, caller, account);
                         } else {
                             //if step is not starting/started, send cleanup command with force=true


[04/50] [abbrv] git commit: updated refs/heads/vmsync to f737019

Posted by ah...@apache.org.
Revert "Cloudstack-2150 DB table entries of physical network is not proper. Shows duplicate entries Cloudstack-2980 Adding a VLAN range that overlaps with two existing ranges results in inconsistent DB entries"

This reverts commit 7f2f25d640b47c8e72381bfc318a7bbbb1ca2bfd.


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/02ab2eb3
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/02ab2eb3
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/02ab2eb3

Branch: refs/heads/vmsync
Commit: 02ab2eb3848df2703399bdda7fa1608a52ec156c
Parents: dfb2e1d
Author: Prachi Damle <pr...@cloud.com>
Authored: Fri Jun 28 14:58:32 2013 -0700
Committer: Prachi Damle <pr...@cloud.com>
Committed: Fri Jun 28 14:58:32 2013 -0700

----------------------------------------------------------------------
 .../com/cloud/dc/dao/DataCenterVnetDaoImpl.java |  2 +-
 .../com/cloud/network/NetworkServiceImpl.java   | 85 ++++++++++++--------
 2 files changed, 53 insertions(+), 34 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/02ab2eb3/engine/schema/src/com/cloud/dc/dao/DataCenterVnetDaoImpl.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/dc/dao/DataCenterVnetDaoImpl.java b/engine/schema/src/com/cloud/dc/dao/DataCenterVnetDaoImpl.java
index a2f7cde..e97f2c6 100755
--- a/engine/schema/src/com/cloud/dc/dao/DataCenterVnetDaoImpl.java
+++ b/engine/schema/src/com/cloud/dc/dao/DataCenterVnetDaoImpl.java
@@ -113,7 +113,7 @@ public class DataCenterVnetDaoImpl extends GenericDaoBase<DataCenterVnetVO, Long
     
     @DB
     public void add(long dcId, long physicalNetworkId, int start, int end) {
-        String insertVnet = "INSERT IGNORE INTO `cloud`.`op_dc_vnet_alloc` (vnet, data_center_id, physical_network_id) VALUES ( ?, ?, ?)";
+        String insertVnet = "INSERT INTO `cloud`.`op_dc_vnet_alloc` (vnet, data_center_id, physical_network_id) VALUES ( ?, ?, ?)";
         
         Transaction txn = Transaction.currentTxn();
         try {

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/02ab2eb3/server/src/com/cloud/network/NetworkServiceImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/network/NetworkServiceImpl.java b/server/src/com/cloud/network/NetworkServiceImpl.java
index ac5d949..5e900bb 100755
--- a/server/src/com/cloud/network/NetworkServiceImpl.java
+++ b/server/src/com/cloud/network/NetworkServiceImpl.java
@@ -2561,7 +2561,7 @@ public class NetworkServiceImpl extends ManagerBase implements  NetworkService {
 
         if (removeVlan != null){
             List<Integer> tokens = processVlanRange(network,removeVlan);
-            removeVlanRange(network, tokens.get(0), tokens.get(1));
+            boolean result = removeVlanRange(network, tokens.get(0), tokens.get(1));
         }
 
         if (tags != null && tags.size() > 1) {
@@ -2589,6 +2589,7 @@ public class NetworkServiceImpl extends ManagerBase implements  NetworkService {
             network.setSpeed(networkSpeed);
         }
 
+        // Vnet range can be extended only
         boolean AddVnet = true;
         List<Pair<Integer, Integer>> vnetsToAdd = new ArrayList<Pair<Integer, Integer>>();
 
@@ -2605,6 +2606,7 @@ public class NetworkServiceImpl extends ManagerBase implements  NetworkService {
                     int existingStartVnet = existingRanges.get(j).first();
                     int existingEndVnet = existingRanges.get(j).second();
 
+                    // check if vnet is being extended
                     if (newStartVnet.intValue() >= existingStartVnet & newEndVnet.intValue() <= existingEndVnet) {
                         throw new InvalidParameterValueException("The vlan range you trying to add already exists.");
                     }
@@ -2628,62 +2630,79 @@ public class NetworkServiceImpl extends ManagerBase implements  NetworkService {
                         vnetsToAdd.add(new Pair<Integer, Integer>(existingEndVnet+1,newEndVnet));
                         existingRanges.get(j).first(newStartVnet);
                         existingRanges.get(j).second(newEndVnet);
-                        AddVnet = false;
                         break;
                     }
                 }
 
             }
             if (AddVnet){
-                vnetsToAdd.add(new Pair<Integer, Integer>(newStartVnet, newEndVnet));
-                existingRanges.add(new Pair<Integer, Integer>(newStartVnet,newEndVnet));
-                j= existingRanges.size()-1;
+                    vnetsToAdd.add(new Pair<Integer, Integer>(newStartVnet, newEndVnet));
+                    existingRanges.add(new Pair<Integer, Integer>(newStartVnet,newEndVnet));
+            }
+
+            Map <Integer,Integer> vnetMap = new HashMap<Integer, Integer>(existingRanges.size());
+            Map <Integer, Integer> IndexMap = new HashMap<Integer, Integer>(existingRanges.size());
+            for (int i=0; i< existingRanges.size(); i++){
+                 vnetMap.put(existingRanges.get(i).first(),existingRanges.get(i).second());
+                 IndexMap.put(existingRanges.get(i).first(),i);
             }
+
             Integer value;
+            Integer index;
             String vnetString = "";
-
-            Integer changed_entry_start = existingRanges.get(j).first();
-            Integer changed_entry_end = existingRanges.get(j).second();
             for (int i=0; i < existingRanges.size(); i++){
-                if (i !=j  && existingRanges.get(i).first()<=changed_entry_end && existingRanges.get(i).second() >= changed_entry_end) {
-                    existingRanges.get(j).second(existingRanges.get(i).second());
+                 value = vnetMap.get((existingRanges.get(i).second()+1));
+                 if (value != null) {
+                     vnetMap.remove((existingRanges.get(i).second()+1));
+                     vnetMap.remove(existingRanges.get(i).first());
+                     vnetMap.put(existingRanges.get(i).first(),value);
+                     existingRanges.add(new Pair<Integer,Integer>(existingRanges.get(i).first(),value));
+                     index = IndexMap.get(existingRanges.get(i).second()+1);
+                     existingRanges.get(index).first(-1);
+                     existingRanges.get(index).second(-1);
+                     existingRanges.get(i).first(-1);
+                     existingRanges.get(i).second(-1);
+                 }
+                value = vnetMap.get((existingRanges.get(i).second()));
+                if (value != null && ( (existingRanges.get(i).second()) != (existingRanges.get(i).first()) )) {
+                    vnetMap.remove((existingRanges.get(i).second()));
+                    vnetMap.remove(existingRanges.get(i).first());
+                    vnetMap.put(existingRanges.get(i).first(),value);
+                    existingRanges.add(new Pair<Integer,Integer>(existingRanges.get(i).first(),value));
+                    index = IndexMap.get(existingRanges.get(i).second());
+                    existingRanges.get(index).first(-1);
+                    existingRanges.get(index).second(-1);
                     existingRanges.get(i).first(-1);
                     existingRanges.get(i).second(-1);
-                } else if ((i !=j  && changed_entry_end > existingRanges.get(i).second()) && changed_entry_start <= existingRanges.get(i).first()) {
-                    existingRanges.get(i).first(-1);
-                    existingRanges.get(i).second(-1);
-                }else if ((i != j ) &&  changed_entry_end > existingRanges.get(i).second() && changed_entry_start <= existingRanges.get(i).second() &&  existingRanges.get(i).first() <= changed_entry_start) {
-                    existingRanges.get(j).first(existingRanges.get(i).first());
-                    existingRanges.get(i).first(-1);
-                    existingRanges.get(i).first(-1);
                 }
             }
 
 
-            for (Pair<Integer,Integer> vnetRange : existingRanges ){
-                value=vnetRange.first();
-                if (value != -1){
-                    vnetString = vnetString+vnetRange.first().toString()+"-"+vnetRange.second().toString()+";";
-                }
-            }
-            if (vnetString.length() > 0 && vnetString.charAt(vnetString.length()-1)==';') {
-                vnetString = vnetString.substring(0, vnetString.length()-1);
+
+            if (newVnetRangeString != null) {
+               for (Pair<Integer,Integer> vnetRange : existingRanges ){
+                    value=vnetMap.get(vnetRange.first());
+                    if (value != null){
+                        vnetString = vnetString+vnetRange.first().toString()+"-"+value.toString()+";";
+                    }
+               }
+               if (vnetString.length() > 0 && vnetString.charAt(vnetString.length()-1)==';') {
+                   vnetString = vnetString.substring(0, vnetString.length()-1);
+               }
+               network.setVnet(vnetString);
             }
-            network.setVnet(vnetString);
-            Transaction txn = Transaction.currentTxn();
-            txn.start();
+
             for (Pair<Integer, Integer> vnetToAdd : vnetsToAdd) {
                 s_logger.debug("Adding vnet range " + vnetToAdd.first() + "-" + vnetToAdd.second() + " for the physicalNetwork id= " + id + " and zone id=" + network.getDataCenterId()
-                        + " as a part of updatePhysicalNetwork call");
+                    + " as a part of updatePhysicalNetwork call");
                 _dcDao.addVnet(network.getDataCenterId(), network.getId(), vnetToAdd.first(), vnetToAdd.second());
             }
-            _physicalNetworkDao.update(id, network);
-            txn.commit();
         }
 
-        return  network;
-    }
+        _physicalNetworkDao.update(id, network);
 
+        return network;
+    }
 
     private List<Integer> processVlanRange(PhysicalNetworkVO network, String removeVlan) {
         Integer StartVnet;


[15/50] [abbrv] git commit: updated refs/heads/vmsync to f737019

Posted by ah...@apache.org.
CLOUDSTACK-3074: include support for vmwaredc mapping to zone

vmware dc can be mapped to the zone after CLOUDSTACK-1963. include
support in marvin for adding the vmwaredc.

vmwaredc : {
    name:
    username:
    vcenter:
    password:
    zoneid:
}

Will be sent during pod creation before cluster creation.

Signed-off-by: Prasanna Santhanam <ts...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/f3e30486
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/f3e30486
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/f3e30486

Branch: refs/heads/vmsync
Commit: f3e30486825c69670eec733517c8aa766c299dc1
Parents: 87c401a
Author: Prasanna Santhanam <ts...@apache.org>
Authored: Sat Jun 29 00:38:52 2013 +0530
Committer: Prasanna Santhanam <ts...@apache.org>
Committed: Sat Jun 29 10:03:51 2013 +0530

----------------------------------------------------------------------
 tools/marvin/marvin/configGenerator.py  | 10 ++++++++++
 tools/marvin/marvin/deployDataCenter.py | 16 ++++++++++++++--
 2 files changed, 24 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/f3e30486/tools/marvin/marvin/configGenerator.py
----------------------------------------------------------------------
diff --git a/tools/marvin/marvin/configGenerator.py b/tools/marvin/marvin/configGenerator.py
index 812bfec..a966ae0 100644
--- a/tools/marvin/marvin/configGenerator.py
+++ b/tools/marvin/marvin/configGenerator.py
@@ -105,10 +105,20 @@ class pod(object):
         self.endip = None
         self.zoneid = None
         self.clusters = []
+        self.vmwaredc = []
         '''Used in basic network mode'''
         self.guestIpRanges = []
 
 
+class VmwareDc(object):
+    def __init__(self):
+        self.zoneid = None
+        self.name = None
+        self.vcenter = None
+        self.username = None
+        self.password = None
+
+
 class cluster(object):
     def __init__(self):
         self.clustername = None

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/f3e30486/tools/marvin/marvin/deployDataCenter.py
----------------------------------------------------------------------
diff --git a/tools/marvin/marvin/deployDataCenter.py b/tools/marvin/marvin/deployDataCenter.py
index 941b6e9..961718a 100644
--- a/tools/marvin/marvin/deployDataCenter.py
+++ b/tools/marvin/marvin/deployDataCenter.py
@@ -54,10 +54,22 @@ specify a valid config file" % cfgFile)
             hostcmd.hypervisor = hypervisor
             self.apiClient.addHost(hostcmd)
 
-    def createClusters(self, clusters, zoneId, podId):
+    def addVmWareDataCenter(self, vmwareDc):
+        vdc = addVmwareDc.addVmwareDcCmd()
+        vdc.zoneid = vmwareDc.zoneid
+        vdc.name = vmwareDc.name
+        vdc.vcenter = vmwareDc.vcenter
+        vdc.username = vmwareDc.username
+        vdc.password = vmwareDc.password
+        self.apiClient.addVmwareDc(vdc)
+
+    def createClusters(self, clusters, zoneId, podId, vmwareDc=None):
         if clusters is None:
             return
 
+        if vmwareDc:
+            self.addVmWareDataCenter(vmwareDc)
+
         for cluster in clusters:
             clustercmd = addCluster.addClusterCmd()
             clustercmd.clustername = cluster.clustername
@@ -108,7 +120,7 @@ specify a valid config file" % cfgFile)
                 self.createVlanIpRanges("Basic", pod.guestIpRanges, zoneId,
                                         podId, networkId)
 
-            self.createClusters(pod.clusters, zoneId, podId)
+            self.createClusters(pod.clusters, zoneId, podId, vmwareDc=pod.vmwaredc)
 
     def createVlanIpRanges(self, mode, ipranges, zoneId, podId=None,
                            networkId=None, forvirtualnetwork=None):


[44/50] [abbrv] git commit: updated refs/heads/vmsync to f737019

Posted by ah...@apache.org.
readability of log and exception messages

Signed-off-by: Prasanna Santhanam <ts...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/639592f3
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/639592f3
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/639592f3

Branch: refs/heads/vmsync
Commit: 639592f3b7ef1baede230110cb1e1b6bf30d8b1e
Parents: ffd5f1a
Author: Daan Hoogland <dh...@schubergphilis.com>
Authored: Mon Jul 1 12:41:41 2013 +0200
Committer: Prasanna Santhanam <ts...@apache.org>
Committed: Mon Jul 1 20:17:11 2013 +0530

----------------------------------------------------------------------
 server/src/com/cloud/storage/StorageManagerImpl.java | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/639592f3/server/src/com/cloud/storage/StorageManagerImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/storage/StorageManagerImpl.java b/server/src/com/cloud/storage/StorageManagerImpl.java
index 138c6d7..b64b202 100755
--- a/server/src/com/cloud/storage/StorageManagerImpl.java
+++ b/server/src/com/cloud/storage/StorageManagerImpl.java
@@ -658,7 +658,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
             try {
                 scopeType = Enum.valueOf(ScopeType.class, scope.toUpperCase());
             } catch (Exception e) {
-                throw new InvalidParameterValueException("invalid scope" + scope);
+                throw new InvalidParameterValueException("invalid scope for pool " + scope);
             }
         }
 
@@ -678,7 +678,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
                 try {
                     hypervisorType = HypervisorType.getType(hypervisor);
                 } catch (Exception e) {
-                    throw new InvalidParameterValueException("invalid hypervisor type" + hypervisor);
+                    throw new InvalidParameterValueException("invalid hypervisor type " + hypervisor);
                 }
             } else {
                 throw new InvalidParameterValueException(
@@ -812,9 +812,9 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
                     try {
                         future.get();
                     } catch (InterruptedException e) {
-                        s_logger.debug("expunge volume failed" + vol.getId(), e);
+                        s_logger.debug("expunge volume failed:" + vol.getId(), e);
                     } catch (ExecutionException e) {
-                        s_logger.debug("expunge volume failed" + vol.getId(), e);
+                        s_logger.debug("expunge volume failed:" + vol.getId(), e);
                     }
                 }
             }
@@ -822,7 +822,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
             // Check if the pool has associated volumes in the volumes table
             // If it does , then you cannot delete the pool
             if (vlms.first() > 0) {
-                throw new CloudRuntimeException("Cannot delete pool " + sPool.getName() + " as there are associated vols" + " for this pool");
+                throw new CloudRuntimeException("Cannot delete pool " + sPool.getName() + " as there are associated volumes for this pool");
             }
         }
 
@@ -1277,7 +1277,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
     public void onManagementNodeLeft(List<ManagementServerHostVO> nodeList, long selfNodeId) {
         for (ManagementServerHostVO vo : nodeList) {
             if (vo.getMsid() == _serverId) {
-                s_logger.info("Cleaning up storage maintenance jobs associated with Management server" + vo.getMsid());
+                s_logger.info("Cleaning up storage maintenance jobs associated with Management server: " + vo.getMsid());
                 List<Long> poolIds = _storagePoolWorkDao.searchForPoolIdsForPendingWorkJobs(vo.getMsid());
                 if (poolIds.size() > 0) {
                     for (Long poolId : poolIds) {
@@ -1802,7 +1802,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
                 scopeType = Enum.valueOf(ScopeType.class, scope.toUpperCase());
 
             } catch (Exception e) {
-                throw new InvalidParameterValueException("invalid scope" + scope);
+                throw new InvalidParameterValueException("invalid scope for cache store " + scope);
             }
 
             if (scopeType != ScopeType.ZONE) {


[47/50] [abbrv] Another merge from master. This is just getting laborious

Posted by ah...@apache.org.
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/57bafc8f/server/src/com/cloud/storage/VolumeManagerImpl.java
----------------------------------------------------------------------
diff --cc server/src/com/cloud/storage/VolumeManagerImpl.java
index da2f9c4,30fb322..585468b
--- a/server/src/com/cloud/storage/VolumeManagerImpl.java
+++ b/server/src/com/cloud/storage/VolumeManagerImpl.java
@@@ -41,9 -41,10 +41,10 @@@ import org.apache.cloudstack.api.comman
  import org.apache.cloudstack.api.command.user.volume.ExtractVolumeCmd;
  import org.apache.cloudstack.api.command.user.volume.MigrateVolumeCmd;
  import org.apache.cloudstack.api.command.user.volume.ResizeVolumeCmd;
 +import org.apache.cloudstack.api.command.user.volume.UpdateVolumeCmd;
  import org.apache.cloudstack.api.command.user.volume.UploadVolumeCmd;
 -
 -import com.cloud.storage.dao.*;
 -import org.apache.cloudstack.api.command.user.volume.*;
 +import org.apache.cloudstack.context.CallContext;
++import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo;
  import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
  import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
  import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager;
@@@ -517,10 -524,10 +522,10 @@@ public class VolumeManagerImpl extends 
          newVol.setFormat(oldVol.getFormat());
          return _volsDao.persist(newVol);
      }
 -
 +    
      @DB
      protected VolumeInfo createVolumeFromSnapshot(VolumeVO volume,
-             SnapshotVO snapshot) {
+             SnapshotVO snapshot) throws StorageUnavailableException {
          Account account = _accountDao.findById(volume.getAccountId());
  
          final HashSet<StoragePool> poolsToAvoid = new HashSet<StoragePool>();
@@@ -544,10 -551,16 +549,16 @@@
              while ((pool = storageMgr.findStoragePool(dskCh, dc, pod.first(), null, null,
                      null, poolsToAvoid)) != null) {
                  break;
 -
 +                
              }
          }
 -
 +        
+         if (pool == null) {
+             String msg = "There are no available storage pools to store the volume in";
+             s_logger.info(msg);
+             throw new StorageUnavailableException(msg, -1);
+         }
+ 
          VolumeInfo vol = volFactory.getVolume(volume.getId());
          DataStore store = dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);
          SnapshotInfo snapInfo = snapshotFactory.getSnapshot(snapshot.getId(), DataStoreRole.Image);
@@@ -1396,8 -1444,9 +1442,9 @@@
          Long size = _tmpltMgr.getTemplateSize(template.getId(), vm.getDataCenterId());
  
          VolumeVO vol = new VolumeVO(type, name, vm.getDataCenterId(),
-                 owner.getDomainId(), owner.getId(), offering.getId(), size);
+                 owner.getDomainId(), owner.getId(), offering.getId(), size,
+                 offering.getMinIops(), offering.getMaxIops(), null);
 -        vol.setFormat(this.getSupportedImageFormatForCluster(template.getHypervisorType()));
 +        vol.setFormat(getSupportedImageFormatForCluster(template.getHypervisorType()));
          if (vm != null) {
              vol.setInstanceId(vm.getId());
          }
@@@ -1536,12 -1585,12 +1583,12 @@@
          if (storeForRootStoreScope.getScopeType() != storeForDataStoreScope.getScopeType()) {
              throw new CloudRuntimeException("Can't move volume between scope: " + storeForDataStoreScope.getScopeType() + " and " + storeForRootStoreScope.getScopeType());
          }
 -
 +       
          return !storeForRootStoreScope.isSameScope(storeForDataStoreScope);
      }
 -
 +    
-     private VolumeVO sendAttachVolumeCommand(UserVmVO vm, VolumeVO volume, Long deviceId) {
-         String errorMsg = "Failed to attach volume: " + volume.getName()
+     private VolumeVO sendAttachVolumeCommand(UserVmVO vm, VolumeVO volumeToAttach, Long deviceId) {
+         String errorMsg = "Failed to attach volume: " + volumeToAttach.getName()
                  + " to VM: " + vm.getHostName();
          boolean sendCommand = (vm.getState() == State.Running);
          AttachAnswer answer = null;
@@@ -1928,14 -2031,9 +2019,9 @@@
          }
      }
  
-     
- 
-     
-     
- 
      @DB
      protected VolumeVO switchVolume(VolumeVO existingVolume,
 -            VirtualMachineProfile<? extends VirtualMachine> vm)
 +            VirtualMachineProfile vm)
              throws StorageUnavailableException {
          Transaction txn = Transaction.currentTxn();
  
@@@ -2222,16 -2323,16 +2308,16 @@@
          }
  
          if (vm.getType() == VirtualMachine.Type.User) {
 -            UserVmVO userVM = (UserVmVO) vm.getVirtualMachine();
 +            UserVm userVM = _entityMgr.findById(UserVm.class, vm.getId());
              if (userVM.getIsoId() != null) {
                  DataTO dataTO = tmplFactory.getTemplate(userVM.getIsoId(), DataStoreRole.Image, userVM.getDataCenterId()).getTO();
-                 DiskTO iso = new DiskTO(dataTO, 3L, Volume.Type.ISO);
+                 DiskTO iso = new DiskTO(dataTO, 3L, null, Volume.Type.ISO);
 -                vm.addDisk(iso);
 +                    vm.addDisk(iso);
 +                }
              }
          }
 -    }
 -
  
 +   
  
      private static enum VolumeTaskType {
          RECREATE,

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/57bafc8f/server/src/com/cloud/storage/secondary/SecondaryStorageManagerImpl.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/57bafc8f/server/src/com/cloud/template/TemplateAdapterBase.java
----------------------------------------------------------------------
diff --cc server/src/com/cloud/template/TemplateAdapterBase.java
index 25b2984,c5d4a6b..88909d8
--- a/server/src/com/cloud/template/TemplateAdapterBase.java
+++ b/server/src/com/cloud/template/TemplateAdapterBase.java
@@@ -223,17 -223,17 +223,17 @@@ public abstract class TemplateAdapterBa
  	@Override
  	public TemplateProfile prepare(RegisterTemplateCmd cmd) throws ResourceAllocationException {
  	    //check if the caller can operate with the template owner
 -        Account caller = UserContext.current().getCaller();
 +        Account caller = CallContext.current().getCallingAccount();
          Account owner = _accountMgr.getAccount(cmd.getEntityOwnerId());
          _accountMgr.checkAccess(caller, null, true, owner);
 -
 +	    
- 
+     boolean isRouting = (cmd.isRoutingType() == null) ? false : cmd.isRoutingType();
 -
 -    return prepare(false, UserContext.current().getCallerUserId(), cmd.getTemplateName(), cmd.getDisplayText(),
 -                cmd.getBits(), cmd.isPasswordEnabled(), cmd.getRequiresHvm(), cmd.getUrl(), cmd.isPublic(), cmd.isFeatured(),
 -                cmd.isExtractable(), cmd.getFormat(), cmd.getOsTypeId(), cmd.getZoneId(), HypervisorType.getType(cmd.getHypervisor()),
 +        
 +		return prepare(false, CallContext.current().getCallingUserId(), cmd.getTemplateName(), cmd.getDisplayText(),
 +				cmd.getBits(), cmd.isPasswordEnabled(), cmd.getRequiresHvm(), cmd.getUrl(), cmd.isPublic(), cmd.isFeatured(),
 +				cmd.isExtractable(), cmd.getFormat(), cmd.getOsTypeId(), cmd.getZoneId(), HypervisorType.getType(cmd.getHypervisor()),
                  cmd.getChecksum(), true, cmd.getTemplateTag(), owner, cmd.getDetails(), cmd.isSshKeyEnabled(), null, cmd.isDynamicallyScalable(),
-                 cmd.isRoutingType() ? TemplateType.ROUTING : TemplateType.USER);
+                 isRouting ? TemplateType.ROUTING : TemplateType.USER);
  
  	}
  

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/57bafc8f/server/src/com/cloud/template/TemplateManagerImpl.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/57bafc8f/server/src/com/cloud/vm/UserVmManagerImpl.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/57bafc8f/server/src/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImpl.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/57bafc8f/server/test/com/cloud/vpc/MockConfigurationManagerImpl.java
----------------------------------------------------------------------
diff --cc server/test/com/cloud/vpc/MockConfigurationManagerImpl.java
index c7675c8,7a61978..f71067d
--- a/server/test/com/cloud/vpc/MockConfigurationManagerImpl.java
+++ b/server/test/com/cloud/vpc/MockConfigurationManagerImpl.java
@@@ -604,8 -655,9 +604,9 @@@ public class MockConfigurationManagerIm
       * @see com.cloud.configuration.ConfigurationManager#createDiskOffering(java.lang.Long, java.lang.String, java.lang.String, java.lang.Long, java.lang.String, boolean, boolean, boolean)
       */
      @Override
-     public DiskOfferingVO createDiskOffering(Long domainId, String name, String description, Long numGibibytes, String tags, boolean isCustomized, boolean localStorageRequired, boolean isDisplayOfferingEnabled,
+     public DiskOfferingVO createDiskOffering(Long domainId, String name, String description, Long numGibibytes, String tags, boolean isCustomized,
+     		boolean localStorageRequired, boolean isDisplayOfferingEnabled, Boolean isCustomizedIops, Long minIops, Long maxIops,
 -    		Long bytesReadRate, Long bytesWriteRate, Long iopsReadRate, Long iopsWriteRate) {
 +            Long bytesReadRate, Long bytesWriteRate, Long iopsReadRate, Long iopsWriteRate) {
          // TODO Auto-generated method stub
          return null;
      }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/57bafc8f/setup/db/db/schema-410to420.sql
----------------------------------------------------------------------


[08/50] [abbrv] git commit: updated refs/heads/vmsync to f737019

Posted by ah...@apache.org.
SolidFire plug-in and related changes

SolidFire plug-in

SolidFire plug-in related


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/99227f7b
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/99227f7b
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/99227f7b

Branch: refs/heads/vmsync
Commit: 99227f7b3e824caeb89035982793ad510e460249
Parents: 02ab2eb
Author: Mike Tutkowski <mi...@solidfire.com>
Authored: Fri Jun 28 14:05:12 2013 -0600
Committer: Mike Tutkowski <mi...@solidfire.com>
Committed: Fri Jun 28 16:59:21 2013 -0600

----------------------------------------------------------------------
 api/src/com/cloud/agent/api/to/DiskTO.java      |  12 +-
 api/src/com/cloud/offering/DiskOffering.java    |  16 +-
 api/src/com/cloud/storage/StoragePool.java      |   2 +
 api/src/com/cloud/storage/Volume.java           |   6 +
 .../org/apache/cloudstack/api/ApiConstants.java |   6 +
 .../admin/offering/CreateDiskOfferingCmd.java   |  27 +-
 .../admin/storage/CreateStoragePoolCmd.java     |  24 +
 .../command/user/volume/CreateVolumeCmd.java    |  14 +
 .../api/response/DiskOfferingResponse.java      |  33 +
 .../api/response/StoragePoolResponse.java       |  11 +
 .../cloudstack/api/response/VolumeResponse.java |  16 +
 .../classes/resources/messages.properties       |   7 +-
 client/pom.xml                                  |   5 +
 client/tomcatconf/applicationContext.xml.in     |   1 +
 .../com/cloud/agent/api/AttachVolumeAnswer.java |  16 +-
 .../cloud/agent/api/AttachVolumeCommand.java    | 114 ++-
 .../api/agent/test/AttachVolumeAnswerTest.java  |   6 +-
 .../api/agent/test/AttachVolumeCommandTest.java |  10 +-
 .../agent/test/BackupSnapshotCommandTest.java   |   5 +
 .../api/agent/test/CheckNetworkAnswerTest.java  |   5 +
 .../api/agent/test/SnapshotCommandTest.java     |   4 +
 .../engine/subsystem/api/storage/ChapInfo.java  |  26 +
 .../subsystem/api/storage/DataStoreDriver.java  |  14 +-
 .../api/storage/PrimaryDataStoreDriver.java     |   6 +-
 .../api/storage/PrimaryDataStoreParameters.java |  50 +
 .../subsystem/api/storage/VolumeService.java    |   2 +
 .../storage/command/AttachCommand.java          |  71 ++
 .../storage/command/DettachCommand.java         |  17 +
 .../storage/datastore/db/StoragePoolVO.java     |  26 +-
 .../src/com/cloud/storage/DiskOfferingVO.java   |  46 +-
 .../schema/src/com/cloud/storage/VolumeVO.java  |  57 +-
 .../src/com/cloud/storage/dao/VolumeDao.java    |   2 +
 .../com/cloud/storage/dao/VolumeDaoImpl.java    |  13 +
 .../storage/image/TemplateServiceImpl.java      |   4 +-
 .../storage/image/store/ImageStoreImpl.java     |   2 +-
 .../storage/allocator/StorageAllocatorTest.java |   3 +-
 .../cloudstack/storage/test/SnapshotTest.java   |   2 +-
 .../cloudstack/storage/test/VolumeTest.java     |   2 +-
 .../storage/test/VolumeTestVmware.java          |   2 +-
 .../storage/test/volumeServiceTest.java         |   2 +-
 .../storage/snapshot/SnapshotServiceImpl.java   |   2 +-
 .../allocator/ZoneWideStoragePoolAllocator.java |  36 +-
 .../datastore/DataObjectManagerImpl.java        |   4 +-
 .../datastore/PrimaryDataStoreEntityImpl.java   |   6 +
 .../storage/image/BaseImageStoreDriverImpl.java |   4 +-
 .../datastore/PrimaryDataStoreHelper.java       |   5 +
 .../storage/datastore/PrimaryDataStoreImpl.java |   5 +
 .../cloudstack/storage/volume/VolumeObject.java |  15 +
 .../storage/volume/VolumeServiceImpl.java       |  19 +-
 .../kvm/resource/LibvirtComputingResource.java  |   2 +-
 .../agent/manager/MockStorageManagerImpl.java   |   2 +-
 .../vmware/resource/VmwareResource.java         | 274 +++++-
 .../xen/resource/CitrixResourceBase.java        | 143 ++-
 .../xen/resource/XenServerStorageProcessor.java |  18 +-
 .../CloudStackPrimaryDataStoreDriverImpl.java   |   9 +-
 .../SamplePrimaryDataStoreDriverImpl.java       |   9 +-
 plugins/storage/volume/solidfire/pom.xml        |   7 +-
 .../driver/SolidfirePrimaryDataStoreDriver.java | 480 +++++++++-
 .../SolidFirePrimaryDataStoreLifeCycle.java     | 274 ++++++
 .../SolidfirePrimaryDataStoreProvider.java      |  81 +-
 .../storage/datastore/util/SolidFireUtil.java   | 901 +++++++++++++++++++
 .../api/query/dao/DiskOfferingJoinDaoImpl.java  |   3 +
 .../api/query/dao/StoragePoolJoinDaoImpl.java   |   2 +
 .../cloud/api/query/dao/VolumeJoinDaoImpl.java  |   3 +
 .../cloud/api/query/vo/DiskOfferingJoinVO.java  |  33 +
 .../cloud/api/query/vo/StoragePoolJoinVO.java   |  13 +-
 .../com/cloud/api/query/vo/VolumeJoinVO.java    |  21 +-
 .../configuration/ConfigurationManager.java     |   8 +-
 .../configuration/ConfigurationManagerImpl.java |  53 +-
 .../cloud/server/ConfigurationServerImpl.java   |   2 +-
 .../src/com/cloud/storage/StorageManager.java   |   9 +-
 .../com/cloud/storage/StorageManagerImpl.java   |  44 +-
 server/src/com/cloud/storage/VolumeManager.java |   1 -
 .../com/cloud/storage/VolumeManagerImpl.java    | 139 ++-
 .../com/cloud/template/TemplateManagerImpl.java |   2 +-
 server/src/com/cloud/test/DatabaseConfig.java   |   2 +-
 server/src/com/cloud/vm/UserVmManagerImpl.java  |   4 +-
 .../cloud/vpc/MockConfigurationManagerImpl.java |   5 +-
 setup/db/db/schema-410to420.sql                 |  22 +
 tools/marvin/marvin/cloudstackConnection.py     |   2 +-
 ui/dictionary.jsp                               |   5 +
 ui/scripts/configuration.js                     | 179 +++-
 ui/scripts/docs.js                              |  16 +
 ui/scripts/sharedFunctions.js                   |   4 +-
 ui/scripts/storage.js                           |  56 +-
 ui/scripts/system.js                            |  19 +-
 utils/src/com/cloud/utils/StringUtils.java      |   8 +
 .../vmware/mo/HostDatastoreSystemMO.java        |  20 +-
 .../com/cloud/hypervisor/vmware/mo/HostMO.java  |  15 +-
 .../vmware/mo/HostStorageSystemMO.java          |  51 ++
 90 files changed, 3432 insertions(+), 292 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/api/src/com/cloud/agent/api/to/DiskTO.java
----------------------------------------------------------------------
diff --git a/api/src/com/cloud/agent/api/to/DiskTO.java b/api/src/com/cloud/agent/api/to/DiskTO.java
index 7b32f00..556ccd4 100644
--- a/api/src/com/cloud/agent/api/to/DiskTO.java
+++ b/api/src/com/cloud/agent/api/to/DiskTO.java
@@ -23,14 +23,16 @@ import com.cloud.storage.Volume;
 public class DiskTO {
     private DataTO data;
     private Long diskSeq;
+    private String vdiUuid;
     private Volume.Type type;
     public DiskTO() {
         
     }
     
-    public DiskTO(DataTO data, Long diskSeq, Volume.Type type) {
+    public DiskTO(DataTO data, Long diskSeq, String vdiUuid, Volume.Type type) {
         this.data = data;
         this.diskSeq = diskSeq;
+        this.vdiUuid = vdiUuid;
         this.type = type;
     }
 
@@ -50,6 +52,14 @@ public class DiskTO {
         this.diskSeq = diskSeq;
     }
 
+    public String getVdiUuid() {
+        return vdiUuid;
+    }
+
+    public void setVdiUuid(String vdiUuid) {
+        this.vdiUuid = vdiUuid;
+    }
+
     public Volume.Type getType() {
         return type;
     }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/api/src/com/cloud/offering/DiskOffering.java
----------------------------------------------------------------------
diff --git a/api/src/com/cloud/offering/DiskOffering.java b/api/src/com/cloud/offering/DiskOffering.java
index ae4528c..9c196e0 100644
--- a/api/src/com/cloud/offering/DiskOffering.java
+++ b/api/src/com/cloud/offering/DiskOffering.java
@@ -47,12 +47,24 @@ public interface DiskOffering extends InfrastructureEntity, Identity, InternalId
 
     Date getCreated();
 
-    long getDiskSize();
-
     boolean isCustomized();
 
     void setDiskSize(long diskSize);
 
+    long getDiskSize();
+
+    void setCustomizedIops(Boolean customizedIops);
+
+    Boolean isCustomizedIops();
+
+    void setMinIops(Long minIops);
+
+    Long getMinIops();
+
+    void setMaxIops(Long maxIops);
+
+    Long getMaxIops();
+
     void setBytesReadRate(Long bytesReadRate);
 
     Long getBytesReadRate();

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/api/src/com/cloud/storage/StoragePool.java
----------------------------------------------------------------------
diff --git a/api/src/com/cloud/storage/StoragePool.java b/api/src/com/cloud/storage/StoragePool.java
index 8f8b864..6e9af12 100644
--- a/api/src/com/cloud/storage/StoragePool.java
+++ b/api/src/com/cloud/storage/StoragePool.java
@@ -60,6 +60,8 @@ public interface StoragePool extends Identity, InternalIdentity {
      */
     long getUsedBytes();
 
+    Long getCapacityIops();
+
     Long getClusterId();
 
     /**

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/api/src/com/cloud/storage/Volume.java
----------------------------------------------------------------------
diff --git a/api/src/com/cloud/storage/Volume.java b/api/src/com/cloud/storage/Volume.java
index f5ed4e2..342dfd3 100755
--- a/api/src/com/cloud/storage/Volume.java
+++ b/api/src/com/cloud/storage/Volume.java
@@ -122,6 +122,12 @@ public interface Volume extends ControlledEntity, Identity, InternalIdentity, Ba
      */
     Long getSize();
 
+    Long getMinIops();
+
+    Long getMaxIops();
+
+    String get_iScsiName();
+
     /**
      * @return the vm instance id
      */

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/api/src/org/apache/cloudstack/api/ApiConstants.java
----------------------------------------------------------------------
diff --git a/api/src/org/apache/cloudstack/api/ApiConstants.java b/api/src/org/apache/cloudstack/api/ApiConstants.java
index 809e023..dd876f7 100755
--- a/api/src/org/apache/cloudstack/api/ApiConstants.java
+++ b/api/src/org/apache/cloudstack/api/ApiConstants.java
@@ -51,6 +51,9 @@ public class ApiConstants {
     public static final String CPU_OVERCOMMIT_RATIO="cpuovercommitratio";
     public static final String CREATED = "created";
     public static final String CUSTOMIZED = "customized";
+    public static final String CUSTOMIZED_IOPS = "customizediops";
+    public static final String MIN_IOPS = "miniops";
+    public static final String MAX_IOPS = "maxiops";
     public static final String DESCRIPTION = "description";
     public static final String DESTINATION_ZONE_ID = "destzoneid";
     public static final String DETAILS = "details";
@@ -326,6 +329,9 @@ public class ApiConstants {
     public static final String SERVICE_CAPABILITY_LIST = "servicecapabilitylist";
     public static final String CAN_CHOOSE_SERVICE_CAPABILITY = "canchooseservicecapability";
     public static final String PROVIDER = "provider";
+    public static final String MANAGED = "managed";
+    public static final String CAPACITY_BYTES = "capacitybytes";
+    public static final String CAPACITY_IOPS = "capacityiops";
     public static final String NETWORK_SPEED = "networkspeed";
     public static final String BROADCAST_DOMAIN_RANGE = "broadcastdomainrange";
     public static final String ISOLATION_METHODS = "isolationmethods";

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/api/src/org/apache/cloudstack/api/command/admin/offering/CreateDiskOfferingCmd.java
----------------------------------------------------------------------
diff --git a/api/src/org/apache/cloudstack/api/command/admin/offering/CreateDiskOfferingCmd.java b/api/src/org/apache/cloudstack/api/command/admin/offering/CreateDiskOfferingCmd.java
index a2c5f77..4741591 100644
--- a/api/src/org/apache/cloudstack/api/command/admin/offering/CreateDiskOfferingCmd.java
+++ b/api/src/org/apache/cloudstack/api/command/admin/offering/CreateDiskOfferingCmd.java
@@ -52,7 +52,7 @@ public class CreateDiskOfferingCmd extends BaseCmd {
     @Parameter(name=ApiConstants.TAGS, type=CommandType.STRING, description="tags for the disk offering", length=4096)
     private String tags;
 
-    @Parameter(name=ApiConstants.CUSTOMIZED, type=CommandType.BOOLEAN, description="whether disk offering is custom or not")
+    @Parameter(name=ApiConstants.CUSTOMIZED, type=CommandType.BOOLEAN, description="whether disk offering size is custom or not")
     private Boolean customized;
 
     @Parameter(name=ApiConstants.DOMAIN_ID, type=CommandType.UUID, entityType=DomainResponse.class,
@@ -62,6 +62,9 @@ public class CreateDiskOfferingCmd extends BaseCmd {
     @Parameter(name=ApiConstants.STORAGE_TYPE, type=CommandType.STRING, description="the storage type of the disk offering. Values are local and shared.")
     private String storageType = ServiceOffering.StorageType.shared.toString();
 
+    @Parameter(name=ApiConstants.DISPLAY_OFFERING, type=CommandType.BOOLEAN, description="an optional field, whether to display the offering to the end user or not.")
+    private Boolean displayOffering;
+
     @Parameter(name=ApiConstants.BYTES_READ_RATE, type=CommandType.LONG, required=false, description="bytes read rate of the disk offering")
     private Long bytesReadRate;
 
@@ -74,8 +77,14 @@ public class CreateDiskOfferingCmd extends BaseCmd {
     @Parameter(name=ApiConstants.IOPS_WRITE_RATE, type=CommandType.LONG, required=false, description="io requests write rate of the disk offering")
     private Long iopsWriteRate;
 
-    @Parameter(name=ApiConstants.DISPLAY_OFFERING, type=CommandType.BOOLEAN, description="an optional field, whether to display the offering to the end user or not.")
-    private Boolean displayOffering;
+    @Parameter(name=ApiConstants.CUSTOMIZED_IOPS, type=CommandType.BOOLEAN, required=false, description="whether disk offering iops is custom or not")
+    private Boolean customizedIops;
+
+    @Parameter(name=ApiConstants.MIN_IOPS, type=CommandType.LONG, required=false, description="min iops of the disk offering")
+    private Long minIops;
+
+    @Parameter(name=ApiConstants.MAX_IOPS, type=CommandType.LONG, required=false, description="max iops of the disk offering")
+    private Long maxIops;
 
 /////////////////////////////////////////////////////
     /////////////////// Accessors ///////////////////////
@@ -101,6 +110,18 @@ public class CreateDiskOfferingCmd extends BaseCmd {
         return customized;
     }
 
+    public Boolean isCustomizedIops() {
+        return customizedIops;
+    }
+
+    public Long getMinIops() {
+        return minIops;
+    }
+
+    public Long getMaxIops() {
+        return maxIops;
+    }
+
     public Long getDomainId(){
         return domainId;
     }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/api/src/org/apache/cloudstack/api/command/admin/storage/CreateStoragePoolCmd.java
----------------------------------------------------------------------
diff --git a/api/src/org/apache/cloudstack/api/command/admin/storage/CreateStoragePoolCmd.java b/api/src/org/apache/cloudstack/api/command/admin/storage/CreateStoragePoolCmd.java
index 74eb2b9..f5750b9 100644
--- a/api/src/org/apache/cloudstack/api/command/admin/storage/CreateStoragePoolCmd.java
+++ b/api/src/org/apache/cloudstack/api/command/admin/storage/CreateStoragePoolCmd.java
@@ -80,6 +80,18 @@ public class CreateStoragePoolCmd extends BaseCmd {
             required=false, description="the scope of the storage: cluster or zone")
     private String scope;
 
+    @Parameter(name=ApiConstants.MANAGED, type=CommandType.BOOLEAN,
+            required=false, description="whether the storage should be managed by CloudStack")
+    private Boolean managed;
+
+    @Parameter(name=ApiConstants.CAPACITY_IOPS, type=CommandType.LONG,
+            required=false, description="IOPS CloudStack can provision from this storage pool")
+    private Long capacityIops;
+
+    @Parameter(name=ApiConstants.CAPACITY_BYTES, type=CommandType.LONG,
+            required=false, description="bytes CloudStack can provision from this storage pool")
+    private Long capacityBytes;
+
     @Parameter(name=ApiConstants.HYPERVISOR, type=CommandType.STRING, required=false,
             description="hypervisor type of the hosts in zone that will be attached to this storage pool. KVM, VMware supported as of now.")
     private String hypervisor;
@@ -124,6 +136,18 @@ public class CreateStoragePoolCmd extends BaseCmd {
         return this.scope;
     }
 
+    public Boolean isManaged() {
+    	return managed;
+    }
+
+    public Long getCapacityIops() {
+        return capacityIops;
+    }
+
+    public Long getCapacityBytes() {
+        return capacityBytes;
+    }
+
     public String getHypervisor() {
         return hypervisor;
     }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/api/src/org/apache/cloudstack/api/command/user/volume/CreateVolumeCmd.java
----------------------------------------------------------------------
diff --git a/api/src/org/apache/cloudstack/api/command/user/volume/CreateVolumeCmd.java b/api/src/org/apache/cloudstack/api/command/user/volume/CreateVolumeCmd.java
index 6f0bf3a..f293a03 100644
--- a/api/src/org/apache/cloudstack/api/command/user/volume/CreateVolumeCmd.java
+++ b/api/src/org/apache/cloudstack/api/command/user/volume/CreateVolumeCmd.java
@@ -68,6 +68,12 @@ public class CreateVolumeCmd extends BaseAsyncCreateCmd {
     @Parameter(name=ApiConstants.SIZE, type=CommandType.LONG, description="Arbitrary volume size")
     private Long size;
 
+    @Parameter(name=ApiConstants.MIN_IOPS, type=CommandType.LONG, description="min iops")
+    private Long minIops;
+
+    @Parameter(name=ApiConstants.MAX_IOPS, type=CommandType.LONG, description="max iops")
+    private Long maxIops;
+
     @Parameter(name=ApiConstants.SNAPSHOT_ID, type=CommandType.UUID, entityType=SnapshotResponse.class,
             description="the snapshot ID for the disk volume. Either diskOfferingId or snapshotId must be passed in.")
     private Long snapshotId;
@@ -104,6 +110,14 @@ public class CreateVolumeCmd extends BaseAsyncCreateCmd {
         return size;
     }
 
+    public Long getMinIops() {
+        return minIops;
+    }
+
+    public Long getMaxIops() {
+        return maxIops;
+    }
+
     public Long getSnapshotId() {
         return snapshotId;
     }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/api/src/org/apache/cloudstack/api/response/DiskOfferingResponse.java
----------------------------------------------------------------------
diff --git a/api/src/org/apache/cloudstack/api/response/DiskOfferingResponse.java b/api/src/org/apache/cloudstack/api/response/DiskOfferingResponse.java
index 35cf21a..4291d85 100644
--- a/api/src/org/apache/cloudstack/api/response/DiskOfferingResponse.java
+++ b/api/src/org/apache/cloudstack/api/response/DiskOfferingResponse.java
@@ -52,6 +52,15 @@ public class DiskOfferingResponse extends BaseResponse {
     @SerializedName("iscustomized") @Param(description="true if disk offering uses custom size, false otherwise")
     private Boolean customized;
 
+    @SerializedName("iscustomizediops") @Param(description="true if disk offering uses custom iops, false otherwise")
+    private Boolean customizedIops;
+
+    @SerializedName(ApiConstants.MIN_IOPS) @Param(description="the min iops of the disk offering")
+    private Long minIops;
+
+    @SerializedName(ApiConstants.MAX_IOPS) @Param(description="the max iops of the disk offering")
+    private Long maxIops;
+
     @SerializedName(ApiConstants.TAGS) @Param(description="the tags for the disk offering")
     private String tags;
 
@@ -154,6 +163,30 @@ public class DiskOfferingResponse extends BaseResponse {
         this.customized = customized;
     }
 
+    public Boolean isCustomizedIops() {
+        return customizedIops;
+    }
+
+    public void setCustomizedIops(Boolean customizedIops) {
+        this.customizedIops = customizedIops;
+    }
+
+    public Long getMinIops() {
+        return minIops;
+    }
+
+    public void setMinIops(Long minIops) {
+        this.minIops = minIops;
+    }
+
+    public Long getMaxIops() {
+        return maxIops;
+    }
+
+    public void setMaxIops(Long maxIops) {
+        this.maxIops = maxIops;
+    }
+
     public String getStorageType() {
         return storageType;
     }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/api/src/org/apache/cloudstack/api/response/StoragePoolResponse.java
----------------------------------------------------------------------
diff --git a/api/src/org/apache/cloudstack/api/response/StoragePoolResponse.java b/api/src/org/apache/cloudstack/api/response/StoragePoolResponse.java
index 0050000..7321d98 100644
--- a/api/src/org/apache/cloudstack/api/response/StoragePoolResponse.java
+++ b/api/src/org/apache/cloudstack/api/response/StoragePoolResponse.java
@@ -74,6 +74,9 @@ public class StoragePoolResponse extends BaseResponse {
     @SerializedName("disksizeused") @Param(description="the host's currently used disk size")
     private Long diskSizeUsed;
 
+    @SerializedName("capacityiops") @Param(description="IOPS CloudStack can provision from this storage pool")
+    private Long capacityIops;
+
     @SerializedName("tags") @Param(description="the tags for the storage pool")
     private String tags;
 
@@ -237,6 +240,14 @@ public class StoragePoolResponse extends BaseResponse {
         this.diskSizeUsed = diskSizeUsed;
     }
 
+    public Long getCapacityIops() {
+        return capacityIops;
+    }
+
+    public void setCapacityIops(Long capacityIops) {
+        this.capacityIops = capacityIops;
+    }
+
     public String getTags() {
         return tags;
     }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/api/src/org/apache/cloudstack/api/response/VolumeResponse.java
----------------------------------------------------------------------
diff --git a/api/src/org/apache/cloudstack/api/response/VolumeResponse.java b/api/src/org/apache/cloudstack/api/response/VolumeResponse.java
index b643de1..338fcaa 100644
--- a/api/src/org/apache/cloudstack/api/response/VolumeResponse.java
+++ b/api/src/org/apache/cloudstack/api/response/VolumeResponse.java
@@ -76,6 +76,14 @@ public class VolumeResponse extends BaseResponse implements ControlledViewEntity
     @Param(description = "size of the disk volume")
     private Long size;
 
+    @SerializedName(ApiConstants.MIN_IOPS)
+    @Param(description = "min iops of the disk volume")
+    private Long minIops;
+
+    @SerializedName(ApiConstants.MAX_IOPS)
+    @Param(description = "max iops of the disk volume")
+    private Long maxIops;
+
     @SerializedName(ApiConstants.CREATED)
     @Param(description = "the date the disk volume was created")
     private Date created;
@@ -241,6 +249,14 @@ public class VolumeResponse extends BaseResponse implements ControlledViewEntity
         this.size = size;
     }
 
+    public void setMinIops(Long minIops) {
+        this.minIops = minIops;
+    }
+
+    public void setMaxIops(Long maxIops) {
+        this.maxIops = maxIops;
+    }
+
     public void setCreated(Date created) {
         this.created = created;
     }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/client/WEB-INF/classes/resources/messages.properties
----------------------------------------------------------------------
diff --git a/client/WEB-INF/classes/resources/messages.properties b/client/WEB-INF/classes/resources/messages.properties
index ad8d29d..b1a09b1 100644
--- a/client/WEB-INF/classes/resources/messages.properties
+++ b/client/WEB-INF/classes/resources/messages.properties
@@ -14,6 +14,10 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
+label.custom.disk.iops=Custom IOPS
+label.disk.iops.min=Min IOPS
+label.disk.iops.max=Max IOPS
+label.disk.iops.total=IOPS Total
 label.view.secondary.ips=View secondary IPs
 message.acquire.ip.nic=Please confirm that you would like to acquire a new secondary IP for this NIC.<br/>NOTE: You need to manually configure the newly-acquired secondary IP inside the virtual machine.
 message.select.affinity.groups=Please select any affinity groups you want this VM to belong to:
@@ -395,7 +399,7 @@ label.code=Code
 label.community=Community
 label.compute.and.storage=Compute and Storage
 label.compute.offering=Compute offering
-label.compute.offerings=Compute offerings
+label.compute.offerings=Compute Offerings
 label.compute=Compute
 label.configuration=Configuration
 label.configure.network.ACLs=Configure Network ACLs
@@ -1046,6 +1050,7 @@ label.stopped.vms=Stopped VMs
 label.storage.tags=Storage Tags
 label.storage.traffic=Storage Traffic
 label.storage.type=Storage Type
+label.qos.type=QoS Type
 label.storage=Storage
 label.subdomain.access=Subdomain Access
 label.submit=Submit

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/client/pom.xml
----------------------------------------------------------------------
diff --git a/client/pom.xml b/client/pom.xml
index b8182c2..d1eeb3b 100644
--- a/client/pom.xml
+++ b/client/pom.xml
@@ -22,6 +22,11 @@
   <dependencies>
     <dependency>
       <groupId>org.apache.cloudstack</groupId>
+      <artifactId>cloud-plugin-storage-volume-solidfire</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.cloudstack</groupId>
       <artifactId>cloud-server</artifactId>
       <version>${project.version}</version>
     </dependency>

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/client/tomcatconf/applicationContext.xml.in
----------------------------------------------------------------------
diff --git a/client/tomcatconf/applicationContext.xml.in b/client/tomcatconf/applicationContext.xml.in
index 82ce9e9..7052fd7 100644
--- a/client/tomcatconf/applicationContext.xml.in
+++ b/client/tomcatconf/applicationContext.xml.in
@@ -806,6 +806,7 @@
   <bean id="cloudStackImageStoreProviderImpl" class="org.apache.cloudstack.storage.datastore.provider.CloudStackImageStoreProviderImpl" />
   <bean id="s3ImageStoreProviderImpl" class="org.apache.cloudstack.storage.datastore.provider.S3ImageStoreProviderImpl" />
   <bean id="swiftImageStoreProviderImpl" class="org.apache.cloudstack.storage.datastore.provider.SwiftImageStoreProviderImpl" />  
+  <bean id="solidFireDataStoreProvider" class="org.apache.cloudstack.storage.datastore.provider.SolidfirePrimaryDataStoreProvider" />
   <bean id="ApplicationLoadBalancerService" class="org.apache.cloudstack.network.lb.ApplicationLoadBalancerManagerImpl" />
   <bean id="InternalLoadBalancerVMManager" class="org.apache.cloudstack.network.lb.InternalLoadBalancerVMManagerImpl" />
   <bean id="StorageCacheReplacementAlgorithm" class="org.apache.cloudstack.storage.cache.manager.StorageCacheReplacementAlgorithmLRU" />

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/core/src/com/cloud/agent/api/AttachVolumeAnswer.java
----------------------------------------------------------------------
diff --git a/core/src/com/cloud/agent/api/AttachVolumeAnswer.java b/core/src/com/cloud/agent/api/AttachVolumeAnswer.java
index b377b7c..6b965b0 100644
--- a/core/src/com/cloud/agent/api/AttachVolumeAnswer.java
+++ b/core/src/com/cloud/agent/api/AttachVolumeAnswer.java
@@ -19,35 +19,33 @@ package com.cloud.agent.api;
 
 public class AttachVolumeAnswer extends Answer {
     private Long deviceId;
+    private String vdiUuid;
     private String chainInfo;
 
-    protected AttachVolumeAnswer() {
-
-    }
-
     public AttachVolumeAnswer(AttachVolumeCommand cmd, String result) {
         super(cmd, false, result);
         this.deviceId = null;
     }
 
-    public AttachVolumeAnswer(AttachVolumeCommand cmd, Long deviceId) {
+    public AttachVolumeAnswer(AttachVolumeCommand cmd, Long deviceId, String vdiUuid) {
         super(cmd);
         this.deviceId = deviceId;
+        this.vdiUuid = vdiUuid;
     }
 
-
     public AttachVolumeAnswer(AttachVolumeCommand cmd) {
         super(cmd);
         this.deviceId = null;
     }
 
-    /**
-     * @return the deviceId
-     */
     public Long getDeviceId() {
         return deviceId;
     }
 
+    public String getVdiUuid() {
+    	return vdiUuid;
+    }
+    
     public void setChainInfo(String chainInfo) {
     	this.chainInfo = chainInfo;
     }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/core/src/com/cloud/agent/api/AttachVolumeCommand.java
----------------------------------------------------------------------
diff --git a/core/src/com/cloud/agent/api/AttachVolumeCommand.java b/core/src/com/cloud/agent/api/AttachVolumeCommand.java
index 2658262..2eb503a 100644
--- a/core/src/com/cloud/agent/api/AttachVolumeCommand.java
+++ b/core/src/com/cloud/agent/api/AttachVolumeCommand.java
@@ -19,29 +19,37 @@ package com.cloud.agent.api;
 import com.cloud.storage.Storage.StoragePoolType;
 
 public class AttachVolumeCommand extends Command {
-
-	boolean attach;
-	String vmName;
-	StoragePoolType pooltype;
-	String poolUuid;
-	String volumeFolder;
-	String volumePath;
-	String volumeName;
-	Long deviceId;
-	String chainInfo;
-    Long bytesReadRate;
-    Long bytesWriteRate;
-    Long iopsReadRate;
-    Long iopsWriteRate;
+	private boolean attach;
+	private boolean _managed;
+	private String vmName;
+	private StoragePoolType pooltype;
+	private String volumePath;
+	private String volumeName;
+	private Long deviceId;
+	private String chainInfo;
+	private String poolUuid;
+	private String _storageHost;
+	private int _storagePort;
+	private String _iScsiName;
+	private String _chapInitiatorUsername;
+	private String _chapInitiatorPassword;
+	private String _chapTargetUsername;
+	private String _chapTargetPassword;
+	private Long bytesReadRate;
+	private Long bytesWriteRate;
+	private Long iopsReadRate;
+	private Long iopsWriteRate;
 
 	protected AttachVolumeCommand() {
 	}
 
-	public AttachVolumeCommand(boolean attach, String vmName, StoragePoolType pooltype, String volumeFolder, String volumePath, String volumeName, Long deviceId, String chainInfo) {
+    public AttachVolumeCommand(boolean attach, boolean managed, String vmName,
+            StoragePoolType pooltype, String volumePath, String volumeName,
+            Long deviceId, String chainInfo) {
 		this.attach = attach;
+		this._managed = managed;
 		this.vmName = vmName;
 		this.pooltype = pooltype;
-		this.volumeFolder = volumeFolder;
 		this.volumePath = volumePath;
 		this.volumeName = volumeName;
 		this.deviceId = deviceId;
@@ -54,7 +62,7 @@ public class AttachVolumeCommand extends Command {
     }
 
 	public boolean getAttach() {
-		return attach;
+	    return attach;
 	}
 
 	public String getVmName() {
@@ -69,16 +77,12 @@ public class AttachVolumeCommand extends Command {
         this.pooltype = pooltype;
     }
 
-    public String getVolumeFolder() {
-		return volumeFolder;
-	}
-
 	public String getVolumePath() {
 		return volumePath;
 	}
 
 	public String getVolumeName() {
-		return volumeName;
+	    return volumeName;
 	}
 
     public Long getDeviceId() {
@@ -90,17 +94,77 @@ public class AttachVolumeCommand extends Command {
     }
 
     public String getPoolUuid() {
-    	return poolUuid;
+        return poolUuid;
     }
 
     public void setPoolUuid(String poolUuid) {
-    	this.poolUuid = poolUuid;
+        this.poolUuid = poolUuid;
     }
 
     public String getChainInfo() {
-    	return chainInfo;
+        return chainInfo;
+    }
+
+    public void setStorageHost(String storageHost) {
+        _storageHost = storageHost;
     }
 
+	public String getStorageHost() {
+	    return _storageHost;
+	}
+
+	public void setStoragePort(int storagePort) {
+	    _storagePort = storagePort;
+	}
+
+	public int getStoragePort() {
+	    return _storagePort;
+	}
+
+	public boolean isManaged() {
+        return _managed;
+    }
+
+	public void set_iScsiName(String iScsiName) {
+	    this._iScsiName = iScsiName;
+	}
+
+	public String get_iScsiName() {
+	    return _iScsiName;
+	}
+
+	public void setChapInitiatorUsername(String chapInitiatorUsername) {
+	    _chapInitiatorUsername = chapInitiatorUsername;
+	}
+
+	public String getChapInitiatorUsername() {
+	    return _chapInitiatorUsername;
+	}
+
+	public void setChapInitiatorPassword(String chapInitiatorPassword) {
+	    _chapInitiatorPassword = chapInitiatorPassword;
+	}
+
+	public String getChapInitiatorPassword() {
+	    return _chapInitiatorPassword;
+	}
+
+	public void setChapTargetUsername(String chapTargetUsername) {
+	    _chapTargetUsername = chapTargetUsername;
+	}
+
+	public String getChapTargetUsername() {
+	    return _chapTargetUsername;
+	}
+
+	public void setChapTargetPassword(String chapTargetPassword) {
+	    _chapTargetPassword = chapTargetPassword;
+	}
+
+	public String getChapTargetPassword() {
+	    return _chapTargetPassword;
+	}
+
     public void setBytesReadRate(Long bytesReadRate) {
         this.bytesReadRate = bytesReadRate;
     }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/core/test/org/apache/cloudstack/api/agent/test/AttachVolumeAnswerTest.java
----------------------------------------------------------------------
diff --git a/core/test/org/apache/cloudstack/api/agent/test/AttachVolumeAnswerTest.java b/core/test/org/apache/cloudstack/api/agent/test/AttachVolumeAnswerTest.java
index 251a6cb..9e43d9f 100644
--- a/core/test/org/apache/cloudstack/api/agent/test/AttachVolumeAnswerTest.java
+++ b/core/test/org/apache/cloudstack/api/agent/test/AttachVolumeAnswerTest.java
@@ -26,14 +26,14 @@ import com.cloud.agent.api.AttachVolumeCommand;
 import com.cloud.storage.Storage.StoragePoolType;
 
 public class AttachVolumeAnswerTest {
-    AttachVolumeCommand avc = new AttachVolumeCommand(true, "vmname",
-            StoragePoolType.Filesystem, "vFolder", "vPath", "vName",
+    AttachVolumeCommand avc = new AttachVolumeCommand(true, false, "vmname",
+            StoragePoolType.Filesystem, "vPath", "vName",
             123456789L, "chainInfo");
     AttachVolumeAnswer ava1 = new AttachVolumeAnswer(avc);
     String results = "";
     AttachVolumeAnswer ava2 = new AttachVolumeAnswer(avc, results);
     Long deviceId = 10L;
-    AttachVolumeAnswer ava3 = new AttachVolumeAnswer(avc, deviceId);
+    AttachVolumeAnswer ava3 = new AttachVolumeAnswer(avc, deviceId, "");
 
     @Test
     public void testGetDeviceId() {

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/core/test/org/apache/cloudstack/api/agent/test/AttachVolumeCommandTest.java
----------------------------------------------------------------------
diff --git a/core/test/org/apache/cloudstack/api/agent/test/AttachVolumeCommandTest.java b/core/test/org/apache/cloudstack/api/agent/test/AttachVolumeCommandTest.java
index 1ec416a..6f413c0 100644
--- a/core/test/org/apache/cloudstack/api/agent/test/AttachVolumeCommandTest.java
+++ b/core/test/org/apache/cloudstack/api/agent/test/AttachVolumeCommandTest.java
@@ -25,8 +25,8 @@ import com.cloud.agent.api.AttachVolumeCommand;
 import com.cloud.storage.Storage.StoragePoolType;
 
 public class AttachVolumeCommandTest {
-    AttachVolumeCommand avc = new AttachVolumeCommand(true, "vmname",
-            StoragePoolType.Filesystem, "vFolder", "vPath", "vName",
+    AttachVolumeCommand avc = new AttachVolumeCommand(true, false, "vmname",
+            StoragePoolType.Filesystem, "vPath", "vName",
             123456789L, "chainInfo");
 
     @Test
@@ -66,12 +66,6 @@ public class AttachVolumeCommandTest {
     }
 
     @Test
-    public void testGetVolumeFolder() {
-        String vFolder = avc.getVolumeFolder();
-        assertTrue(vFolder.equals("vFolder"));
-    }
-
-    @Test
     public void testGetVolumePath() {
         String vPath = avc.getVolumePath();
         assertTrue(vPath.equals("vPath"));

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/core/test/org/apache/cloudstack/api/agent/test/BackupSnapshotCommandTest.java
----------------------------------------------------------------------
diff --git a/core/test/org/apache/cloudstack/api/agent/test/BackupSnapshotCommandTest.java b/core/test/org/apache/cloudstack/api/agent/test/BackupSnapshotCommandTest.java
index 9890593..0fee8c6 100644
--- a/core/test/org/apache/cloudstack/api/agent/test/BackupSnapshotCommandTest.java
+++ b/core/test/org/apache/cloudstack/api/agent/test/BackupSnapshotCommandTest.java
@@ -88,6 +88,11 @@ public class BackupSnapshotCommandTest {
         };
 
         @Override
+        public Long getCapacityIops() {
+            return 0L;
+        }
+
+        @Override
         public Long getClusterId() {
             return 0L;
         };

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/core/test/org/apache/cloudstack/api/agent/test/CheckNetworkAnswerTest.java
----------------------------------------------------------------------
diff --git a/core/test/org/apache/cloudstack/api/agent/test/CheckNetworkAnswerTest.java b/core/test/org/apache/cloudstack/api/agent/test/CheckNetworkAnswerTest.java
index 4db6557..b834a26 100644
--- a/core/test/org/apache/cloudstack/api/agent/test/CheckNetworkAnswerTest.java
+++ b/core/test/org/apache/cloudstack/api/agent/test/CheckNetworkAnswerTest.java
@@ -126,6 +126,11 @@ public class CheckNetworkAnswerTest {
             };
 
             @Override
+            public Long getCapacityIops() {
+                return 0L;
+            };
+
+            @Override
             public Long getClusterId() {
                 return 0L;
             };

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/core/test/org/apache/cloudstack/api/agent/test/SnapshotCommandTest.java
----------------------------------------------------------------------
diff --git a/core/test/org/apache/cloudstack/api/agent/test/SnapshotCommandTest.java b/core/test/org/apache/cloudstack/api/agent/test/SnapshotCommandTest.java
index 3076d45..35bdfc8 100644
--- a/core/test/org/apache/cloudstack/api/agent/test/SnapshotCommandTest.java
+++ b/core/test/org/apache/cloudstack/api/agent/test/SnapshotCommandTest.java
@@ -78,6 +78,10 @@ public class SnapshotCommandTest {
             return 0L;
         };
 
+        public Long getCapacityIops() {
+            return 0L;
+        };
+
         public Long getClusterId() {
             return 0L;
         };

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ChapInfo.java
----------------------------------------------------------------------
diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ChapInfo.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ChapInfo.java
new file mode 100644
index 0000000..97c9ecb
--- /dev/null
+++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ChapInfo.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.cloudstack.engine.subsystem.api.storage;
+
+public interface ChapInfo {
+    String getInitiatorUsername();
+    String getInitiatorSecret();
+    String getTargetUsername();
+    String getTargetSecret();
+}

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreDriver.java
----------------------------------------------------------------------
diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreDriver.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreDriver.java
index 1cb6e15..127b858 100644
--- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreDriver.java
+++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreDriver.java
@@ -24,17 +24,11 @@ import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
 import org.apache.cloudstack.storage.command.CommandResult;
 
 public interface DataStoreDriver {
-    void createAsync(DataObject data, AsyncCompletionCallback<CreateCmdResult> callback);
-
-    void deleteAsync(DataObject data, AsyncCompletionCallback<CommandResult> callback);
-
+    DataTO getTO(DataObject data);
+    DataStoreTO getStoreTO(DataStore store);
+    void createAsync(DataStore store, DataObject data, AsyncCompletionCallback<CreateCmdResult> callback);
+    void deleteAsync(DataStore store, DataObject data, AsyncCompletionCallback<CommandResult> callback);
     void copyAsync(DataObject srcdata, DataObject destData, AsyncCompletionCallback<CopyCommandResult> callback);
-
     boolean canCopy(DataObject srcData, DataObject destData);
-
     void resize(DataObject data, AsyncCompletionCallback<CreateCmdResult> callback);
-
-    DataTO getTO(DataObject data);
-
-    DataStoreTO getStoreTO(DataStore store);
 }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java
----------------------------------------------------------------------
diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java
index 2528a53..b124d83 100644
--- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java
+++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java
@@ -22,7 +22,7 @@ import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
 import org.apache.cloudstack.storage.command.CommandResult;
 
 public interface PrimaryDataStoreDriver extends DataStoreDriver {
-    void takeSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback<CreateCmdResult> callback);
-
-    void revertSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback<CommandResult> callback);
+    public ChapInfo getChapInfo(VolumeInfo volumeInfo);
+    public void takeSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback<CreateCmdResult> callback);
+    public void revertSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback<CommandResult> callback);
 }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreParameters.java
----------------------------------------------------------------------
diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreParameters.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreParameters.java
index 3b5362a..c05419f 100644
--- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreParameters.java
+++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreParameters.java
@@ -20,6 +20,7 @@ package org.apache.cloudstack.engine.subsystem.api.storage;
 
 import java.util.Map;
 
+import com.cloud.hypervisor.Hypervisor.HypervisorType;
 import com.cloud.storage.Storage.StoragePoolType;
 
 public class PrimaryDataStoreParameters {
@@ -30,12 +31,17 @@ public class PrimaryDataStoreParameters {
     private Map<String, String> details;
     private String tags;
     private StoragePoolType type;
+    private HypervisorType hypervisorType;
     private String host;
     private String path;
     private int port;
     private String uuid;
     private String name;
     private String userInfo;
+    private long capacityBytes;
+    private long usedBytes;
+    private boolean managed;
+    private Long capacityIops;
 
     /**
      * @return the userInfo
@@ -187,6 +193,30 @@ public class PrimaryDataStoreParameters {
         this.providerName = providerName;
     }
 
+    public void setManaged(boolean managed) {
+    	this.managed = managed;
+    }
+
+    public boolean isManaged() {
+    	return managed;
+    }
+
+    public void setCapacityIops(Long capacityIops) {
+        this.capacityIops = capacityIops;
+    }
+
+    public Long getCapacityIops() {
+        return capacityIops;
+    }
+
+    public void setHypervisorType(HypervisorType hypervisorType) {
+        this.hypervisorType = hypervisorType;
+    }
+
+    public HypervisorType getHypervisorType() {
+        return hypervisorType;
+    }
+
     /**
      * @return the clusterId
      */
@@ -231,4 +261,24 @@ public class PrimaryDataStoreParameters {
     public void setZoneId(Long zoneId) {
         this.zoneId = zoneId;
     }
+
+    public long getCapacityBytes()
+    {
+    	return capacityBytes;
+    }
+
+    public void setCapacityBytes(long capacityBytes)
+    {
+    	this.capacityBytes = capacityBytes;
+    }
+
+    public long getUsedBytes()
+    {
+    	return usedBytes;
+    }
+
+    public void setUsedBytes(long usedBytes)
+    {
+    	this.usedBytes = usedBytes;
+    }
 }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java
----------------------------------------------------------------------
diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java
index f96ea40..7515088 100644
--- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java
+++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java
@@ -42,6 +42,8 @@ public interface VolumeService {
         }
     }
 
+    ChapInfo getChapInfo(VolumeInfo volumeInfo, DataStore dataStore);
+
     /**
      * Creates the volume based on the given criteria
      * 

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/engine/api/src/org/apache/cloudstack/storage/command/AttachCommand.java
----------------------------------------------------------------------
diff --git a/engine/api/src/org/apache/cloudstack/storage/command/AttachCommand.java b/engine/api/src/org/apache/cloudstack/storage/command/AttachCommand.java
index 6b4e9f7..44bce91 100644
--- a/engine/api/src/org/apache/cloudstack/storage/command/AttachCommand.java
+++ b/engine/api/src/org/apache/cloudstack/storage/command/AttachCommand.java
@@ -24,6 +24,14 @@ import com.cloud.agent.api.to.DiskTO;
 public final class AttachCommand extends Command implements StorageSubSystemCommand {
     private DiskTO disk;
     private String vmName;
+    private String _storageHost;
+    private int _storagePort;
+    private boolean _managed;
+    private String _iScsiName;
+    private String _chapInitiatorUsername;
+    private String _chapInitiatorPassword;
+    private String _chapTargetUsername;
+    private String _chapTargetPassword;
 
     public AttachCommand(DiskTO disk, String vmName) {
         super();
@@ -52,4 +60,67 @@ public final class AttachCommand extends Command implements StorageSubSystemComm
         this.vmName = vmName;
     }
 
+    public void setStorageHost(String storageHost) {
+        _storageHost = storageHost;
+    }
+
+    public String getStorageHost() {
+        return _storageHost;
+    }
+
+    public void setStoragePort(int storagePort) {
+        _storagePort = storagePort;
+    }
+
+    public int getStoragePort() {
+        return _storagePort;
+    }
+
+    public void setManaged(boolean managed) {
+        _managed = managed;
+    }
+
+    public boolean isManaged() {
+        return _managed;
+    }
+
+    public void set_iScsiName(String iScsiName) {
+        this._iScsiName = iScsiName;
+    }
+
+    public String get_iScsiName() {
+        return _iScsiName;
+    }
+
+    public void setChapInitiatorUsername(String chapInitiatorUsername) {
+        _chapInitiatorUsername = chapInitiatorUsername;
+    }
+
+    public String getChapInitiatorUsername() {
+        return _chapInitiatorUsername;
+    }
+
+    public void setChapInitiatorPassword(String chapInitiatorPassword) {
+        _chapInitiatorPassword = chapInitiatorPassword;
+    }
+
+    public String getChapInitiatorPassword() {
+        return _chapInitiatorPassword;
+    }
+
+    public void setChapTargetUsername(String chapTargetUsername) {
+        _chapTargetUsername = chapTargetUsername;
+    }
+
+    public String getChapTargetUsername() {
+        return _chapTargetUsername;
+    }
+
+    public void setChapTargetPassword(String chapTargetPassword) {
+        _chapTargetPassword = chapTargetPassword;
+    }
+
+    public String getChapTargetPassword() {
+        return _chapTargetPassword;
+    }
 }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/engine/api/src/org/apache/cloudstack/storage/command/DettachCommand.java
----------------------------------------------------------------------
diff --git a/engine/api/src/org/apache/cloudstack/storage/command/DettachCommand.java b/engine/api/src/org/apache/cloudstack/storage/command/DettachCommand.java
index a0ab4b2..bb7325c 100644
--- a/engine/api/src/org/apache/cloudstack/storage/command/DettachCommand.java
+++ b/engine/api/src/org/apache/cloudstack/storage/command/DettachCommand.java
@@ -24,6 +24,8 @@ import com.cloud.agent.api.to.DiskTO;
 public class DettachCommand extends Command implements StorageSubSystemCommand {
     private DiskTO disk;
     private String vmName;
+    private boolean _managed;
+    private String _iScsiName;
 
     public DettachCommand(DiskTO disk, String vmName) {
         super();
@@ -52,4 +54,19 @@ public class DettachCommand extends Command implements StorageSubSystemCommand {
         this.vmName = vmName;
     }
 
+    public void setManaged(boolean managed) {
+        _managed = managed;
+    }
+
+    public boolean isManaged() {
+        return _managed;
+    }
+
+    public void set_iScsiName(String iScsiName) {
+        _iScsiName = iScsiName;
+    }
+
+    public String get_iScsiName() {
+        return _iScsiName;
+    }
 }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/engine/api/src/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java
----------------------------------------------------------------------
diff --git a/engine/api/src/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java b/engine/api/src/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java
index 9b8de67..a8c1e7f 100644
--- a/engine/api/src/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java
+++ b/engine/api/src/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java
@@ -103,6 +103,12 @@ public class StoragePoolVO implements StoragePool {
     @Enumerated(value = EnumType.STRING)
     private ScopeType scope;
 
+    @Column(name = "managed")
+    private boolean managed;
+
+    @Column(name = "capacity_iops", updatable = true, nullable = true)
+    private Long capacityIops;
+
     @Column(name = "hypervisor")
     @Enumerated(value = EnumType.STRING)
     private HypervisorType hypervisor;
@@ -201,8 +207,24 @@ public class StoragePoolVO implements StoragePool {
         usedBytes = available;
     }
 
-    public void setCapacityBytes(long capacity) {
-        capacityBytes = capacity;
+    public void setCapacityBytes(long capacityBytes) {
+        this.capacityBytes = capacityBytes;
+    }
+
+    public void setManaged(boolean managed) {
+    	this.managed = managed;
+    }
+
+    public boolean isManaged() {
+    	return managed;
+    }
+
+    public void setCapacityIops(Long capacityIops) {
+        this.capacityIops = capacityIops;
+    }
+
+    public Long getCapacityIops() {
+        return capacityIops;
     }
 
     public Long getClusterId() {

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/engine/schema/src/com/cloud/storage/DiskOfferingVO.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/storage/DiskOfferingVO.java b/engine/schema/src/com/cloud/storage/DiskOfferingVO.java
index b7363e7..d9656b4 100755
--- a/engine/schema/src/com/cloud/storage/DiskOfferingVO.java
+++ b/engine/schema/src/com/cloud/storage/DiskOfferingVO.java
@@ -94,6 +94,15 @@ public class DiskOfferingVO implements DiskOffering {
     @Column(name = "uuid")
     private String uuid;
 
+    @Column(name="customized_iops")
+    private Boolean customizedIops;
+
+    @Column(name="min_iops")
+    Long minIops;
+
+    @Column(name="max_iops")
+    Long maxIops;
+
     @Column(name = "sort_key")
     int sortKey;
 
@@ -116,8 +125,8 @@ public class DiskOfferingVO implements DiskOffering {
         this.uuid = UUID.randomUUID().toString();
     }
 
-    public DiskOfferingVO(Long domainId, String name, String displayText, long diskSize, String tags,
-            boolean isCustomized) {
+    public DiskOfferingVO(Long domainId, String name, String displayText, long diskSize, String tags, boolean isCustomized,
+    		Boolean isCustomizedIops, Long minIops, Long maxIops) {
         this.domainId = domainId;
         this.name = name;
         this.displayText = displayText;
@@ -128,6 +137,9 @@ public class DiskOfferingVO implements DiskOffering {
         this.useLocalStorage = false;
         this.customized = isCustomized;
         this.uuid = UUID.randomUUID().toString();
+        this.customizedIops = isCustomizedIops;
+        this.minIops = minIops;
+        this.maxIops = maxIops;
     }
 
     public DiskOfferingVO(String name, String displayText, boolean mirrored, String tags, boolean recreatable,
@@ -175,6 +187,36 @@ public class DiskOfferingVO implements DiskOffering {
     }
 
     @Override
+    public Boolean isCustomizedIops() {
+        return customizedIops;
+    }
+
+    @Override
+    public void setCustomizedIops(Boolean customizedIops) {
+        this.customizedIops = customizedIops;
+    }
+
+    @Override
+    public Long getMinIops() {
+        return minIops;
+    }
+
+	@Override
+    public void setMinIops(Long minIops) {
+        this.minIops = minIops;
+    }
+
+    @Override
+    public Long getMaxIops() {
+        return maxIops;
+    }
+
+    @Override
+    public void setMaxIops(Long maxIops) {
+        this.maxIops = maxIops;
+    }
+
+	@Override
     public String getUniqueName() {
         return uniqueName;
     }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/engine/schema/src/com/cloud/storage/VolumeVO.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/storage/VolumeVO.java b/engine/schema/src/com/cloud/storage/VolumeVO.java
index 02c09a2..7b54f3d 100755
--- a/engine/schema/src/com/cloud/storage/VolumeVO.java
+++ b/engine/schema/src/com/cloud/storage/VolumeVO.java
@@ -70,6 +70,12 @@ public class VolumeVO implements Volume {
     @Column(name = "size")
     Long size;
 
+    @Column(name = "min_iops")
+    Long minIops;
+
+    @Column(name = "max_iops")
+    Long maxIops;
+
     @Column(name = "folder")
     String folder;
 
@@ -141,25 +147,32 @@ public class VolumeVO implements Volume {
     @Column(name = "display_volume", updatable = true, nullable = false)
     protected boolean displayVolume;
 
+    @Column(name = "iscsi_name")
+    private String _iScsiName;
+
     @Transient
     // @Column(name="reservation")
     String reservationId;
 
     // Real Constructor
-    public VolumeVO(Type type, String name, long dcId, long domainId, long accountId, long diskOfferingId, long size) {
+    public VolumeVO(Type type, String name, long dcId, long domainId, long accountId, long diskOfferingId, long size,
+    		Long minIops, Long maxIops, String iScsiName) {
         this.volumeType = type;
         this.name = name;
         this.dataCenterId = dcId;
         this.accountId = accountId;
         this.domainId = domainId;
         this.size = size;
+        this.minIops = minIops;
+        this.maxIops = maxIops;
+        this._iScsiName = iScsiName;
         this.diskOfferingId = diskOfferingId;
         this.state = State.Allocated;
         this.uuid = UUID.randomUUID().toString();
     }
 
-    public VolumeVO(String name, Long dcId, Long podId, long accountId, long domainId, Long instanceId, String folder,
-            String path, long size, Volume.Type vType) {
+    public VolumeVO(String name, long dcId, long podId, long accountId, long domainId, Long instanceId, String folder, String path,
+    		long size, Long minIops, Long maxIops, String iScsiName, Volume.Type vType) {
         this.name = name;
         this.accountId = accountId;
         this.domainId = domainId;
@@ -167,6 +180,9 @@ public class VolumeVO implements Volume {
         this.folder = folder;
         this.path = path;
         this.size = size;
+        this.minIops = minIops;
+        this.maxIops = maxIops;
+        this._iScsiName = iScsiName;
         this.podId = podId;
         this.dataCenterId = dcId;
         this.volumeType = vType;
@@ -177,11 +193,15 @@ public class VolumeVO implements Volume {
 
     // Copy Constructor
     public VolumeVO(Volume that) {
-        this(that.getName(), that.getDataCenterId(), that.getPodId(), that.getAccountId(), that.getDomainId(), that
-                .getInstanceId(), that.getFolder(), that.getPath(), that.getSize(), that.getVolumeType());
+        this(that.getName(), that.getDataCenterId(), that.getPodId(), that.getAccountId(), that.getDomainId(), that.getInstanceId(),
+        		that.getFolder(), that.getPath(), that.getSize(), that.getMinIops(), that.getMaxIops(),
+        		that.get_iScsiName(), that.getVolumeType());
         this.recreatable = that.isRecreatable();
         this.state = that.getState();
         this.size = that.getSize();
+        this.minIops = that.getMinIops();
+        this.maxIops = that.getMaxIops();
+        this._iScsiName = that.get_iScsiName();
         this.diskOfferingId = that.getDiskOfferingId();
         this.poolId = that.getPoolId();
         this.attached = that.getAttached();
@@ -275,6 +295,24 @@ public class VolumeVO implements Volume {
     }
 
     @Override
+    public Long getMinIops() {
+        return minIops;
+    }
+
+    public void setMinIops(Long minIops) {
+        this.minIops = minIops;
+    }
+
+    @Override
+    public Long getMaxIops() {
+        return maxIops;
+    }
+
+    public void setMaxIops(Long maxIops) {
+        this.maxIops = maxIops;
+    }
+
+    @Override
     public Long getInstanceId() {
         return instanceId;
     }
@@ -464,6 +502,15 @@ public class VolumeVO implements Volume {
         this.uuid = uuid;
     }
 
+    @Override
+    public String get_iScsiName() {
+    	return this._iScsiName;
+    }
+
+    public void set_iScsiName(String iScsiName) {
+    	this._iScsiName = iScsiName;
+    }
+
     public boolean isDisplayVolume() {
         return displayVolume;
     }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/engine/schema/src/com/cloud/storage/dao/VolumeDao.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/storage/dao/VolumeDao.java b/engine/schema/src/com/cloud/storage/dao/VolumeDao.java
index 79c0dc3..fb7dc70 100755
--- a/engine/schema/src/com/cloud/storage/dao/VolumeDao.java
+++ b/engine/schema/src/com/cloud/storage/dao/VolumeDao.java
@@ -58,6 +58,8 @@ public interface VolumeDao extends GenericDao<VolumeVO, Long>, StateDao<Volume.S
 
     List<VolumeVO> findByPoolId(long poolId);
 
+    List<VolumeVO> findByPoolId(long poolId, Volume.Type volumeType);
+
     List<VolumeVO> findByInstanceAndDeviceId(long instanceId, long deviceId);
 
     List<VolumeVO> findUsableVolumesForInstance(long instanceId);

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/engine/schema/src/com/cloud/storage/dao/VolumeDaoImpl.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/storage/dao/VolumeDaoImpl.java b/engine/schema/src/com/cloud/storage/dao/VolumeDaoImpl.java
index f82b511..ba85466 100755
--- a/engine/schema/src/com/cloud/storage/dao/VolumeDaoImpl.java
+++ b/engine/schema/src/com/cloud/storage/dao/VolumeDaoImpl.java
@@ -109,6 +109,19 @@ public class VolumeDaoImpl extends GenericDaoBase<VolumeVO, Long> implements Vol
         sc.setParameters("poolId", poolId);
         sc.setParameters("notDestroyed", Volume.State.Destroy);
         sc.setParameters("vType", Volume.Type.ROOT.toString());
+	    return listBy(sc);
+	}
+
+    @Override
+    public List<VolumeVO> findByPoolId(long poolId, Volume.Type volumeType) {
+        SearchCriteria<VolumeVO> sc = AllFieldsSearch.create();
+        sc.setParameters("poolId", poolId);
+        sc.setParameters("notDestroyed", Volume.State.Destroy);
+
+        if (volumeType != null) {
+            sc.setParameters("vType", volumeType.toString());
+        }
+
         return listBy(sc);
     }
 

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/engine/storage/image/src/org/apache/cloudstack/storage/image/TemplateServiceImpl.java
----------------------------------------------------------------------
diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/TemplateServiceImpl.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/TemplateServiceImpl.java
index 96c35f3..da62712 100644
--- a/engine/storage/image/src/org/apache/cloudstack/storage/image/TemplateServiceImpl.java
+++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/TemplateServiceImpl.java
@@ -166,7 +166,7 @@ public class TemplateServiceImpl implements TemplateService {
 
         AsyncCallbackDispatcher<TemplateServiceImpl, CreateCmdResult> caller = AsyncCallbackDispatcher.create(this);
         caller.setCallback(caller.getTarget().createTemplateCallback(null, null)).setContext(context);
-        store.getDriver().createAsync(templateOnStore, caller);
+        store.getDriver().createAsync(store, templateOnStore, caller);
     }
 
     @Override
@@ -511,7 +511,7 @@ public class TemplateServiceImpl implements TemplateService {
         TemplateOpContext<TemplateApiResult> context = new TemplateOpContext<TemplateApiResult>(null, to, future);
         AsyncCallbackDispatcher<TemplateServiceImpl, CommandResult> caller = AsyncCallbackDispatcher.create(this);
         caller.setCallback(caller.getTarget().deleteTemplateCallback(null, null)).setContext(context);
-        to.getDataStore().getDriver().deleteAsync(to, caller);
+        to.getDataStore().getDriver().deleteAsync(to.getDataStore(), to, caller);
         return future;
     }
 

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/ImageStoreImpl.java
----------------------------------------------------------------------
diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/ImageStoreImpl.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/ImageStoreImpl.java
index 6d8e8e5..438ab69 100644
--- a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/ImageStoreImpl.java
+++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/ImageStoreImpl.java
@@ -145,7 +145,7 @@ public class ImageStoreImpl implements ImageStoreEntity {
     @Override
     public boolean delete(DataObject obj) {
         AsyncCallFuture<CommandResult> future = new AsyncCallFuture<CommandResult>();
-        this.driver.deleteAsync(obj, future);
+        this.driver.deleteAsync(obj.getDataStore(), obj, future);
         try {
             future.get();
         } catch (InterruptedException e) {

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/engine/storage/integration-test/test/org/apache/cloudstack/storage/allocator/StorageAllocatorTest.java
----------------------------------------------------------------------
diff --git a/engine/storage/integration-test/test/org/apache/cloudstack/storage/allocator/StorageAllocatorTest.java b/engine/storage/integration-test/test/org/apache/cloudstack/storage/allocator/StorageAllocatorTest.java
index 40d9d41..90696ca 100644
--- a/engine/storage/integration-test/test/org/apache/cloudstack/storage/allocator/StorageAllocatorTest.java
+++ b/engine/storage/integration-test/test/org/apache/cloudstack/storage/allocator/StorageAllocatorTest.java
@@ -148,7 +148,8 @@ public class StorageAllocatorTest {
         diskOffering = diskOfferingDao.persist(diskOffering);
         diskOfferingId = diskOffering.getId();
 
-        volume = new VolumeVO(Volume.Type.ROOT, "volume", dcId, 1, 1, diskOffering.getId(), diskOffering.getDiskSize());
+        volume = new VolumeVO(Volume.Type.ROOT, "volume", dcId, 1, 1, diskOffering.getId(), diskOffering.getDiskSize(),
+                diskOffering.getMinIops(), diskOffering.getMaxIops(), "");
         volume = volumeDao.persist(volume);
         volumeId = volume.getId();
     }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/SnapshotTest.java
----------------------------------------------------------------------
diff --git a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/SnapshotTest.java b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/SnapshotTest.java
index 2579a38..f1eed3a 100644
--- a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/SnapshotTest.java
+++ b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/SnapshotTest.java
@@ -347,7 +347,7 @@ public class SnapshotTest extends CloudStackTestNGBase {
 
     private VolumeVO createVolume(Long templateId, long dataStoreId) {
 
-        VolumeVO volume = new VolumeVO(Volume.Type.DATADISK, UUID.randomUUID().toString(), this.dcId, 1L, 1L, 1L, 1000);
+        VolumeVO volume = new VolumeVO(Volume.Type.DATADISK, UUID.randomUUID().toString(), this.dcId, 1L, 1L, 1L, 1000, 0L, 0L, "");
         volume.setDataCenterId(this.dcId);
         volume.setPoolId(dataStoreId);
         volume = volumeDao.persist(volume);

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/VolumeTest.java
----------------------------------------------------------------------
diff --git a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/VolumeTest.java b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/VolumeTest.java
index 70fdb1b..cbfafc9 100644
--- a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/VolumeTest.java
+++ b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/VolumeTest.java
@@ -317,7 +317,7 @@ public class VolumeTest extends CloudStackTestNGBase {
     }
 
     private VolumeVO createVolume(Long templateId, long dataStoreId) {
-        VolumeVO volume = new VolumeVO(Volume.Type.DATADISK, UUID.randomUUID().toString(), this.dcId, 1L, 1L, 1L, 1000);
+        VolumeVO volume = new VolumeVO(Volume.Type.DATADISK, UUID.randomUUID().toString(), this.dcId, 1L, 1L, 1L, 1000, 0L, 0L, "");
         ;
         volume.setPoolId(dataStoreId);
         volume = volumeDao.persist(volume);

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/VolumeTestVmware.java
----------------------------------------------------------------------
diff --git a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/VolumeTestVmware.java b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/VolumeTestVmware.java
index 4acc8dc..be9dd19 100644
--- a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/VolumeTestVmware.java
+++ b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/VolumeTestVmware.java
@@ -317,7 +317,7 @@ public class VolumeTestVmware extends CloudStackTestNGBase {
     }
 
     private VolumeVO createVolume(Long templateId, long dataStoreId) {
-        VolumeVO volume = new VolumeVO(Volume.Type.DATADISK, UUID.randomUUID().toString(), this.dcId, 1L, 1L, 1L, 1000);
+        VolumeVO volume = new VolumeVO(Volume.Type.DATADISK, UUID.randomUUID().toString(), this.dcId, 1L, 1L, 1L, 1000, 0L, 0L, "");
         ;
         volume.setPoolId(dataStoreId);
         volume = volumeDao.persist(volume);

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/volumeServiceTest.java
----------------------------------------------------------------------
diff --git a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/volumeServiceTest.java b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/volumeServiceTest.java
index 42b0463..08de7f3 100644
--- a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/volumeServiceTest.java
+++ b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/volumeServiceTest.java
@@ -363,7 +363,7 @@ public class volumeServiceTest extends CloudStackTestNGBase {
     }
 
     private VolumeVO createVolume(Long templateId, long dataStoreId) {
-        VolumeVO volume = new VolumeVO(Volume.Type.DATADISK, UUID.randomUUID().toString(), this.dcId, 1L, 1L, 1L, 1000);
+        VolumeVO volume = new VolumeVO(Volume.Type.DATADISK, UUID.randomUUID().toString(), this.dcId, 1L, 1L, 1L, 1000, 0L, 0L, "");
         volume.setPoolId(dataStoreId);
         volume = volumeDao.persist(volume);
         return volume;

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java
----------------------------------------------------------------------
diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java
index 631d220..48ec512 100644
--- a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java
+++ b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java
@@ -355,7 +355,7 @@ public class SnapshotServiceImpl implements SnapshotService {
         AsyncCallbackDispatcher<SnapshotServiceImpl, CommandResult> caller = AsyncCallbackDispatcher.create(this);
         caller.setCallback(caller.getTarget().deleteSnapshotCallback(null, null)).setContext(context);
         DataStore store = snapInfo.getDataStore();
-        store.getDriver().deleteAsync(snapInfo, caller);
+        store.getDriver().deleteAsync(store, snapInfo, caller);
 
         SnapshotResult result = null;
         try {

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/engine/storage/src/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java
----------------------------------------------------------------------
diff --git a/engine/storage/src/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java b/engine/storage/src/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java
index 29b3400..300d932 100644
--- a/engine/storage/src/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java
+++ b/engine/storage/src/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java
@@ -49,26 +49,38 @@ public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator {
         Volume volume = _volumeDao.findById(dskCh.getVolumeId());
         List<Volume> requestVolumes = new ArrayList<Volume>();
         requestVolumes.add(volume);
-        return storageMgr.storagePoolHasEnoughSpace(requestVolumes, pool);
+
+        return storageMgr.storagePoolHasEnoughIops(requestVolumes, pool) &&
+               storageMgr.storagePoolHasEnoughSpace(requestVolumes, pool);
     }
 
-    @Override
-    protected List<StoragePool> select(DiskProfile dskCh, VirtualMachineProfile<? extends VirtualMachine> vmProfile,
-            DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
-        s_logger.debug("ZoneWideStoragePoolAllocator to find storage pool");
-        List<StoragePool> suitablePools = new ArrayList<StoragePool>();
-        HypervisorType hypervisor = dskCh.getHypervisorType();
-        if (hypervisor != null) {
-            if (hypervisor != HypervisorType.KVM && hypervisor != HypervisorType.VMware) {
-                s_logger.debug("Only kvm, VMware hypervisors are enabled to support zone wide storage");
-                return suitablePools;
+	@Override
+	protected List<StoragePool> select(DiskProfile dskCh,
+			VirtualMachineProfile<? extends VirtualMachine> vmProfile,
+			DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
+	    s_logger.debug("ZoneWideStoragePoolAllocator to find storage pool");
+		List<StoragePool> suitablePools = new ArrayList<StoragePool>();
+
+        List<StoragePoolVO> storagePools = _storagePoolDao.findZoneWideStoragePoolsByTags(plan.getDataCenterId(), dskCh.getTags());
+
+        if (storagePools == null) {
+            storagePools = new ArrayList<StoragePoolVO>();
+        }
+
+        List<StoragePoolVO> anyHypervisorStoragePools = new ArrayList<StoragePoolVO>();
+
+        for (StoragePoolVO storagePool : storagePools) {
+            if (HypervisorType.Any.equals(storagePool.getHypervisor())) {
+                anyHypervisorStoragePools.add(storagePool);
             }
         }
 
-        List<StoragePoolVO> storagePools = _storagePoolDao.findZoneWideStoragePoolsByTags(plan.getDataCenterId(), dskCh.getTags());
         List<StoragePoolVO> storagePoolsByHypervisor = _storagePoolDao.findZoneWideStoragePoolsByHypervisor(plan.getDataCenterId(), dskCh.getHypervisorType());
+
         storagePools.retainAll(storagePoolsByHypervisor);
 
+        storagePools.addAll(anyHypervisorStoragePools);
+
         // add remaining pools in zone, that did not match tags, to avoid set
         List<StoragePoolVO> allPools = _storagePoolDao.findZoneWideStoragePoolsByTags(plan.getDataCenterId(), null);
         allPools.removeAll(storagePools);

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/engine/storage/src/org/apache/cloudstack/storage/datastore/DataObjectManagerImpl.java
----------------------------------------------------------------------
diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/DataObjectManagerImpl.java b/engine/storage/src/org/apache/cloudstack/storage/datastore/DataObjectManagerImpl.java
index fa9f993..7878d8d 100644
--- a/engine/storage/src/org/apache/cloudstack/storage/datastore/DataObjectManagerImpl.java
+++ b/engine/storage/src/org/apache/cloudstack/storage/datastore/DataObjectManagerImpl.java
@@ -162,7 +162,7 @@ public class DataObjectManagerImpl implements DataObjectManager {
         AsyncCallbackDispatcher<DataObjectManagerImpl, CreateCmdResult> caller = AsyncCallbackDispatcher.create(this);
         caller.setCallback(caller.getTarget().createAsynCallback(null, null)).setContext(context);
 
-        store.getDriver().createAsync(objInStore, caller);
+        store.getDriver().createAsync(store, objInStore, caller);
         return;
     }
 
@@ -321,7 +321,7 @@ public class DataObjectManagerImpl implements DataObjectManager {
         AsyncCallbackDispatcher<DataObjectManagerImpl, CommandResult> caller = AsyncCallbackDispatcher.create(this);
         caller.setCallback(caller.getTarget().deleteAsynCallback(null, null)).setContext(context);
 
-        data.getDataStore().getDriver().deleteAsync(data, caller);
+        data.getDataStore().getDriver().deleteAsync(data.getDataStore(), data, caller);
         return;
     }
 

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreEntityImpl.java
----------------------------------------------------------------------
diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreEntityImpl.java b/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreEntityImpl.java
index e861910..0aebee2 100644
--- a/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreEntityImpl.java
+++ b/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreEntityImpl.java
@@ -176,6 +176,12 @@ public class PrimaryDataStoreEntityImpl implements StorageEntity {
     }
 
     @Override
+    public Long getCapacityIops() {
+        // TODO Auto-generated method stub
+        return 0L;
+    }
+
+    @Override
     public Long getClusterId() {
         // TODO Auto-generated method stub
         return null;

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/engine/storage/src/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java
----------------------------------------------------------------------
diff --git a/engine/storage/src/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java b/engine/storage/src/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java
index 93b0c2b..97c1671 100644
--- a/engine/storage/src/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java
+++ b/engine/storage/src/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java
@@ -81,7 +81,7 @@ public abstract class BaseImageStoreDriverImpl implements ImageStoreDriver {
     }
 
     @Override
-    public void createAsync(DataObject data, AsyncCompletionCallback<CreateCmdResult> callback) {
+    public void createAsync(DataStore dataStore, DataObject data, AsyncCompletionCallback<CreateCmdResult> callback) {
         CreateContext<CreateCmdResult> context = new CreateContext<CreateCmdResult>(callback, data);
         AsyncCallbackDispatcher<BaseImageStoreDriverImpl, DownloadAnswer> caller = AsyncCallbackDispatcher
                 .create(this);
@@ -184,7 +184,7 @@ public abstract class BaseImageStoreDriverImpl implements ImageStoreDriver {
     }
 
     @Override
-    public void deleteAsync(DataObject data, AsyncCompletionCallback<CommandResult> callback) {
+    public void deleteAsync(DataStore dataStore, DataObject data, AsyncCompletionCallback<CommandResult> callback) {
         DeleteCommand cmd = new DeleteCommand(data.getTO());
 
         CommandResult result = new CommandResult();

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/engine/storage/src/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java
----------------------------------------------------------------------
diff --git a/engine/storage/src/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java b/engine/storage/src/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java
index 6815dec..53ead0b 100644
--- a/engine/storage/src/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java
+++ b/engine/storage/src/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java
@@ -79,6 +79,11 @@ public class PrimaryDataStoreHelper {
         dataStoreVO.setClusterId(params.getClusterId());
         dataStoreVO.setStatus(StoragePoolStatus.Initialized);
         dataStoreVO.setUserInfo(params.getUserInfo());
+        dataStoreVO.setManaged(params.isManaged());
+        dataStoreVO.setCapacityIops(params.getCapacityIops());
+        dataStoreVO.setCapacityBytes(params.getCapacityBytes());
+        dataStoreVO.setUsedBytes(params.getUsedBytes());
+        dataStoreVO.setHypervisor(params.getHypervisorType());
 
         Map<String, String> details = params.getDetails();
         String tags = params.getTags();

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreImpl.java
----------------------------------------------------------------------
diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreImpl.java
index cfdb5c0..420fd29 100644
--- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreImpl.java
+++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreImpl.java
@@ -288,6 +288,11 @@ public class PrimaryDataStoreImpl implements PrimaryDataStore {
     }
 
     @Override
+    public Long getCapacityIops() {
+        return this.pdsv.getCapacityIops();
+    }
+
+    @Override
     public Long getClusterId() {
         return this.pdsv.getClusterId();
     }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeObject.java
----------------------------------------------------------------------
diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeObject.java b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeObject.java
index 071c110..55fc3a6 100644
--- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeObject.java
+++ b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeObject.java
@@ -107,6 +107,11 @@ public class VolumeObject implements VolumeInfo {
         volumeVO.setUuid(uuid);
     }
 
+    @Override
+    public String get_iScsiName() {
+    	return volumeVO.get_iScsiName();
+    }
+
     public void setSize(Long size) {
         volumeVO.setSize(size);
     }
@@ -126,6 +131,16 @@ public class VolumeObject implements VolumeInfo {
         return volumeVO.getSize();
     }
 
+    @Override
+    public Long getMinIops() {
+        return volumeVO.getMinIops();
+    }
+
+    @Override
+    public Long getMaxIops() {
+        return volumeVO.getMaxIops();
+    }
+
     public long getVolumeId() {
         return volumeVO.getId();
     }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
----------------------------------------------------------------------
diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
index 56b0b08..de1e423 100644
--- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
+++ b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
@@ -35,8 +35,11 @@ import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
 import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector;
 import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine;
 import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
 import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
 import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
+import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo;
 import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
 import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
 import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService;
@@ -143,6 +146,16 @@ public class VolumeServiceImpl implements VolumeService {
 
     }
 
+    public ChapInfo getChapInfo(VolumeInfo volumeInfo, DataStore dataStore) {
+        DataStoreDriver dataStoreDriver = dataStore.getDriver();
+
+        if (dataStoreDriver instanceof PrimaryDataStoreDriver) {
+            return ((PrimaryDataStoreDriver)dataStoreDriver).getChapInfo(volumeInfo);
+        }
+
+        return null;
+    }
+
     @Override
     public AsyncCallFuture<VolumeApiResult> createVolumeAsync(VolumeInfo volume, DataStore dataStore) {
         AsyncCallFuture<VolumeApiResult> future = new AsyncCallFuture<VolumeApiResult>();
@@ -154,7 +167,7 @@ public class VolumeServiceImpl implements VolumeService {
         AsyncCallbackDispatcher<VolumeServiceImpl, CreateCmdResult> caller = AsyncCallbackDispatcher.create(this);
         caller.setCallback(caller.getTarget().createVolumeCallback(null, null)).setContext(context);
 
-        dataStore.getDriver().createAsync(volumeOnStore, caller);
+        dataStore.getDriver().createAsync(dataStore, volumeOnStore, caller);
         return future;
     }
 
@@ -238,7 +251,7 @@ public class VolumeServiceImpl implements VolumeService {
         AsyncCallbackDispatcher<VolumeServiceImpl, CommandResult> caller = AsyncCallbackDispatcher.create(this);
         caller.setCallback(caller.getTarget().deleteVolumeCallback(null, null)).setContext(context);
 
-        volume.getDataStore().getDriver().deleteAsync(volume, caller);
+        volume.getDataStore().getDriver().deleteAsync(volume.getDataStore(), volume, caller);
         return future;
     }
 
@@ -935,7 +948,7 @@ public class VolumeServiceImpl implements VolumeService {
         caller.setCallback(caller.getTarget().registerVolumeCallback(null, null));
         caller.setContext(context);
 
-        store.getDriver().createAsync(volumeOnStore, caller);
+        store.getDriver().createAsync(store, volumeOnStore, caller);
         return future;
     }
 

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java
index e0c00fc..914017c 100755
--- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java
+++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java
@@ -2573,7 +2573,7 @@ ServerResource {
             return new AttachVolumeAnswer(cmd, e.toString());
         }
 
-        return new AttachVolumeAnswer(cmd, cmd.getDeviceId());
+        return new AttachVolumeAnswer(cmd, cmd.getDeviceId(), cmd.getVolumePath());
     }
 
     private Answer execute(ReadyCommand cmd) {

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockStorageManagerImpl.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockStorageManagerImpl.java b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockStorageManagerImpl.java
index 21b81e1..a59949f 100644
--- a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockStorageManagerImpl.java
+++ b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockStorageManagerImpl.java
@@ -258,7 +258,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa
             }
             txn.commit();
 
-            return new AttachVolumeAnswer(cmd, cmd.getDeviceId());
+            return new AttachVolumeAnswer(cmd, cmd.getDeviceId(), cmd.getVolumePath());
         } catch (Exception ex) {
             txn.rollback();
             throw new CloudRuntimeException("Error when attaching volume " + cmd.getVolumeName() + " to VM "


[36/50] [abbrv] git commit: updated refs/heads/vmsync to f737019

Posted by ah...@apache.org.
portable ip


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/9e625422
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/9e625422
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/9e625422

Branch: refs/heads/vmsync
Commit: 9e625422835bc3037c52a3ef826e69f6545b93b4
Parents: 34eabd9
Author: radhikap <ra...@citrix.com>
Authored: Mon Jul 1 10:05:21 2013 +0530
Committer: radhikap <ra...@citrix.com>
Committed: Mon Jul 1 10:06:20 2013 +0530

----------------------------------------------------------------------
 docs/en-US/portable-ip.xml | 41 +++++++++++++++++++++++++++++++++--------
 1 file changed, 33 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/9e625422/docs/en-US/portable-ip.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/portable-ip.xml b/docs/en-US/portable-ip.xml
index 81590df..3c87e54 100644
--- a/docs/en-US/portable-ip.xml
+++ b/docs/en-US/portable-ip.xml
@@ -22,13 +22,36 @@
   <title>Portable IPs</title>
   <section id="about-pip">
     <title>About Portable IP</title>
-    <para>Portable IPs in &PRODUCT; are nothing but elastic IPs that can be transferred across
-      geographically separated zones. As an administrator, you can provision a pool of portable IPs
-      at region level and are available for user consumption. The users can acquire portable IPs if
-      admin has provisioned portable public IPs at the region level they are part of. These IPs can
-      be use for any service within an advanced zone. You can also use portable IPs for EIP service
-      in basic zones. Additionally, a portable IP can be transferred from one network to another
-      network.</para>
+    <para>Portable IPs in &PRODUCT; are a region-level pool of IPs, which are elastic in nature and
+      can be transferred across geographically separated zones. As an administrator, you can
+      provision a pool of portable IPs at the region level and make them available for user
+      consumption. Users can acquire portable IPs if the admin has provisioned portable public
+      IPs at the region level they are part of. These IPs can be used for any service within an
+      advanced zone. You can also use portable IPs for EIP services in basic zones. </para>
+    <para>The salient features of Portable IP are as follows:<itemizedlist>
+        <listitem>
+          <para>IP is statically allocated </para>
+        </listitem>
+        <listitem>
+          <para>Not required to be associated with a network </para>
+        </listitem>
+        <listitem>
+          <para>Can transfer association across the networks </para>
+        </listitem>
+        <listitem>
+          <para>Transfer IP across basic/advanced zones </para>
+        </listitem>
+        <listitem>
+          <para>Transfer across VPC, non-VPC isolated/shared networks
+          </para>
+        </listitem>
+        <listitem>
+          <para/>
+        </listitem>
+        <listitem>
+          <para/>
+        </listitem>
+      </itemizedlist></para>
   </section>
   <section id="config-pip">
     <title>Configuring Portable IPs</title>
@@ -73,7 +96,9 @@
           </listitem>
         </itemizedlist>
       </listitem>
-      <listitem><para>Click OK.</para></listitem>
+      <listitem>
+        <para>Click OK.</para>
+      </listitem>
     </orderedlist>
   </section>
   <section id="acquire-pip">


[30/50] [abbrv] git commit: updated refs/heads/vmsync to f737019

Posted by ah...@apache.org.
Fix test_assign_vm to be discovered by nose testrunner

The nosetests runner will not discover tests unless their names start with
the test_ prefix. When a method is wrapped by the log_test_exceptions
decorator, its name changes and no longer starts with test_, causing the
runner to skip the test. Included the __doc__ string as well so the runner
can report results correctly.

Signed-off-by: Prasanna Santhanam <ts...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/2a51c3e2
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/2a51c3e2
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/2a51c3e2

Branch: refs/heads/vmsync
Commit: 2a51c3e2c9e7fd7d96afb9b9fee7199329bb7fdf
Parents: e14f355
Author: Prasanna Santhanam <ts...@apache.org>
Authored: Sun Jun 30 12:50:32 2013 +0530
Committer: Prasanna Santhanam <ts...@apache.org>
Committed: Sun Jun 30 12:52:17 2013 +0530

----------------------------------------------------------------------
 test/integration/component/test_assign_vm.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/2a51c3e2/test/integration/component/test_assign_vm.py
----------------------------------------------------------------------
diff --git a/test/integration/component/test_assign_vm.py b/test/integration/component/test_assign_vm.py
index 1dc93a8..8bc98fe 100644
--- a/test/integration/component/test_assign_vm.py
+++ b/test/integration/component/test_assign_vm.py
@@ -40,13 +40,14 @@ from marvin.integration.lib.common import (get_domain,
                                            list_virtual_machines)
 
 def log_test_exceptions(func):
-    def _log_test_exceptions(self, *args, **kwargs):
+    def test_wrap_exception_log(self, *args, **kwargs):
         try:
             func(self, *args, **kwargs)
         except Exception as e:
             self.debug('Test %s Failed due to Exception=%s' % (func, e))
             raise e
-    return _log_test_exceptions
+    test_wrap_exception_log.__doc__ = func.__doc__
+    return test_wrap_exception_log
 
 class Services:
     """Test service data for:Change the ownershop of
@@ -229,6 +230,7 @@ class TestVMOwnership(cloudstackTestCase):
         except Exception as e:
             self.debug("Warning! Exception in tearDown: %s" % e)
 
+
     @attr(tags = ["advanced"])
     @log_test_exceptions
     def test_01_move_across_different_domains(self):


[32/50] [abbrv] git commit: updated refs/heads/vmsync to f737019

Posted by ah...@apache.org.
pep8 fix

Signed-off-by: Prasanna Santhanam <ts...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/3684bafd
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/3684bafd
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/3684bafd

Branch: refs/heads/vmsync
Commit: 3684bafd37db5884dbd3594b8f1408848fcdffb8
Parents: c731597
Author: Prasanna Santhanam <ts...@apache.org>
Authored: Sun Jun 30 13:58:16 2013 +0530
Committer: Prasanna Santhanam <ts...@apache.org>
Committed: Sun Jun 30 13:58:16 2013 +0530

----------------------------------------------------------------------
 tools/marvin/marvin/deployDataCenter.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/3684bafd/tools/marvin/marvin/deployDataCenter.py
----------------------------------------------------------------------
diff --git a/tools/marvin/marvin/deployDataCenter.py b/tools/marvin/marvin/deployDataCenter.py
index 961718a..d704375 100644
--- a/tools/marvin/marvin/deployDataCenter.py
+++ b/tools/marvin/marvin/deployDataCenter.py
@@ -120,7 +120,8 @@ specify a valid config file" % cfgFile)
                 self.createVlanIpRanges("Basic", pod.guestIpRanges, zoneId,
                                         podId, networkId)
 
-            self.createClusters(pod.clusters, zoneId, podId, vmwareDc=pod.vmwaredc)
+            self.createClusters(pod.clusters, zoneId, podId,
+                                vmwareDc=pod.vmwaredc)
 
     def createVlanIpRanges(self, mode, ipranges, zoneId, podId=None,
                            networkId=None, forvirtualnetwork=None):


[16/50] [abbrv] git commit: updated refs/heads/vmsync to f737019

Posted by ah...@apache.org.
Fix typo in backupSnapshotCommand

Signed-off-by: Prasanna Santhanam <ts...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/085e8838
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/085e8838
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/085e8838

Branch: refs/heads/vmsync
Commit: 085e8838e43f8a523035b0e1370fae00d706fda1
Parents: de38cd8
Author: Prasanna Santhanam <ts...@apache.org>
Authored: Wed Jun 26 18:59:20 2013 +0530
Committer: Prasanna Santhanam <ts...@apache.org>
Committed: Sat Jun 29 13:50:25 2013 +0530

----------------------------------------------------------------------
 .../storage/resource/StorageProcessor.java      |  2 +-
 .../StorageSubsystemCommandHandlerBase.java     |  2 +-
 .../kvm/storage/KVMStorageProcessor.java        |  2 +-
 .../resource/SimulatorStorageProcessor.java     |  2 +-
 .../resource/VmwareStorageProcessor.java        |  2 +-
 .../xen/resource/XenServerStorageProcessor.java | 71 ++++++++++----------
 6 files changed, 40 insertions(+), 41 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/085e8838/core/src/com/cloud/storage/resource/StorageProcessor.java
----------------------------------------------------------------------
diff --git a/core/src/com/cloud/storage/resource/StorageProcessor.java b/core/src/com/cloud/storage/resource/StorageProcessor.java
index ca441ed..f503fa3 100644
--- a/core/src/com/cloud/storage/resource/StorageProcessor.java
+++ b/core/src/com/cloud/storage/resource/StorageProcessor.java
@@ -32,7 +32,7 @@ public interface StorageProcessor {
     public Answer copyVolumeFromImageCacheToPrimary(CopyCommand cmd);
     public Answer copyVolumeFromPrimaryToSecondary(CopyCommand cmd);
     public Answer createTemplateFromVolume(CopyCommand cmd);
-    public Answer backupSnasphot(CopyCommand cmd);
+    public Answer backupSnapshot(CopyCommand cmd);
     public Answer attachIso(AttachCommand cmd);
     public Answer attachVolume(AttachCommand cmd);
     public Answer dettachIso(DettachCommand cmd);

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/085e8838/core/src/com/cloud/storage/resource/StorageSubsystemCommandHandlerBase.java
----------------------------------------------------------------------
diff --git a/core/src/com/cloud/storage/resource/StorageSubsystemCommandHandlerBase.java b/core/src/com/cloud/storage/resource/StorageSubsystemCommandHandlerBase.java
index 23ccd31..c0bbfbe 100644
--- a/core/src/com/cloud/storage/resource/StorageSubsystemCommandHandlerBase.java
+++ b/core/src/com/cloud/storage/resource/StorageSubsystemCommandHandlerBase.java
@@ -81,7 +81,7 @@ public class StorageSubsystemCommandHandlerBase implements StorageSubsystemComma
                 return processor.createTemplateFromVolume(cmd);
             }
         } else if (srcData.getObjectType() == DataObjectType.SNAPSHOT && srcData.getDataStore().getRole() == DataStoreRole.Primary) {
-            return processor.backupSnasphot(cmd);
+            return processor.backupSnapshot(cmd);
         } else if (srcData.getObjectType() == DataObjectType.SNAPSHOT && destData.getObjectType() == DataObjectType.VOLUME) {
         	return processor.createVolumeFromSnapshot(cmd);
         }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/085e8838/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java
index 8dce094..25c94f7 100644
--- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java
+++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java
@@ -404,7 +404,7 @@ public class KVMStorageProcessor implements StorageProcessor {
     }
 
     @Override
-    public Answer backupSnasphot(CopyCommand cmd) {
+    public Answer backupSnapshot(CopyCommand cmd) {
         DataTO srcData = cmd.getSrcTO();
         DataTO destData = cmd.getDestTO();
         SnapshotObjectTO snapshot = (SnapshotObjectTO) srcData;

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/085e8838/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorStorageProcessor.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorStorageProcessor.java b/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorStorageProcessor.java
index 16ba367..d2be9cf 100644
--- a/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorStorageProcessor.java
+++ b/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorStorageProcessor.java
@@ -94,7 +94,7 @@ public class SimulatorStorageProcessor implements StorageProcessor {
     }
 
     @Override
-    public Answer backupSnasphot(CopyCommand cmd) {
+    public Answer backupSnapshot(CopyCommand cmd) {
         DataTO srcData = cmd.getSrcTO();
         DataTO destData = cmd.getDestTO();
         SnapshotObjectTO snapshot = (SnapshotObjectTO) srcData;

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/085e8838/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java
index d3df0f5..421fb22 100644
--- a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java
+++ b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java
@@ -652,7 +652,7 @@ public class VmwareStorageProcessor implements StorageProcessor {
 	        return backupUuid + "/" + backupUuid;
 	    }
 	@Override
-	public Answer backupSnasphot(CopyCommand cmd) {
+	public Answer backupSnapshot(CopyCommand cmd) {
 		SnapshotObjectTO srcSnapshot = (SnapshotObjectTO)cmd.getSrcTO();
 		PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO)srcSnapshot.getDataStore();
 		SnapshotObjectTO destSnapshot = (SnapshotObjectTO)cmd.getDestTO();

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/085e8838/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServerStorageProcessor.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServerStorageProcessor.java b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServerStorageProcessor.java
index e6358f2..074375e 100644
--- a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServerStorageProcessor.java
+++ b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServerStorageProcessor.java
@@ -18,41 +18,6 @@
  */
 package com.cloud.hypervisor.xen.resource;
 
-import java.beans.BeanInfo;
-import java.beans.IntrospectionException;
-import java.beans.Introspector;
-import java.beans.PropertyDescriptor;
-import java.io.File;
-import java.lang.reflect.InvocationTargetException;
-import java.net.URI;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.UUID;
-
-import org.apache.cloudstack.storage.command.AttachAnswer;
-import org.apache.cloudstack.storage.command.AttachCommand;
-import org.apache.cloudstack.storage.command.AttachPrimaryDataStoreAnswer;
-import org.apache.cloudstack.storage.command.AttachPrimaryDataStoreCmd;
-import org.apache.cloudstack.storage.command.CopyCmdAnswer;
-import org.apache.cloudstack.storage.command.CopyCommand;
-import org.apache.cloudstack.storage.command.CreateObjectAnswer;
-import org.apache.cloudstack.storage.command.CreateObjectCommand;
-import org.apache.cloudstack.storage.command.DeleteCommand;
-import org.apache.cloudstack.storage.command.DettachAnswer;
-import org.apache.cloudstack.storage.command.DettachCommand;
-import org.apache.cloudstack.storage.datastore.protocol.DataStoreProtocol;
-import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;
-import org.apache.cloudstack.storage.to.SnapshotObjectTO;
-import org.apache.cloudstack.storage.to.TemplateObjectTO;
-import org.apache.cloudstack.storage.to.VolumeObjectTO;
-import org.apache.log4j.Logger;
-import org.apache.xmlrpc.XmlRpcException;
-
 import com.cloud.agent.api.Answer;
 import com.cloud.agent.api.CreateStoragePoolCommand;
 import com.cloud.agent.api.to.DataObjectType;
@@ -87,6 +52,40 @@ import com.xensource.xenapi.VBD;
 import com.xensource.xenapi.VDI;
 import com.xensource.xenapi.VM;
 import com.xensource.xenapi.VMGuestMetrics;
+import org.apache.cloudstack.storage.command.AttachAnswer;
+import org.apache.cloudstack.storage.command.AttachCommand;
+import org.apache.cloudstack.storage.command.AttachPrimaryDataStoreAnswer;
+import org.apache.cloudstack.storage.command.AttachPrimaryDataStoreCmd;
+import org.apache.cloudstack.storage.command.CopyCmdAnswer;
+import org.apache.cloudstack.storage.command.CopyCommand;
+import org.apache.cloudstack.storage.command.CreateObjectAnswer;
+import org.apache.cloudstack.storage.command.CreateObjectCommand;
+import org.apache.cloudstack.storage.command.DeleteCommand;
+import org.apache.cloudstack.storage.command.DettachAnswer;
+import org.apache.cloudstack.storage.command.DettachCommand;
+import org.apache.cloudstack.storage.datastore.protocol.DataStoreProtocol;
+import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;
+import org.apache.cloudstack.storage.to.SnapshotObjectTO;
+import org.apache.cloudstack.storage.to.TemplateObjectTO;
+import org.apache.cloudstack.storage.to.VolumeObjectTO;
+import org.apache.log4j.Logger;
+import org.apache.xmlrpc.XmlRpcException;
+
+import java.beans.BeanInfo;
+import java.beans.IntrospectionException;
+import java.beans.Introspector;
+import java.beans.PropertyDescriptor;
+import java.io.File;
+import java.lang.reflect.InvocationTargetException;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.UUID;
 
 public class XenServerStorageProcessor implements StorageProcessor {
     private static final Logger s_logger = Logger.getLogger(XenServerStorageProcessor.class);
@@ -1221,7 +1220,7 @@ public class XenServerStorageProcessor implements StorageProcessor {
     }
 
     @Override
-    public Answer backupSnasphot(CopyCommand cmd) {
+    public Answer backupSnapshot(CopyCommand cmd) {
         Connection conn = hypervisorResource.getConnection();
         DataTO srcData = cmd.getSrcTO();
         DataTO cacheData = cmd.getCacheTO();


[49/50] [abbrv] git commit: updated refs/heads/vmsync to f737019

Posted by ah...@apache.org.
Another merge from master.  This is just getting laborious


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/57bafc8f
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/57bafc8f
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/57bafc8f

Branch: refs/heads/vmsync
Commit: 57bafc8fe9be49831416244751c959dd1194330f
Parents: 1f5c672 16b7509
Author: Alex Huang <al...@gmail.com>
Authored: Mon Jul 1 11:29:57 2013 -0700
Committer: Alex Huang <al...@gmail.com>
Committed: Mon Jul 1 11:29:57 2013 -0700

----------------------------------------------------------------------
 agent/src/com/cloud/agent/VmmAgentShell.java    | 504 ----------
 api/src/com/cloud/agent/api/to/DiskTO.java      |  12 +-
 .../com/cloud/agent/api/to/FirewallRuleTO.java  |   6 +
 api/src/com/cloud/network/NetworkModel.java     |   2 +
 api/src/com/cloud/offering/DiskOffering.java    |  16 +-
 api/src/com/cloud/storage/Storage.java          |   1 +
 api/src/com/cloud/storage/StoragePool.java      |   2 +
 api/src/com/cloud/storage/Volume.java           |   6 +
 .../org/apache/cloudstack/api/ApiConstants.java |   6 +
 .../admin/offering/CreateDiskOfferingCmd.java   |  27 +-
 .../admin/storage/CreateStoragePoolCmd.java     |  24 +
 .../user/template/RegisterTemplateCmd.java      |   2 +-
 .../command/user/volume/CreateVolumeCmd.java    |  14 +
 .../api/response/DiskOfferingResponse.java      |  33 +
 .../api/response/StoragePoolResponse.java       |  11 +
 .../api/response/TemplateZoneResponse.java      |  53 +-
 .../cloudstack/api/response/VolumeResponse.java |  16 +
 .../bridge/service/core/ec2/EC2Engine.java      |  18 +-
 .../classes/resources/messages.properties       |   7 +-
 client/pom.xml                                  |  12 +-
 client/tomcatconf/applicationContext.xml.in     | 161 ++-
 client/tomcatconf/commands.properties.in        |   6 +
 client/tomcatconf/componentContext.xml.in       |  40 +-
 client/tomcatconf/nonossComponentContext.xml.in |  41 +-
 .../tomcatconf/simulatorComponentContext.xml.in |  52 +-
 .../com/cloud/agent/api/AttachVolumeAnswer.java |  16 +-
 .../cloud/agent/api/AttachVolumeCommand.java    | 114 ++-
 .../hyperv/resource/HypervResource.java         | 979 -------------------
 .../storage/resource/StorageProcessor.java      |   2 +-
 .../StorageSubsystemCommandHandlerBase.java     |   2 +-
 .../api/agent/test/AttachVolumeAnswerTest.java  |   6 +-
 .../api/agent/test/AttachVolumeCommandTest.java |  10 +-
 .../agent/test/BackupSnapshotCommandTest.java   |   5 +
 .../api/agent/test/CheckNetworkAnswerTest.java  |   5 +
 .../api/agent/test/SnapshotCommandTest.java     |   4 +
 developer/pom.xml                               |   6 +
 docs/en-US/attaching-volume.xml                 |  57 +-
 docs/en-US/creating-a-plugin.xml                |  29 +
 docs/en-US/creating-my-first-plugin.xml         | 216 ++++
 docs/en-US/creating-new-volumes.xml             |  99 +-
 docs/en-US/detach-move-volumes.xml              |  58 +-
 docs/en-US/networks.xml                         |   1 +
 docs/en-US/plugin-development.xml               |  28 +
 docs/en-US/portable-ip.xml                      |  40 +-
 docs/en-US/storage.xml                          |  15 +-
 docs/en-US/upload-existing-volume-to-vm.xml     | 129 ++-
 docs/en-US/vm-storage-migration.xml             |  27 +-
 .../volume-deletion-garbage-collection.xml      |  29 +-
 docs/en-US/working-with-volumes.xml             |  51 +-
 docs/qig/en-US/Author_Group.xml                 |  32 +
 docs/qig/en-US/Book_Info.xml                    |  52 +
 docs/qig/en-US/Chapter.xml                      |  53 +
 docs/qig/en-US/Environment.xml                  | 258 +++++
 docs/qig/en-US/Management.xml                   |  99 ++
 docs/qig/en-US/Overview.xml                     |  93 ++
 docs/qig/en-US/Preface.xml                      |  33 +
 docs/qig/en-US/Revision_History.xml             |  42 +
 docs/qig/en-US/config.xml                       | 177 ++++
 docs/qig/en-US/kvm.xml                          | 142 +++
 docs/qig/en-US/qig.ent                          |  22 +
 docs/qig/en-US/qig.xml                          |  36 +
 .../engine/subsystem/api/storage/ChapInfo.java  |  26 +
 .../subsystem/api/storage/DataStoreDriver.java  |  14 +-
 .../api/storage/PrimaryDataStoreDriver.java     |   6 +-
 .../api/storage/PrimaryDataStoreParameters.java |  50 +
 .../subsystem/api/storage/VolumeService.java    |   2 +
 .../storage/command/AttachCommand.java          |  71 ++
 .../storage/command/DettachCommand.java         |  17 +
 .../storage/datastore/db/StoragePoolVO.java     |  26 +-
 .../src/com/cloud/storage/VolumeManager.java    |   1 -
 engine/schema/src/com/cloud/dc/VlanVO.java      |   4 +
 .../src/com/cloud/network/dao/IPAddressDao.java |   6 +
 .../com/cloud/network/dao/IPAddressDaoImpl.java |  23 +
 .../com/cloud/network/dao/NetworkDaoImpl.java   |   5 +
 .../src/com/cloud/storage/DiskOfferingVO.java   |  46 +-
 .../schema/src/com/cloud/storage/VolumeVO.java  |  57 +-
 .../cloud/storage/dao/VMTemplateDaoImpl.java    |  17 +-
 .../src/com/cloud/storage/dao/VolumeDao.java    |   2 +
 .../com/cloud/storage/dao/VolumeDaoImpl.java    |  13 +
 .../com/cloud/upgrade/dao/Upgrade2214to30.java  |   4 +-
 .../cache/manager/StorageCacheManagerImpl.java  |   8 +-
 .../motion/AncientDataMotionStrategy.java       |  53 +-
 .../storage/motion/DataMotionServiceImpl.java   |  18 +-
 .../storage/image/TemplateServiceImpl.java      |  11 +-
 .../storage/image/store/ImageStoreImpl.java     |   2 +-
 .../storage/allocator/StorageAllocatorTest.java |   3 +-
 .../cloudstack/storage/test/SnapshotTest.java   |   2 +-
 .../cloudstack/storage/test/VolumeTest.java     |   2 +-
 .../storage/test/VolumeTestVmware.java          |   2 +-
 .../storage/test/volumeServiceTest.java         |   2 +-
 .../test/resource/component.xml                 |   9 -
 .../test/resource/storageContext.xml            |   3 +-
 .../storage/snapshot/SnapshotServiceImpl.java   |  10 +-
 .../allocator/ZoneWideStoragePoolAllocator.java |  25 +-
 .../datastore/DataObjectManagerImpl.java        |  12 +-
 .../storage/datastore/DataStoreManagerImpl.java |  29 +-
 .../datastore/PrimaryDataStoreEntityImpl.java   |   6 +
 .../provider/DataStoreProviderManagerImpl.java  |  36 +-
 .../storage/image/BaseImageStoreDriverImpl.java |  49 +-
 .../datastore/PrimaryDataStoreHelper.java       |   5 +
 .../storage/datastore/PrimaryDataStoreImpl.java |   5 +
 .../cloudstack/storage/volume/VolumeObject.java |  15 +
 .../storage/volume/VolumeServiceImpl.java       | 103 +-
 .../framework/async/AsyncRpcConext.java         |  30 -
 .../framework/async/AsyncRpcContext.java        |  30 +
 .../AsyncSampleEventDrivenStyleCaller.java      |   4 +-
 packaging/centos63/cloud.spec                   |   1 +
 .../kvm/resource/LibvirtComputingResource.java  |  28 +-
 .../kvm/storage/KVMStorageProcessor.java        |   2 +-
 .../resource/LibvirtComputingResourceTest.java  |  14 +
 .../agent/manager/MockAgentManagerImpl.java     |  42 +-
 .../agent/manager/MockStorageManagerImpl.java   |  17 +-
 .../resource/SimulatorStorageProcessor.java     |   2 +-
 .../com/cloud/ucs/manager/AddUcsManagerCmd.java | 126 ---
 .../manager/AssociateUcsProfileToBladeCmd.java  |  96 --
 .../com/cloud/ucs/manager/ListUcsBladeCmd.java  |  87 --
 .../cloud/ucs/manager/ListUcsManagerCmd.java    |  88 --
 .../cloud/ucs/manager/ListUcsProfileCmd.java    |  81 --
 .../com/cloud/ucs/manager/UcsBladeResponse.java |  84 --
 .../src/com/cloud/ucs/manager/UcsManager.java   |   8 +
 .../com/cloud/ucs/manager/UcsManagerImpl.java   |   8 +
 .../cloud/ucs/manager/UcsManagerResponse.java   |  73 --
 .../cloud/ucs/manager/UcsProfileResponse.java   |  37 -
 .../apache/cloudstack/api/AddUcsManagerCmd.java | 128 +++
 .../api/AssociateUcsProfileToBladeCmd.java      |  99 ++
 .../apache/cloudstack/api/ListUcsBladeCmd.java  |  90 ++
 .../cloudstack/api/ListUcsManagerCmd.java       |  90 ++
 .../cloudstack/api/ListUcsProfileCmd.java       |  84 ++
 .../api/response/UcsBladeResponse.java          |  84 ++
 .../api/response/UcsManagerResponse.java        |  73 ++
 .../api/response/UcsProfileResponse.java        |  37 +
 .../manager/VmwareStorageManagerImpl.java       |  16 +-
 .../vmware/resource/VmwareResource.java         | 283 +++++-
 .../resource/VmwareStorageProcessor.java        |   2 +-
 .../motion/VmwareStorageMotionStrategyTest.java |   4 +-
 .../xen/resource/CitrixResourceBase.java        | 392 ++++----
 .../xen/resource/XenServerStorageProcessor.java |  89 +-
 .../resources/components-example.xml            |   2 -
 .../network/resource/NetscalerResource.java     | 121 ++-
 .../src/com/cloud/api/commands/AddSspCmd.java   | 139 ---
 .../com/cloud/api/commands/DeleteSspCmd.java    |  75 --
 .../src/com/cloud/api/response/SspResponse.java |  77 --
 .../com/cloud/network/dao/SspCredentialDao.java |  33 -
 .../cloud/network/dao/SspCredentialDaoImpl.java |  42 -
 .../com/cloud/network/dao/SspCredentialVO.java  |  67 --
 .../src/com/cloud/network/dao/SspTenantDao.java |  34 -
 .../com/cloud/network/dao/SspTenantDaoImpl.java |  48 -
 .../src/com/cloud/network/dao/SspTenantVO.java  |  55 --
 .../src/com/cloud/network/dao/SspUuidDao.java   |  36 -
 .../com/cloud/network/dao/SspUuidDaoImpl.java   | 116 ---
 .../src/com/cloud/network/dao/SspUuidVO.java    |  73 --
 .../com/cloud/network/element/SspClient.java    | 272 ------
 .../com/cloud/network/element/SspElement.java   | 620 ------------
 .../com/cloud/network/element/SspManager.java   |  71 --
 .../com/cloud/network/element/SspService.java   |  46 -
 .../cloud/network/guru/SspGuestNetworkGuru.java | 170 ----
 .../cloudstack/api/commands/AddSspCmd.java      | 139 +++
 .../cloudstack/api/commands/DeleteSspCmd.java   |  75 ++
 .../cloudstack/api/response/SspResponse.java    |  77 ++
 .../network/dao/SspCredentialDao.java           |  33 +
 .../network/dao/SspCredentialDaoImpl.java       |  42 +
 .../cloudstack/network/dao/SspCredentialVO.java |  67 ++
 .../cloudstack/network/dao/SspTenantDao.java    |  34 +
 .../network/dao/SspTenantDaoImpl.java           |  48 +
 .../cloudstack/network/dao/SspTenantVO.java     |  55 ++
 .../cloudstack/network/dao/SspUuidDao.java      |  36 +
 .../cloudstack/network/dao/SspUuidDaoImpl.java  | 116 +++
 .../cloudstack/network/dao/SspUuidVO.java       |  73 ++
 .../cloudstack/network/element/SspClient.java   | 272 ++++++
 .../cloudstack/network/element/SspElement.java  | 622 ++++++++++++
 .../cloudstack/network/element/SspManager.java  |  71 ++
 .../cloudstack/network/element/SspService.java  |  47 +
 .../network/guru/SspGuestNetworkGuru.java       | 172 ++++
 .../cloud/network/element/SspClientTest.java    |  92 --
 .../cloud/network/element/SspElementTest.java   | 152 ---
 .../network/element/SspClientTest.java          |  93 ++
 .../network/element/SspElementTest.java         | 153 +++
 plugins/pom.xml                                 |   7 +-
 .../CloudStackImageStoreLifeCycleImpl.java      |  34 +-
 .../driver/SampleImageStoreDriverImpl.java      |   2 -
 plugins/storage/image/simulator/pom.xml         |  68 ++
 .../driver/SimulatorImageStoreDriverImpl.java   | 106 ++
 .../SimulatorImageStoreLifeCycleImpl.java       | 132 +++
 .../SimulatorImageStoreProviderImpl.java        |  98 ++
 .../CloudStackPrimaryDataStoreDriverImpl.java   |   9 +-
 .../SamplePrimaryDataStoreDriverImpl.java       |  21 +-
 plugins/storage/volume/solidfire/pom.xml        |   7 +-
 .../driver/SolidfirePrimaryDataStoreDriver.java | 480 ++++++++-
 .../SolidFirePrimaryDataStoreLifeCycle.java     | 274 ++++++
 .../SolidfirePrimaryDataStoreProvider.java      |  81 +-
 .../storage/datastore/util/SolidFireUtil.java   | 917 +++++++++++++++++
 server/src/com/cloud/acl/DomainChecker.java     |   8 +-
 .../api/query/dao/DiskOfferingJoinDaoImpl.java  |   3 +
 .../api/query/dao/StoragePoolJoinDaoImpl.java   |   2 +
 .../api/query/dao/TemplateJoinDaoImpl.java      |  80 +-
 .../cloud/api/query/dao/VolumeJoinDaoImpl.java  |   3 +
 .../cloud/api/query/vo/DiskOfferingJoinVO.java  |  33 +
 .../cloud/api/query/vo/StoragePoolJoinVO.java   |  13 +-
 .../com/cloud/api/query/vo/VolumeJoinVO.java    |  21 +-
 server/src/com/cloud/configuration/Config.java  |   2 +-
 .../configuration/ConfigurationManager.java     |   6 +-
 .../configuration/ConfigurationManagerImpl.java | 194 ++--
 .../deploy/DeploymentPlanningManagerImpl.java   |  13 +
 .../com/cloud/hypervisor/guru/HypervGuru.java   |  72 --
 .../hyperv/HypervServerDiscoverer.java          | 243 -----
 .../network/ExternalDeviceUsageManager.java     |  33 +
 .../network/ExternalDeviceUsageManagerImpl.java | 673 +++++++++++++
 .../ExternalLoadBalancerUsageManager.java       |  33 -
 .../ExternalLoadBalancerUsageManagerImpl.java   | 673 -------------
 .../com/cloud/network/NetworkManagerImpl.java   |   5 +
 .../src/com/cloud/network/NetworkModelImpl.java |  21 +
 .../cloud/network/NetworkUsageManagerImpl.java  |  31 +-
 .../lb/LoadBalancingRulesManagerImpl.java       |   6 +-
 .../VirtualNetworkApplianceManagerImpl.java     |  11 +-
 .../com/cloud/resource/ResourceManagerImpl.java |   6 -
 .../cloud/server/ConfigurationServerImpl.java   |   2 +-
 .../src/com/cloud/storage/StorageManager.java   |   9 +-
 .../com/cloud/storage/StorageManagerImpl.java   | 207 ++--
 .../com/cloud/storage/VolumeManagerImpl.java    | 149 ++-
 .../secondary/SecondaryStorageManagerImpl.java  |   5 +-
 .../template/HypervisorTemplateAdapter.java     | 402 ++++----
 .../com/cloud/template/TemplateAdapterBase.java |   4 +-
 .../com/cloud/template/TemplateManagerImpl.java |   2 +-
 server/src/com/cloud/test/DatabaseConfig.java   |   2 +-
 server/src/com/cloud/vm/UserVmManagerImpl.java  |   4 +-
 .../cloudstack/region/PortableIpDaoImpl.java    |   2 +-
 .../GlobalLoadBalancingRulesServiceImpl.java    |   5 +-
 .../com/cloud/network/MockNetworkModelImpl.java |   5 +
 .../network/UpdatePhysicalNetworkTest.java      |   3 +
 .../cloud/vpc/MockConfigurationManagerImpl.java |   3 +-
 .../com/cloud/vpc/MockNetworkModelImpl.java     |   4 +
 setup/db/db/schema-410to420.sql                 |  22 +
 test/integration/component/test_accounts.py     |   2 +-
 .../component/test_advancedsg_networks.py       |   7 +-
 .../component/test_affinity_groups.py           | 135 +--
 test/integration/component/test_assign_vm.py    |   6 +-
 test/integration/component/test_ldap.py         |  11 +-
 .../component/test_netscaler_configs.py         |   8 +-
 .../component/test_shared_networks.py           |   2 +-
 .../component/test_vpc_network_lbrules.py       |   4 +-
 .../component/test_vpc_network_pfrules.py       |   4 +-
 .../component/test_vpc_network_staticnatrule.py |   4 +-
 test/integration/component/test_vpc_routers.py  |  78 +-
 ...deploy_vms_with_varied_deploymentplanners.py |  48 +-
 test/integration/smoke/test_network.py          | 175 ++--
 test/integration/smoke/test_vm_snapshots.py     | 519 +++++-----
 tools/apidoc/gen_toc.py                         |   4 +-
 tools/appliance/build.sh                        |   7 +-
 tools/marvin/marvin/cloudstackConnection.py     |   5 +-
 tools/marvin/marvin/codegenerator.py            |  10 +-
 tools/marvin/marvin/configGenerator.py          |  52 +-
 tools/marvin/marvin/deployDataCenter.py         |  38 +-
 tools/marvin/marvin/jsonHelper.py               |   4 +-
 ui/dictionary.jsp                               |   5 +
 ui/scripts/autoscaler.js                        |   2 +-
 ui/scripts/configuration.js                     | 179 +++-
 ui/scripts/docs.js                              |  16 +
 ui/scripts/sharedFunctions.js                   |   4 +-
 ui/scripts/storage.js                           |  56 +-
 ui/scripts/system.js                            |  19 +-
 utils/src/com/cloud/utils/CleanupDelegate.java  |  22 -
 utils/src/com/cloud/utils/FileUtil.java         |  41 +-
 utils/src/com/cloud/utils/StringUtils.java      |   8 +
 .../cloud/hypervisor/vmware/mo/DatastoreMO.java |   2 +-
 .../vmware/mo/HostDatastoreSystemMO.java        |  20 +-
 .../com/cloud/hypervisor/vmware/mo/HostMO.java  |  15 +-
 .../vmware/mo/HostStorageSystemMO.java          |  51 +
 .../hypervisor/vmware/mo/VirtualMachineMO.java  |  17 +
 268 files changed, 10983 insertions(+), 7582 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/57bafc8f/api/src/com/cloud/agent/api/to/FirewallRuleTO.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/57bafc8f/api/src/org/apache/cloudstack/api/command/admin/offering/CreateDiskOfferingCmd.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/57bafc8f/api/src/org/apache/cloudstack/api/command/admin/storage/CreateStoragePoolCmd.java
----------------------------------------------------------------------
diff --cc api/src/org/apache/cloudstack/api/command/admin/storage/CreateStoragePoolCmd.java
index 2b18329,f5750b9..2ee5a30
--- a/api/src/org/apache/cloudstack/api/command/admin/storage/CreateStoragePoolCmd.java
+++ b/api/src/org/apache/cloudstack/api/command/admin/storage/CreateStoragePoolCmd.java
@@@ -120,11 -131,23 +132,23 @@@ public class CreateStoragePoolCmd exten
      public String getStorageProviderName() {
          return this.storageProviderName;
      }
 -
 +    
      public String getScope() {
 -        return this.scope;
 +       return this.scope;
      }
  
+     public Boolean isManaged() {
+     	return managed;
+     }
+ 
+     public Long getCapacityIops() {
+         return capacityIops;
+     }
+ 
+     public Long getCapacityBytes() {
+         return capacityBytes;
+     }
+ 
      public String getHypervisor() {
          return hypervisor;
      }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/57bafc8f/api/src/org/apache/cloudstack/api/command/user/template/RegisterTemplateCmd.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/57bafc8f/api/src/org/apache/cloudstack/api/command/user/volume/CreateVolumeCmd.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/57bafc8f/api/src/org/apache/cloudstack/api/response/DiskOfferingResponse.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/57bafc8f/api/src/org/apache/cloudstack/api/response/StoragePoolResponse.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/57bafc8f/api/src/org/apache/cloudstack/api/response/VolumeResponse.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/57bafc8f/client/tomcatconf/applicationContext.xml.in
----------------------------------------------------------------------
diff --cc client/tomcatconf/applicationContext.xml.in
index 627c655,14255c1..8031911
--- a/client/tomcatconf/applicationContext.xml.in
+++ b/client/tomcatconf/applicationContext.xml.in
@@@ -17,16 -17,14 +17,14 @@@
    under the License.
  -->
  <beans xmlns="http://www.springframework.org/schema/beans"
 -       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
 -       xmlns:context="http://www.springframework.org/schema/context"
 -       xsi:schemaLocation="http://www.springframework.org/schema/beans
 +  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" 
 +  xmlns:context="http://www.springframework.org/schema/context"
-   xmlns:tx="http://www.springframework.org/schema/tx" 
-   xmlns:aop="http://www.springframework.org/schema/aop"
 +  xsi:schemaLocation="http://www.springframework.org/schema/beans
                        http://www.springframework.org/schema/beans/spring-beans-3.0.xsd
-                       http://www.springframework.org/schema/tx 
-                       http://www.springframework.org/schema/tx/spring-tx-3.0.xsd
-                       http://www.springframework.org/schema/aop
-                       http://www.springframework.org/schema/aop/spring-aop-3.0.xsd
+ 
+ 
+ 
+ 
                        http://www.springframework.org/schema/context
                        http://www.springframework.org/schema/context/spring-context-3.0.xsd">                     
  
@@@ -613,7 -611,96 +607,96 @@@
    <bean id="OvmGuru" class="com.cloud.ovm.hypervisor.OvmGuru">
      <property name="name" value="OvmGuru"/>
    </bean>
 -
 +  
+ 
+   <!--=====================================================================================================-->
+   <!--                                                                                                     -->
+   <!--                           Storage Subsystem Components and Helpers                                  -->
+   <!--                                                                                                     -->
+   <!--=====================================================================================================-->
+ 
+   <!--Filesystem types-->
+   <bean id="iSCSI" class="org.apache.cloudstack.storage.datastore.type.ISCSI" />
+   <bean id="networkFileSystem" class="org.apache.cloudstack.storage.datastore.type.NetworkFileSystem" />
+ 
+   <!--Image formats-->
+   <bean id="ISO" class="org.apache.cloudstack.storage.image.format.ISO" />
+   <bean id="OVA" class="org.apache.cloudstack.storage.image.format.OVA" />
+   <bean id="QCOW2" class="org.apache.cloudstack.storage.image.format.QCOW2" />
+   <bean id="VHD" class="org.apache.cloudstack.storage.image.format.VHD" />
+   <bean id="unknown" class="org.apache.cloudstack.storage.image.format.Unknown" />
+ 
+   <!--Data Store Services -->
+   <bean id="snapshotServiceImpl" class="org.apache.cloudstack.storage.snapshot.SnapshotServiceImpl"
+         depends-on="snapshotStateMachineManagerImpl, snapshotDataFactoryImpl, dataStoreManagerImpl, dataMotionServiceImpl, objectInDataStoreManagerImpl"/>
+   <bean id="templateServiceImpl" class="org.apache.cloudstack.storage.image.TemplateServiceImpl"
+         depends-on="dataObjectManagerImpl, dataStoreManagerImpl, dataMotionServiceImpl, objectInDataStoreManagerImpl, defaultEndPointSelector, templateDataFactoryImpl"/>
+   <bean id="volumeServiceImpl" class="org.apache.cloudstack.storage.volume.VolumeServiceImpl"
+         depends-on="snapshotManagerImpl, dataMotionServiceImpl"/>
+ 
+   <bean id="xenserverSnapshotStrategy" class="org.apache.cloudstack.storage.snapshot.XenserverSnapshotStrategy" />
+ 
+   <!--Data Store Factory-->
+   <bean id="templateDataFactoryImpl" class="org.apache.cloudstack.storage.image.TemplateDataFactoryImpl" />
+   <bean id="snapshotDataFactoryImpl" class="org.apache.cloudstack.storage.snapshot.SnapshotDataFactoryImpl"
+         depends-on="dataStoreManagerImpl, snapshotDataStoreDaoImpl, volumeDataFactoryImpl"/>
+   <bean id="volumeDataFactoryImpl" class="org.apache.cloudstack.storage.volume.VolumeDataFactoryImpl" />
+ 
+   <bean id="objectInDataStoreManagerImpl" class="org.apache.cloudstack.storage.datastore.ObjectInDataStoreManagerImpl" />
+   <bean id="dataObjectManagerImpl" class="org.apache.cloudstack.storage.datastore.DataObjectManagerImpl" />
+ 
+   <!--Data Store Helpers-->
+   <bean id="primaryDataStoreHelper" class="org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper" />
+   <bean id="imageStoreHelper" class="org.apache.cloudstack.storage.image.datastore.ImageStoreHelper" />
+   <bean id="imageFormatHelper" class="org.apache.cloudstack.storage.image.format.ImageFormatHelper" />
+ 
+   <bean id="storageCacheRandomAllocator" class="org.apache.cloudstack.storage.cache.allocator.StorageCacheRandomAllocator" />
+   <bean id="storageCacheManagerImpl" class="org.apache.cloudstack.storage.cache.manager.StorageCacheManagerImpl"  />
+   <bean id="StorageCacheReplacementAlgorithm" class="org.apache.cloudstack.storage.cache.manager.StorageCacheReplacementAlgorithmLRU" />
+ 
+   <bean id="snapshotStateMachineManagerImpl" class="org.apache.cloudstack.storage.snapshot.SnapshotStateMachineManagerImpl" />
+   <bean id="defaultEndPointSelector" class="org.apache.cloudstack.storage.endpoint.DefaultEndPointSelector" />
+ 
+ 
+   <bean id="ancientDataMotionStrategy" class="org.apache.cloudstack.storage.motion.AncientDataMotionStrategy" />
+   <bean id="xenserverStorageMotionStrategy" class="org.apache.cloudstack.storage.motion.XenServerStorageMotionStrategy" />
+ 
+   <!--Data Motion Services-->
+   <bean id="dataMotionServiceImpl" class="org.apache.cloudstack.storage.motion.DataMotionServiceImpl">
+     <property name="strategies">
+       <list>
+         <ref local="ancientDataMotionStrategy"/>
+         <ref local="xenserverStorageMotionStrategy"/>
+       </list>
+     </property>
+   </bean>
+ 
+   <!--
+     Data Store Provider Manager
+   -->
+   <bean id="primaryDataStoreProviderMgr"
+         class="org.apache.cloudstack.storage.datastore.manager.PrimaryDataStoreProviderManagerImpl"/>
+   <bean id="imageStoreProviderMgr" class="org.apache.cloudstack.storage.image.manager.ImageStoreProviderManagerImpl"/>
+ 
+   <bean id="dataStoreManagerImpl" class="org.apache.cloudstack.storage.datastore.DataStoreManagerImpl"
+         depends-on="dataStoreProviderManager">
+     <property name="primaryStoreMgr" ref="primaryDataStoreProviderMgr"/>
+     <property name="imageDataStoreMgr" ref="imageStoreProviderMgr"/>
+   </bean>
+ 
+   <bean id="cloudStackPrimaryDataStoreProviderImpl"
+         class="org.apache.cloudstack.storage.datastore.provider.CloudStackPrimaryDataStoreProviderImpl"/>
+ 
+   <bean id="dataStoreProviderManager"
+         class="org.apache.cloudstack.storage.datastore.provider.DataStoreProviderManagerImpl">
+     <property name="providers">
+       <list>
+         <!--Data Store Providers-->
+         <ref bean="cloudStackPrimaryDataStoreProviderImpl"/>
+       </list>
+     </property>
+   </bean>
+ 
    <!--
      Managers
    -->
@@@ -719,8 -810,9 +802,10 @@@
    <bean id="virtualNetworkApplianceManagerImpl" class="com.cloud.network.router.VirtualNetworkApplianceManagerImpl" />
    <bean id="vpcManagerImpl" class="com.cloud.network.vpc.VpcManagerImpl" />
    <bean id="vpcVirtualNetworkApplianceManagerImpl" class="com.cloud.network.router.VpcVirtualNetworkApplianceManagerImpl" />
 +  <bean id="usageServerMonitor" class="com.cloud.usage.UsageServerMonitor" />
  
+ 
+ 
    <!--
      Misc components
    -->
@@@ -742,46 -829,26 +822,24 @@@
    <bean id="cloudOrchestrator" class="org.apache.cloudstack.platform.orchestration.CloudOrchestrator" />
    <bean id="clusterRestService" class="org.apache.cloudstack.engine.rest.service.api.ClusterRestService" />
    <bean id="consoleProxyServlet" class="com.cloud.servlet.ConsoleProxyServlet" />
 -  <bean id="dataCenterResourceManagerImpl" class="org.apache.cloudstack.engine.datacenter.entity.api.DataCenterResourceManagerImpl" />
 +  <bean id="dataCenterResourceManagerImpl" class="org.apache.cloudstack.engine.datacenter.entity.DataCenterResourceManagerImpl" />
    <bean id="dataDisk" class="org.apache.cloudstack.engine.subsystem.api.storage.type.DataDisk" />
-   <bean id="dataMotionServiceImpl" class="org.apache.cloudstack.storage.motion.DataMotionServiceImpl" />
-   <bean id="dataObjectManagerImpl" class="org.apache.cloudstack.storage.datastore.DataObjectManagerImpl" />
-   <bean id="dataStoreManagerImpl" class="org.apache.cloudstack.storage.datastore.DataStoreManagerImpl" />
-   <bean id="defaultEndPointSelector" class="org.apache.cloudstack.storage.endpoint.DefaultEndPointSelector" />
-   <bean id="primaryDataStoreProviderManagerImpl" class="org.apache.cloudstack.storage.datastore.manager.PrimaryDataStoreProviderManagerImpl" />
-   <bean id="imageStoreProviderManagerImpl" class="org.apache.cloudstack.storage.image.manager.ImageStoreProviderManagerImpl" />  
+ 
+ 
    <bean id="eventUtils" class="com.cloud.event.EventUtils" />
-   <bean id="iSCSI" class="org.apache.cloudstack.storage.datastore.type.ISCSI" />
-   <bean id="ISO" class="org.apache.cloudstack.storage.image.format.ISO" />
-   <bean id="templateDataFactoryImpl" class="org.apache.cloudstack.storage.image.TemplateDataFactoryImpl" />
-   <bean id="imageStoreHelper" class="org.apache.cloudstack.storage.image.datastore.ImageStoreHelper" />
-   <bean id="imageFormatHelper" class="org.apache.cloudstack.storage.image.format.ImageFormatHelper" />
-   <bean id="templateServiceImpl" class="org.apache.cloudstack.storage.image.TemplateServiceImpl" />
+   <bean id="podRestService" class="org.apache.cloudstack.engine.rest.service.api.PodRestService" />
    <bean id="iso" class="org.apache.cloudstack.engine.subsystem.api.storage.type.Iso" />
-   <bean id="networkFileSystem" class="org.apache.cloudstack.storage.datastore.type.NetworkFileSystem" />
    <bean id="networkRestService" class="org.apache.cloudstack.engine.rest.service.api.NetworkRestService" />
-   <bean id="OVA" class="org.apache.cloudstack.storage.image.format.OVA" />
-   <bean id="objectInDataStoreManagerImpl" class="org.apache.cloudstack.storage.datastore.ObjectInDataStoreManagerImpl" />
-   <bean id="podRestService" class="org.apache.cloudstack.engine.rest.service.api.PodRestService" />
-   <bean id="primaryDataStoreHelper" class="org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper" />
    <bean id="provisioningServiceImpl" class="org.apache.cloudstack.engine.service.api.ProvisioningServiceImpl" />
-   <bean id="QCOW2" class="org.apache.cloudstack.storage.image.format.QCOW2" />
-   <bean id="registerCompleteServlet" class="com.cloud.servlet.RegisterCompleteServlet" />
    <bean id="rootDisk" class="org.apache.cloudstack.engine.subsystem.api.storage.type.RootDisk" />
-   <bean id="snapshotDataFactoryImpl" class="org.apache.cloudstack.storage.snapshot.SnapshotDataFactoryImpl" />
-   <bean id="snapshotServiceImpl" class="org.apache.cloudstack.storage.snapshot.SnapshotServiceImpl" />
-   <bean id="snapshotStateMachineManagerImpl" class="org.apache.cloudstack.storage.snapshot.SnapshotStateMachineManagerImpl" />
+   <bean id="registerCompleteServlet" class="com.cloud.servlet.RegisterCompleteServlet" />
    <bean id="statsCollector" class="com.cloud.server.StatsCollector" />
    <bean id="storagePoolAutomationImpl" class="com.cloud.storage.StoragePoolAutomationImpl" />
-   <bean id="unknown" class="org.apache.cloudstack.storage.image.format.Unknown" />
    <bean id="usageEventUtils" class="com.cloud.event.UsageEventUtils" />
-   <bean id="VHD" class="org.apache.cloudstack.storage.image.format.VHD" />
 -  <bean id="userContextInitializer" class="com.cloud.user.UserContextInitializer" />
 -  <bean id="vMEntityManagerImpl" class="org.apache.cloudstack.engine.cloud.entity.api.VMEntityManagerImpl" />
 -  <bean id="virtualMachineEntityFactory" class="org.apache.cloudstack.engine.cloud.entity.api.VirtualMachineEntityFactory" />
 +  <bean id="vMEntityManagerImpl" class="org.apache.cloudstack.engine.vm.VMEntityManagerImpl" />
    <bean id="virtualMachineEntityImpl" class="org.apache.cloudstack.engine.cloud.entity.api.VirtualMachineEntityImpl" />
    <bean id="virtualMachineRestService" class="org.apache.cloudstack.engine.rest.service.api.VirtualMachineRestService" />
-   <bean id="volumeDataFactoryImpl" class="org.apache.cloudstack.storage.volume.VolumeDataFactoryImpl" />
    <bean id="volumeRestService" class="org.apache.cloudstack.engine.rest.service.api.VolumeRestService" />
-   <bean id="volumeServiceImpl" class="org.apache.cloudstack.storage.volume.VolumeServiceImpl" />
    <bean id="volumeTypeHelper" class="org.apache.cloudstack.engine.subsystem.api.storage.type.VolumeTypeHelper" />
    <bean id="zoneRestService" class="org.apache.cloudstack.engine.rest.service.api.ZoneRestService" />
    <bean id="cloudZonesStartupProcessor" class="com.cloud.hypervisor.CloudZonesStartupProcessor" />
@@@ -796,17 -870,7 +861,9 @@@
    <bean id="mockNetworkManagerImpl" class="com.cloud.agent.manager.MockNetworkManagerImpl" />
    <bean id="simulatorManagerImpl" class="com.cloud.agent.manager.SimulatorManagerImpl" />
    <bean id="vMSnapshotManagerImpl" class="com.cloud.vm.snapshot.VMSnapshotManagerImpl" />
-   <bean id="volumeManagerImpl" class="com.cloud.storage.VolumeManagerImpl" />
-   <bean id="ClassicalPrimaryDataStoreProvider" class="org.apache.cloudstack.storage.datastore.provider.CloudStackPrimaryDataStoreProviderImpl" />
-   <bean id="cloudStackImageStoreProviderImpl" class="org.apache.cloudstack.storage.datastore.provider.CloudStackImageStoreProviderImpl" />
-   <bean id="s3ImageStoreProviderImpl" class="org.apache.cloudstack.storage.datastore.provider.S3ImageStoreProviderImpl" />
-   <bean id="swiftImageStoreProviderImpl" class="org.apache.cloudstack.storage.datastore.provider.SwiftImageStoreProviderImpl" />  
-   <bean id="ApplicationLoadBalancerService" class="org.apache.cloudstack.network.lb.ApplicationLoadBalancerManagerImpl" />
-   <bean id="InternalLoadBalancerVMManager" class="org.apache.cloudstack.network.lb.InternalLoadBalancerVMManagerImpl" />
-   <bean id="StorageCacheReplacementAlgorithm" class="org.apache.cloudstack.storage.cache.manager.StorageCacheReplacementAlgorithmLRU" />
  
 +  <bean id="VirtualMachinePowerStateSyncImpl" class="com.cloud.vm.VirtualMachinePowerStateSyncImpl" />
 +
  
  <!--=======================================================================================================-->
  <!--                                                                                                       -->
@@@ -875,7 -915,9 +932,9 @@@
    <bean id="BAREMETAL" class="org.apache.cloudstack.storage.image.format.BAREMETAL" />
    <bean id="baremetalDhcpDaoImpl" class="com.cloud.baremetal.database.BaremetalDhcpDaoImpl" />
    <bean id="baremetalPxeDaoImpl" class="com.cloud.baremetal.database.BaremetalPxeDaoImpl" />
 -
 +  
+   <bean id="UcsManager" class="com.cloud.ucs.manager.UcsManagerImpl" />
+ 
    <bean id="AffinityGroupServiceImpl" class="org.apache.cloudstack.affinity.AffinityGroupServiceImpl"/>
    <bean id="DeploymentPlanningManager" class="com.cloud.deploy.DeploymentPlanningManagerImpl">
      <property name="Planners" value="#{deploymentPlanners.Adapters}" />

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/57bafc8f/client/tomcatconf/componentContext.xml.in
----------------------------------------------------------------------
diff --cc client/tomcatconf/componentContext.xml.in
index 9ffecbb,1fbec61..2e01874
--- a/client/tomcatconf/componentContext.xml.in
+++ b/client/tomcatconf/componentContext.xml.in
@@@ -91,7 -97,29 +97,29 @@@
  
    It determines whether or not a adapter is activated or how it is loaded in order in its managing provider,
  
 --->
 +--> 
+   <bean id="cloudStackImageStoreProviderImpl"
+         class="org.apache.cloudstack.storage.datastore.provider.CloudStackImageStoreProviderImpl"/>
+   <bean id="s3ImageStoreProviderImpl"
+         class="org.apache.cloudstack.storage.datastore.provider.S3ImageStoreProviderImpl"/>
+   <bean id="swiftImageStoreProviderImpl"
+         class="org.apache.cloudstack.storage.datastore.provider.SwiftImageStoreProviderImpl"/>
+   <bean id="solidFireDataStoreProvider"
+         class="org.apache.cloudstack.storage.datastore.provider.SolidfirePrimaryDataStoreProvider"/>
+ 
+   <!--Storage Providers-->
+   <bean id="dataStoreProviderManager"
+         class="org.apache.cloudstack.storage.datastore.provider.DataStoreProviderManagerImpl">
+     <property name="providers">
+       <list merge="true">
+         <ref bean="cloudStackPrimaryDataStoreProviderImpl"/>
+         <ref local="cloudStackImageStoreProviderImpl"/>
+         <ref local="s3ImageStoreProviderImpl"/>
+         <ref local="swiftImageStoreProviderImpl"/>
+         <ref local="solidFireDataStoreProvider"/>
+       </list>
+     </property>
+   </bean>
  
    <!-- Security adapters -->
    <bean id="userAuthenticators" class="com.cloud.utils.component.AdapterList">

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/57bafc8f/client/tomcatconf/nonossComponentContext.xml.in
----------------------------------------------------------------------
diff --cc client/tomcatconf/nonossComponentContext.xml.in
index d4cc06d,ffa6281..fcd5b55
--- a/client/tomcatconf/nonossComponentContext.xml.in
+++ b/client/tomcatconf/nonossComponentContext.xml.in
@@@ -176,8 -181,42 +181,42 @@@
      
    It determines whether or not a adapter is activated or how it is loaded in order in its managing provider,
       
 --->
 +--> 
  
+   <!--Motion Strategies-->
+   <bean id="vmwareStorageMotionStrategy" class="org.apache.cloudstack.storage.motion.VmwareStorageMotionStrategy" />
+   <bean id="dataMotionServiceImpl" class="org.apache.cloudstack.storage.motion.DataMotionServiceImpl">
+     <property name="strategies">
+       <list>
+         <ref bean="ancientDataMotionStrategy"/>
+         <ref bean="xenserverStorageMotionStrategy"/>
+         <ref local="vmwareStorageMotionStrategy"/>
+       </list>
+     </property>
+   </bean>
+ 
+   <bean id="cloudStackImageStoreProviderImpl"
+         class="org.apache.cloudstack.storage.datastore.provider.CloudStackImageStoreProviderImpl"/>
+   <bean id="s3ImageStoreProviderImpl"
+         class="org.apache.cloudstack.storage.datastore.provider.S3ImageStoreProviderImpl"/>
+   <bean id="swiftImageStoreProviderImpl"
+         class="org.apache.cloudstack.storage.datastore.provider.SwiftImageStoreProviderImpl"/>
+   <bean id="solidFireDataStoreProvider"
+         class="org.apache.cloudstack.storage.datastore.provider.SolidfirePrimaryDataStoreProvider"/>
+ 
+   <!--Storage Providers-->
+   <bean id="dataStoreProviderManager"
+         class="org.apache.cloudstack.storage.datastore.provider.DataStoreProviderManagerImpl">
+     <property name="providers">
+       <list merge="true">
+         <ref bean="cloudStackPrimaryDataStoreProviderImpl"/>
+         <ref bean="cloudStackImageStoreProviderImpl"/>
+         <ref bean="s3ImageStoreProviderImpl"/>
+         <ref bean="solidFireDataStoreProvider"/>
+       </list>
+     </property>
+   </bean>
+ 
    <!-- Security adapters -->
    <bean id="userAuthenticators" class="com.cloud.utils.component.AdapterList">
      <property name="Adapters">

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/57bafc8f/engine/components-api/src/com/cloud/storage/VolumeManager.java
----------------------------------------------------------------------
diff --cc engine/components-api/src/com/cloud/storage/VolumeManager.java
index 15f24f7,0000000..71741d5
mode 100644,000000..100644
--- a/engine/components-api/src/com/cloud/storage/VolumeManager.java
+++ b/engine/components-api/src/com/cloud/storage/VolumeManager.java
@@@ -1,122 -1,0 +1,121 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *   http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing,
 + * software distributed under the License is distributed on an
 + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 + * KIND, either express or implied.  See the License for the
 + * specific language governing permissions and limitations
 + * under the License.
 + */
 +package com.cloud.storage;
 +
 +import java.util.Map;
 +
 +import org.apache.cloudstack.api.command.user.volume.AttachVolumeCmd;
 +import org.apache.cloudstack.api.command.user.volume.CreateVolumeCmd;
 +import org.apache.cloudstack.api.command.user.volume.DetachVolumeCmd;
 +import org.apache.cloudstack.api.command.user.volume.MigrateVolumeCmd;
 +import org.apache.cloudstack.api.command.user.volume.ResizeVolumeCmd;
 +import org.apache.cloudstack.api.command.user.volume.UploadVolumeCmd;
 +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
 +
 +import com.cloud.agent.api.to.VirtualMachineTO;
 +import com.cloud.deploy.DeployDestination;
 +import com.cloud.exception.ConcurrentOperationException;
 +import com.cloud.exception.InsufficientStorageCapacityException;
 +import com.cloud.exception.ResourceAllocationException;
 +import com.cloud.exception.StorageUnavailableException;
 +import com.cloud.host.Host;
 +import com.cloud.hypervisor.Hypervisor.HypervisorType;
 +import com.cloud.storage.Volume.Type;
 +import com.cloud.user.Account;
 +import com.cloud.vm.DiskProfile;
 +import com.cloud.vm.VMInstanceVO;
 +import com.cloud.vm.VirtualMachine;
 +import com.cloud.vm.VirtualMachineProfile;
 +
 +public interface VolumeManager extends VolumeApiService {
- 
 +    VolumeInfo moveVolume(VolumeInfo volume, long destPoolDcId, Long destPoolPodId,
 +            Long destPoolClusterId, HypervisorType dataDiskHyperType)
 +            throws ConcurrentOperationException;
 +
 +    @Override
 +    VolumeVO uploadVolume(UploadVolumeCmd cmd)
 +            throws ResourceAllocationException;
 +
 +    VolumeVO allocateDuplicateVolume(VolumeVO oldVol, Long templateId);
 +
 +    boolean volumeOnSharedStoragePool(VolumeVO volume);
 +
 +    boolean volumeInactive(Volume volume);
 +
 +    String getVmNameOnVolume(Volume volume);
 +
 +    @Override
 +    VolumeVO allocVolume(CreateVolumeCmd cmd)
 +            throws ResourceAllocationException;
 +
 +    @Override
 +    VolumeVO createVolume(CreateVolumeCmd cmd);
 +
 +    @Override
 +    VolumeVO resizeVolume(ResizeVolumeCmd cmd)
 +            throws ResourceAllocationException;
 +
 +    @Override
 +    boolean deleteVolume(long volumeId, Account caller)
 +            throws ConcurrentOperationException;
 +    
 +    void destroyVolume(VolumeVO volume);
 +
 +    DiskProfile allocateRawVolume(Type type, String name, DiskOfferingVO offering, Long size, VMInstanceVO vm, Account owner);
 +    @Override
 +    Volume attachVolumeToVM(AttachVolumeCmd command);
 +
 +    @Override
 +    Volume detachVolumeFromVM(DetachVolumeCmd cmmd);
 +
 +    void release(VirtualMachineProfile profile);
 +
 +    void cleanupVolumes(long vmId) throws ConcurrentOperationException;
 +
 +    @Override
 +    Volume migrateVolume(MigrateVolumeCmd cmd);
 +
 +    void migrateVolumes(VirtualMachine vm, VirtualMachineTO vmTo, Host srcHost, Host destHost,
 +            Map<Volume, StoragePool> volumeToPool);
 +
 +    boolean storageMigration(
 +            VirtualMachineProfile vm,
 +            StoragePool destPool);
 +
 +    void prepareForMigration(
 +            VirtualMachineProfile vm,
 +            DeployDestination dest);
 +
 +    void prepare(VirtualMachineProfile vm,
 +            DeployDestination dest) throws StorageUnavailableException,
 +            InsufficientStorageCapacityException, ConcurrentOperationException;
 +
 +    boolean canVmRestartOnAnotherServer(long vmId);
 +
 +    DiskProfile allocateTemplatedVolume(Type type, String name,
 +            DiskOfferingVO offering, VMTemplateVO template, VMInstanceVO vm,
 +            Account owner);
 +
 +
 +    String getVmNameFromVolumeId(long volumeId);
 +
 +    String getStoragePoolOfVolume(long volumeId);
 +
 +    boolean validateVolumeSizeRange(long size);
 +}

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/57bafc8f/engine/storage/src/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java
----------------------------------------------------------------------
diff --cc engine/storage/src/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java
index e916a5c,300d932..30830b6
--- a/engine/storage/src/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java
+++ b/engine/storage/src/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java
@@@ -43,32 -43,44 +43,43 @@@ public class ZoneWideStoragePoolAllocat
      PrimaryDataStoreDao _storagePoolDao;
      @Inject
      DataStoreManager dataStoreMgr;
 -
 -    @Override
 +	
 +	@Override
      protected boolean filter(ExcludeList avoid, StoragePool pool, DiskProfile dskCh, DeploymentPlan plan) {
 -        Volume volume = _volumeDao.findById(dskCh.getVolumeId());
 +        Volume volume =  _volumeDao.findById(dskCh.getVolumeId());
          List<Volume> requestVolumes = new ArrayList<Volume>();
          requestVolumes.add(volume);
-         return storageMgr.storagePoolHasEnoughSpace(requestVolumes, pool);
+ 
+         return storageMgr.storagePoolHasEnoughIops(requestVolumes, pool) &&
+                storageMgr.storagePoolHasEnoughSpace(requestVolumes, pool);
 -    }
 -
 +	}
 +	
  	@Override
 -	protected List<StoragePool> select(DiskProfile dskCh,
 -			VirtualMachineProfile<? extends VirtualMachine> vmProfile,
 +    protected List<StoragePool> select(DiskProfile dskCh, VirtualMachineProfile vmProfile,
  			DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
  	    s_logger.debug("ZoneWideStoragePoolAllocator to find storage pool");
  		List<StoragePool> suitablePools = new ArrayList<StoragePool>();
- 		HypervisorType hypervisor = dskCh.getHypervisorType();
- 		if (hypervisor != null) {
-             if (hypervisor != HypervisorType.KVM && hypervisor != HypervisorType.VMware) {
-                 s_logger.debug("Only kvm, VMware hypervisors are enabled to support zone wide storage");
- 				return suitablePools;
+ 
+         List<StoragePoolVO> storagePools = _storagePoolDao.findZoneWideStoragePoolsByTags(plan.getDataCenterId(), dskCh.getTags());
+ 
+         if (storagePools == null) {
+             storagePools = new ArrayList<StoragePoolVO>();
+         }
+ 
+         List<StoragePoolVO> anyHypervisorStoragePools = new ArrayList<StoragePoolVO>();
+ 
+         for (StoragePoolVO storagePool : storagePools) {
+             if (HypervisorType.Any.equals(storagePool.getHypervisor())) {
+                 anyHypervisorStoragePools.add(storagePool);
 -            }
 -        }
 -
 +			}
 +		}
 +		
- 		List<StoragePoolVO> storagePools = _storagePoolDao.findZoneWideStoragePoolsByTags(plan.getDataCenterId(), dskCh.getTags());
          List<StoragePoolVO> storagePoolsByHypervisor = _storagePoolDao.findZoneWideStoragePoolsByHypervisor(plan.getDataCenterId(), dskCh.getHypervisorType());
+ 
          storagePools.retainAll(storagePoolsByHypervisor);
 -
 +	
+         storagePools.addAll(anyHypervisorStoragePools);
+ 
          // add remaining pools in zone, that did not match tags, to avoid set
          List<StoragePoolVO> allPools = _storagePoolDao.findZoneWideStoragePoolsByTags(plan.getDataCenterId(), null);
          allPools.removeAll(storagePools);

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/57bafc8f/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
----------------------------------------------------------------------
diff --cc engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
index 3f6c4be,89313e4..58e0134
--- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
+++ b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
@@@ -115,8 -123,8 +124,8 @@@ public class VolumeServiceImpl implemen
  
      public VolumeServiceImpl() {
      }
 -
 +    
-     private class CreateVolumeContext<T> extends AsyncRpcConext<T> {
+     private class CreateVolumeContext<T> extends AsyncRpcContext<T> {
  
          private final DataObject volume;
          private final AsyncCallFuture<VolumeApiResult> future;
@@@ -136,9 -143,19 +145,19 @@@
          public AsyncCallFuture<VolumeApiResult> getFuture() {
              return this.future;
          }
 -
 +        
      }
 -
 +    
+     public ChapInfo getChapInfo(VolumeInfo volumeInfo, DataStore dataStore) {
+         DataStoreDriver dataStoreDriver = dataStore.getDriver();
+ 
+         if (dataStoreDriver instanceof PrimaryDataStoreDriver) {
+             return ((PrimaryDataStoreDriver)dataStoreDriver).getChapInfo(volumeInfo);
+         }
+ 
+         return null;
+     }
+ 
      @Override
      public AsyncCallFuture<VolumeApiResult> createVolumeAsync(VolumeInfo volume, DataStore dataStore) {
          AsyncCallFuture<VolumeApiResult> future = new AsyncCallFuture<VolumeApiResult>();
@@@ -149,11 -166,11 +168,11 @@@
                  future);
          AsyncCallbackDispatcher<VolumeServiceImpl, CreateCmdResult> caller = AsyncCallbackDispatcher.create(this);
          caller.setCallback(caller.getTarget().createVolumeCallback(null, null)).setContext(context);
 -
 +        
-         dataStore.getDriver().createAsync(volumeOnStore, caller);
+         dataStore.getDriver().createAsync(dataStore, volumeOnStore, caller);
          return future;
      }
 -
 +    
      protected Void createVolumeCallback(AsyncCallbackDispatcher<VolumeServiceImpl, CreateCmdResult> callback,
              CreateVolumeContext<VolumeApiResult> context) {
          CreateCmdResult result = callback.getResult();
@@@ -172,14 -189,13 +191,14 @@@
          context.getFuture().complete(volResult);
          return null;
      }
 -
 +    
-     private class DeleteVolumeContext<T> extends AsyncRpcConext<T> {
+     private class DeleteVolumeContext<T> extends AsyncRpcContext<T> {
          private final VolumeObject volume;
          private final AsyncCallFuture<VolumeApiResult> future;
 -
 -        public DeleteVolumeContext(AsyncCompletionCallback<T> callback, VolumeObject volume,
 -                AsyncCallFuture<VolumeApiResult> future) {
 +        /**
 +         * @param callback
 +         */
 +        public DeleteVolumeContext(AsyncCompletionCallback<T> callback, VolumeObject volume, AsyncCallFuture<VolumeApiResult> future) {
              super(callback);
              this.volume = volume;
              this.future = future;
@@@ -234,11 -250,11 +253,11 @@@
          DeleteVolumeContext<VolumeApiResult> context = new DeleteVolumeContext<VolumeApiResult>(null, vo, future);
          AsyncCallbackDispatcher<VolumeServiceImpl, CommandResult> caller = AsyncCallbackDispatcher.create(this);
          caller.setCallback(caller.getTarget().deleteVolumeCallback(null, null)).setContext(context);
 -
 +        
-         volume.getDataStore().getDriver().deleteAsync(volume, caller);
+         volume.getDataStore().getDriver().deleteAsync(volume.getDataStore(), volume, caller);
          return future;
      }
 -
 +    
      public Void deleteVolumeCallback(AsyncCallbackDispatcher<VolumeServiceImpl, CommandResult> callback,
              DeleteVolumeContext<VolumeApiResult> context) {
          CommandResult result = callback.getResult();
@@@ -263,10 -279,10 +282,10 @@@
  
      @Override
      public VolumeEntity getVolumeEntity(long volumeId) {
 -        return null;
 -    }
 +            return null;
 +        }
  
-     class CreateBaseImageContext<T> extends AsyncRpcConext<T> {
+     class CreateBaseImageContext<T> extends AsyncRpcContext<T> {
          private final VolumeInfo volume;
          private final PrimaryDataStore dataStore;
          private final TemplateInfo srcTemplate;
@@@ -282,8 -299,9 +302,9 @@@
              this.future = future;
              this.srcTemplate = srcTemplate;
              this.destObj = destObj;
+             this.templatePoolId = templatePoolId;
          }
 -
 +        
          public VolumeInfo getVolume() {
              return this.volume;
          }
@@@ -299,9 -317,14 +320,14 @@@
          public AsyncCallFuture<VolumeApiResult> getFuture() {
              return this.future;
          }
 -
 +        
+         public long getTemplatePoolId() {
+             return templatePoolId;
+         }
+ 
+ 
      }
 -
 +    
      private TemplateInfo waitForTemplateDownloaded(PrimaryDataStore store, TemplateInfo template) {
          int storagePoolMaxWaitSeconds = NumbersUtil.parseInt(
                  configDao.getValue(Config.StoragePoolMaxWaitSeconds.key()), 3600);
@@@ -325,41 -348,47 +351,47 @@@
      @DB
      protected void createBaseImageAsync(VolumeInfo volume, PrimaryDataStore dataStore, TemplateInfo template,
              AsyncCallFuture<VolumeApiResult> future) {
 -
 +       
          DataObject templateOnPrimaryStoreObj = dataStore.create(template);
+ 
+         VMTemplateStoragePoolVO templatePoolRef = _tmpltPoolDao.findByPoolTemplate(dataStore.getId(), template.getId());
+         if (templatePoolRef == null) {
+             throw new CloudRuntimeException("Failed to find template " + template.getUniqueName()
+                     + " in VMTemplateStoragePool");
+         }
+         long templatePoolRefId = templatePoolRef.getId();
          CreateBaseImageContext<CreateCmdResult> context = new CreateBaseImageContext<CreateCmdResult>(null, volume,
-                 dataStore, template, future, templateOnPrimaryStoreObj);
+                 dataStore, template, future, templateOnPrimaryStoreObj, templatePoolRefId);
          AsyncCallbackDispatcher<VolumeServiceImpl, CopyCommandResult> caller = AsyncCallbackDispatcher.create(this);
          caller.setCallback(caller.getTarget().copyBaseImageCallback(null, null)).setContext(context);
 -
 +        
-         try {
-         templateOnPrimaryStoreObj.processEvent(Event.CreateOnlyRequested);
-         } catch (Exception e) {
-             s_logger.info("Multiple threads are trying to copy template to primary storage, current thread should just wait");
-             try {
-                 templateOnPrimaryStoreObj = waitForTemplateDownloaded(dataStore, template);
-             } catch (Exception e1) {
-                 s_logger.debug("wait for template:" + template.getId() + " downloading finished, but failed");
-                 VolumeApiResult result = new VolumeApiResult(volume);
-                 result.setResult(e1.toString());
-                 future.complete(result);
-                 return;
-             }
-             if (templateOnPrimaryStoreObj == null) {
-                 VolumeApiResult result = new VolumeApiResult(volume);
-                 result.setResult("wait for template:" + template.getId() + " downloading finished, but failed");
-                 future.complete(result);
-                 return;
-             } else {
-                 s_logger.debug("waiting for template:" + template.getId() + " downloading finished, success");
+         int storagePoolMaxWaitSeconds = NumbersUtil.parseInt(
+                 configDao.getValue(Config.StoragePoolMaxWaitSeconds.key()), 3600);
+         templatePoolRef = _tmpltPoolDao.acquireInLockTable(templatePoolRefId, storagePoolMaxWaitSeconds);
+         if (templatePoolRef == null) {
+             templatePoolRef = _tmpltPoolDao.findByPoolTemplate(dataStore.getId(), template.getId());
+             if (templatePoolRef.getState() == ObjectInDataStoreStateMachine.State.Ready ) {
+                 s_logger.info("Unable to acquire lock on VMTemplateStoragePool " + templatePoolRefId + ", But Template " + template.getUniqueName() + " is already copied to primary storage, skip copying");
                  createVolumeFromBaseImageAsync(volume, templateOnPrimaryStoreObj, dataStore, future);
                  return;
              }
+             throw new CloudRuntimeException("Unable to acquire lock on VMTemplateStoragePool: " + templatePoolRefId);
          }
 -
 +     
          try {
+             // lock acquired
+             if (templatePoolRef.getState() == ObjectInDataStoreStateMachine.State.Ready ) {
+                 s_logger.info("Template " + template.getUniqueName() + " is already copied to primary storage, skip copying");
+                 createVolumeFromBaseImageAsync(volume, templateOnPrimaryStoreObj, dataStore, future);
+                 return;
+             }
+             // remove the leftover hanging entry
+             dataStore.delete(templateOnPrimaryStoreObj);
+             // create a new entry to restart copying process
+             templateOnPrimaryStoreObj = dataStore.create(template);
+             templateOnPrimaryStoreObj.processEvent(Event.CreateOnlyRequested);
              motionSrv.copyAsync(template, templateOnPrimaryStoreObj, caller);
-         } catch (Exception e) {
+         } catch (Throwable e) {
              s_logger.debug("failed to create template on storage", e);
              templateOnPrimaryStoreObj.processEvent(Event.OperationFailed);
              VolumeApiResult result = new VolumeApiResult(volume);
@@@ -383,13 -414,14 +417,14 @@@
              future.complete(res);
              return null;
          }
 -
 +        
          templateOnPrimaryStoreObj.processEvent(Event.OperationSuccessed, result.getAnswer());
+         _tmpltPoolDao.releaseFromLockTable(context.getTemplatePoolId());
          createVolumeFromBaseImageAsync(context.volume, templateOnPrimaryStoreObj, context.dataStore, future);
          return null;
      }
 -
 +    
-     private class CreateVolumeFromBaseImageContext<T> extends AsyncRpcConext<T> {
+     private class CreateVolumeFromBaseImageContext<T> extends AsyncRpcContext<T> {
          private final DataObject vo;
          private final AsyncCallFuture<VolumeApiResult> future;
          private final DataObject templateOnStore;
@@@ -543,8 -575,8 +578,8 @@@
          newVol.setPodId(pool.getPodId());
          return volDao.persist(newVol);
      }
 -
 +    
-     private class CopyVolumeContext<T> extends AsyncRpcConext<T> {
+     private class CopyVolumeContext<T> extends AsyncRpcContext<T> {
          final VolumeInfo srcVolume;
          final VolumeInfo destVolume;
          final AsyncCallFuture<VolumeApiResult> future;
@@@ -915,11 -947,11 +950,11 @@@
          AsyncCallbackDispatcher<VolumeServiceImpl, CreateCmdResult> caller = AsyncCallbackDispatcher.create(this);
          caller.setCallback(caller.getTarget().registerVolumeCallback(null, null));
          caller.setContext(context);
 -
 +        
-         store.getDriver().createAsync(volumeOnStore, caller);
+         store.getDriver().createAsync(store, volumeOnStore, caller);
          return future;
      }
 -
 +    
      protected Void registerVolumeCallback(AsyncCallbackDispatcher<VolumeServiceImpl, CreateCmdResult> callback,
              CreateVolumeContext<VolumeApiResult> context) {
          CreateCmdResult result = callback.getResult();

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/57bafc8f/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java
----------------------------------------------------------------------
diff --cc plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java
index d5cbe97,914017c..35c862c
--- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java
+++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java
@@@ -56,7 -56,15 +56,8 @@@ import java.util.regex.Pattern
  import javax.ejb.Local;
  import javax.naming.ConfigurationException;
  
 -import org.apache.cloudstack.storage.command.StorageSubSystemCommand;
 -import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;
 -import org.apache.cloudstack.storage.to.VolumeObjectTO;
 -import org.apache.cloudstack.utils.qemu.QemuImg;
 -import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
 -import org.apache.cloudstack.utils.qemu.QemuImgException;
 -import org.apache.cloudstack.utils.qemu.QemuImgFile;
  import org.apache.log4j.Logger;
+ import org.apache.commons.io.FileUtils;
  import org.libvirt.Connect;
  import org.libvirt.Domain;
  import org.libvirt.DomainBlockStats;
@@@ -4861,24 -4847,18 +4861,18 @@@ ServerResource 
          }
      }
  
-     private Pair<Double, Double> getNicStats(String nicName) {
-         double rx = 0.0;
-         String rxFile = "/sys/class/net/" + nicName + "/statistics/rx_bytes";
-         String rxContent = FileUtil.readFileAsString(rxFile);
-         if (rxContent == null) {
-             s_logger.warn("Failed to read the rx_bytes for " + nicName + " from " + rxFile);
+     static Pair<Double, Double> getNicStats(String nicName) {
+         return new Pair<Double, Double>(readDouble(nicName, "rx_bytes"), readDouble(nicName, "tx_bytes"));
 -    }
 +        }
-         rx = Double.parseDouble(rxContent);
  
-         double tx = 0.0;
-         String txFile = "/sys/class/net/" + nicName + "/statistics/tx_bytes";
-         String txContent = FileUtil.readFileAsString(txFile);
-         if (txContent == null) {
-             s_logger.warn("Failed to read the tx_bytes for " + nicName + " from " + txFile);
+     static double readDouble(String nicName, String fileName) {
+         final String path = "/sys/class/net/" + nicName + "/statistics/" + fileName;
+         try {
+             return Double.parseDouble(FileUtils.readFileToString(new File(path)));
+         } catch (IOException ioe) {
+             s_logger.warn("Failed to read the " + fileName + " for " + nicName + " from " + path, ioe);
+             return 0.0;
          }
-         tx = Double.parseDouble(txContent);
- 
-         return new Pair<Double, Double>(rx, tx);
      }
  
      private Answer execute(NetworkRulesSystemVmCommand cmd) {

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/57bafc8f/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java
----------------------------------------------------------------------
diff --cc plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java
index 0606f38,fee3e0a..25f69fd
--- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java
+++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java
@@@ -83,8 -74,20 +83,9 @@@ import com.cloud.utils.Pair
  import com.cloud.utils.StringUtils;
  import com.cloud.utils.Ternary;
  import com.cloud.utils.script.Script;
+ import com.cloud.utils.exception.CloudRuntimeException;
  import com.cloud.vm.VirtualMachine;
  import com.cloud.vm.snapshot.VMSnapshot;
 -import com.vmware.vim25.ManagedObjectReference;
 -import com.vmware.vim25.TaskEvent;
 -import com.vmware.vim25.TaskInfo;
 -import com.vmware.vim25.VirtualDeviceConfigSpec;
 -import com.vmware.vim25.VirtualDeviceConfigSpecOperation;
 -import com.vmware.vim25.VirtualDisk;
 -import com.vmware.vim25.VirtualLsiLogicController;
 -import com.vmware.vim25.VirtualMachineConfigSpec;
 -import com.vmware.vim25.VirtualMachineFileInfo;
 -import com.vmware.vim25.VirtualMachineGuestOsIdentifier;
 -import com.vmware.vim25.VirtualSCSISharing;
  
  public class VmwareStorageManagerImpl implements VmwareStorageManager {
      private static final Logger s_logger = Logger.getLogger(VmwareStorageManagerImpl.class);

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/57bafc8f/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
----------------------------------------------------------------------
diff --cc plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
index 0e5997e,286eb48..7c7e0b4
--- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
+++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
@@@ -43,49 -43,6 +43,60 @@@ import java.util.UUID
  import javax.inject.Inject;
  import javax.naming.ConfigurationException;
  
 +import org.apache.log4j.Logger;
 +import org.apache.log4j.NDC;
 +
 +import com.google.gson.Gson;
 +import com.vmware.vim25.AboutInfo;
 +import com.vmware.vim25.ClusterDasConfigInfo;
 +import com.vmware.vim25.ComputeResourceSummary;
 +import com.vmware.vim25.DatastoreSummary;
 +import com.vmware.vim25.DynamicProperty;
 +import com.vmware.vim25.GuestInfo;
++import com.vmware.vim25.GuestOsDescriptor;
 +import com.vmware.vim25.HostCapability;
 +import com.vmware.vim25.HostFirewallInfo;
 +import com.vmware.vim25.HostFirewallRuleset;
++import com.vmware.vim25.HostHostBusAdapter;
++import com.vmware.vim25.HostInternetScsiHba;
++import com.vmware.vim25.HostInternetScsiHbaAuthenticationProperties;
++import com.vmware.vim25.HostInternetScsiHbaStaticTarget;
++import com.vmware.vim25.HostInternetScsiTargetTransport;
++import com.vmware.vim25.HostScsiDisk;
++import com.vmware.vim25.HostScsiTopology;
++import com.vmware.vim25.HostScsiTopologyInterface;
++import com.vmware.vim25.HostScsiTopologyLun;
++import com.vmware.vim25.HostScsiTopologyTarget;
 +import com.vmware.vim25.ManagedObjectReference;
 +import com.vmware.vim25.ObjectContent;
 +import com.vmware.vim25.OptionValue;
 +import com.vmware.vim25.PerfCounterInfo;
 +import com.vmware.vim25.PerfEntityMetric;
 +import com.vmware.vim25.PerfEntityMetricBase;
 +import com.vmware.vim25.PerfMetricId;
 +import com.vmware.vim25.PerfMetricIntSeries;
 +import com.vmware.vim25.PerfMetricSeries;
 +import com.vmware.vim25.PerfQuerySpec;
 +import com.vmware.vim25.PerfSampleInfo;
 +import com.vmware.vim25.RuntimeFaultFaultMsg;
 +import com.vmware.vim25.ToolsUnavailableFaultMsg;
 +import com.vmware.vim25.VimPortType;
 +import com.vmware.vim25.VirtualDevice;
 +import com.vmware.vim25.VirtualDeviceConfigSpec;
 +import com.vmware.vim25.VirtualDeviceConfigSpecOperation;
 +import com.vmware.vim25.VirtualDisk;
 +import com.vmware.vim25.VirtualEthernetCard;
 +import com.vmware.vim25.VirtualEthernetCardNetworkBackingInfo;
 +import com.vmware.vim25.VirtualLsiLogicController;
 +import com.vmware.vim25.VirtualMachineConfigSpec;
 +import com.vmware.vim25.VirtualMachineFileInfo;
 +import com.vmware.vim25.VirtualMachineGuestOsIdentifier;
 +import com.vmware.vim25.VirtualMachinePowerState;
 +import com.vmware.vim25.VirtualMachineRelocateSpec;
 +import com.vmware.vim25.VirtualMachineRelocateSpecDiskLocator;
 +import com.vmware.vim25.VirtualMachineRuntimeInfo;
 +import com.vmware.vim25.VirtualSCSISharing;
 +
  import org.apache.cloudstack.storage.command.DeleteCommand;
  import org.apache.cloudstack.storage.command.StorageSubSystemCommand;
  import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;
@@@ -255,8 -214,8 +266,10 @@@ import com.cloud.hypervisor.vmware.mo.C
  import com.cloud.hypervisor.vmware.mo.DatacenterMO;
  import com.cloud.hypervisor.vmware.mo.DatastoreMO;
  import com.cloud.hypervisor.vmware.mo.DiskControllerType;
++import com.cloud.hypervisor.vmware.mo.HostDatastoreSystemMO;
  import com.cloud.hypervisor.vmware.mo.HostFirewallSystemMO;
  import com.cloud.hypervisor.vmware.mo.HostMO;
++import com.cloud.hypervisor.vmware.mo.HostStorageSystemMO;
  import com.cloud.hypervisor.vmware.mo.HypervisorHostHelper;
  import com.cloud.hypervisor.vmware.mo.NetworkDetails;
  import com.cloud.hypervisor.vmware.mo.VirtualEthernetCardType;
@@@ -3240,10 -3258,10 +3260,10 @@@ public class VmwareResource implements 
          HashMap<String, VmStatsEntry> vmStatsMap = null;
  
          try {
 -            HashMap<String, State> newStates = getVmStates();
 +            HashMap<String, PowerState> newStates = getVmStates();
  
              List<String> requestedVmNames = cmd.getVmNames();
-             List<String> vmNames = new ArrayList();
+             List<String> vmNames = new ArrayList<String>();
  
              if (requestedVmNames != null) {
                  for (String vmName : requestedVmNames) {
@@@ -3953,6 -4022,198 +4024,200 @@@
          }
      }
  
+     private ManagedObjectReference createVmfsDatastore(VmwareHypervisorHost hyperHost, String datastoreName, String storageIpAddress,
+             int storagePortNumber, String iqn, String chapName, String chapSecret, String mutualChapName, String mutualChapSecret) throws Exception {
+         VmwareContext context = getServiceContext();
+         ManagedObjectReference morCluster = hyperHost.getHyperHostCluster();
+         ClusterMO cluster = new ClusterMO(context, morCluster);
+         List<Pair<ManagedObjectReference, String>> lstHosts = cluster.getClusterHosts();
+ 
+         HostInternetScsiHbaStaticTarget target = new HostInternetScsiHbaStaticTarget();
+ 
+         target.setAddress(storageIpAddress);
+         target.setPort(storagePortNumber);
+         target.setIScsiName(iqn);
+ 
+         HostInternetScsiHbaAuthenticationProperties auth = new HostInternetScsiHbaAuthenticationProperties();
+ 
+         String strAuthType = "chapRequired";
+ 
+         auth.setChapAuthEnabled(true);
+         auth.setChapInherited(false);
+         auth.setChapAuthenticationType(strAuthType);
+         auth.setChapName(chapName);
+         auth.setChapSecret(chapSecret);
+         auth.setMutualChapInherited(false);
+         auth.setMutualChapAuthenticationType(strAuthType);
+         auth.setMutualChapName(mutualChapName);
+         auth.setMutualChapSecret(mutualChapSecret);
+ 
+         target.setAuthenticationProperties(auth);
+ 
+         final List<HostInternetScsiHbaStaticTarget> lstTargets = new ArrayList<HostInternetScsiHbaStaticTarget>();
+ 
+         lstTargets.add(target);
+ 
+         HostDatastoreSystemMO hostDatastoreSystem = null;
+         HostStorageSystemMO hostStorageSystem = null;
+ 
+         final List<Thread> threads = new ArrayList<Thread>();
+         final List<Exception> exceptions = new ArrayList<Exception>();
+ 
+         for (Pair<ManagedObjectReference, String> hostPair : lstHosts) {
+             HostMO host = new HostMO(context, hostPair.first());
+             hostDatastoreSystem = host.getHostDatastoreSystemMO();
+             hostStorageSystem = host.getHostStorageSystemMO();
+ 
+             boolean iScsiHbaConfigured = false;
+ 
+             for (HostHostBusAdapter hba : hostStorageSystem.getStorageDeviceInfo().getHostBusAdapter()) {
+                 if (hba instanceof HostInternetScsiHba) {
+                     // just finding an instance of HostInternetScsiHba means that we have found at least one configured iSCSI HBA
+                     // at least one iSCSI HBA must be configured before a CloudStack user can use this host for iSCSI storage
+                     iScsiHbaConfigured = true;
+ 
+                     final String iScsiHbaDevice = hba.getDevice();
+ 
+                     final HostStorageSystemMO hss = hostStorageSystem;
+ 
+                     threads.add(new Thread() {
++                        @Override
+                         public void run() {
+                             try {
+                                 hss.addInternetScsiStaticTargets(iScsiHbaDevice, lstTargets);
+ 
+                                 hss.rescanHba(iScsiHbaDevice);
+                             }
+                             catch (Exception ex) {
+                                 synchronized (exceptions) {
+                                     exceptions.add(ex);
+                                 }
+                             }
+                         }
+                     });
+                 }
+             }
+ 
+             if (!iScsiHbaConfigured) {
+                 throw new Exception("An iSCSI HBA must be configured before a host can use iSCSI storage.");
+             }
+         }
+ 
+         for (Thread thread : threads) {
+             thread.start();
+         }
+ 
+         for (Thread thread : threads) {
+             thread.join();
+         }
+ 
+         if (exceptions.size() > 0) {
+             throw new Exception(exceptions.get(0).getMessage());
+         }
+ 
+         ManagedObjectReference morDs = hostDatastoreSystem.findDatastore(iqn);
+ 
+         if (morDs != null) {
+             return morDs;
+         }
+ 
+         List<HostScsiDisk> lstHostScsiDisks = hostDatastoreSystem.queryAvailableDisksForVmfs();
+ 
+         HostScsiDisk hostScsiDisk = getHostScsiDisk(hostStorageSystem.getStorageDeviceInfo().getScsiTopology(), lstHostScsiDisks, iqn);
+ 
+         if (hostScsiDisk == null) {
+             throw new Exception("A relevant SCSI disk could not be located to use to create a datastore.");
+         }
+ 
+         return hostDatastoreSystem.createVmfsDatastore(datastoreName, hostScsiDisk);
+     }
+ 
+     // the purpose of this method is to find the HostScsiDisk in the passed-in array that exists (if any) because
+     // we added the static iqn to an iSCSI HBA
+     private static HostScsiDisk getHostScsiDisk(HostScsiTopology hst, List<HostScsiDisk> lstHostScsiDisks, String iqn) {
+         for (HostScsiTopologyInterface adapter : hst.getAdapter()) {
+             if (adapter.getTarget() != null) {
+                 for (HostScsiTopologyTarget target : adapter.getTarget()) {
+                     if (target.getTransport() instanceof HostInternetScsiTargetTransport) {
+                         String iScsiName = ((HostInternetScsiTargetTransport)target.getTransport()).getIScsiName();
+ 
+                         if (iqn.equals(iScsiName)) {
+                             for (HostScsiDisk hostScsiDisk : lstHostScsiDisks) {
+                                 for (HostScsiTopologyLun hstl : target.getLun()) {
+                                     if (hstl.getScsiLun().contains(hostScsiDisk.getUuid())) {
+                                         return hostScsiDisk;
+                                     }
+                                 }
+                             }
+                         }
+                     }
+                 }
+             }
+         }
+ 
+         return null;
+     }
+ 
+     private void deleteVmfsDatastore(VmwareHypervisorHost hyperHost, String volumeUuid,
+             String storageIpAddress, int storagePortNumber, String iqn) throws Exception {
+         // hyperHost.unmountDatastore(volumeUuid);
+ 
+         VmwareContext context = getServiceContext();
+         ManagedObjectReference morCluster = hyperHost.getHyperHostCluster();
+         ClusterMO cluster = new ClusterMO(context, morCluster);
+         List<Pair<ManagedObjectReference, String>> lstHosts = cluster.getClusterHosts();
+ 
+         HostInternetScsiHbaStaticTarget target = new HostInternetScsiHbaStaticTarget();
+ 
+         target.setAddress(storageIpAddress);
+         target.setPort(storagePortNumber);
+         target.setIScsiName(iqn);
+ 
+         final List<HostInternetScsiHbaStaticTarget> lstTargets = new ArrayList<HostInternetScsiHbaStaticTarget>();
+ 
+         lstTargets.add(target);
+ 
+         final List<Thread> threads = new ArrayList<Thread>();
+         final List<Exception> exceptions = new ArrayList<Exception>();
+ 
+         for (Pair<ManagedObjectReference, String> hostPair : lstHosts) {
+             final HostMO host = new HostMO(context, hostPair.first());
+             final HostStorageSystemMO hostStorageSystem = host.getHostStorageSystemMO();
+ 
+             for (HostHostBusAdapter hba : hostStorageSystem.getStorageDeviceInfo().getHostBusAdapter()) {
+                 if (hba instanceof HostInternetScsiHba) {
+                     final String iScsiHbaDevice = hba.getDevice();
+ 
+                     Thread thread = new Thread() {
++                        @Override
+                         public void run() {
+                             try {
+                                 hostStorageSystem.removeInternetScsiStaticTargets(iScsiHbaDevice, lstTargets);
+ 
+                                 hostStorageSystem.rescanHba(iScsiHbaDevice);
+                             }
+                             catch (Exception ex) {
+                                 exceptions.add(ex);
+                             }
+                         }
+                     };
+ 
+                     threads.add(thread);
+ 
+                     thread.start();
+                 }
+             }
+         }
+ 
+         for (Thread thread : threads) {
+             thread.join();
+         }
+ 
+         if (exceptions.size() > 0) {
+             throw new Exception(exceptions.get(0).getMessage());
+         }
+     }
+ 
      protected Answer execute(AttachIsoCommand cmd) {
          if (s_logger.isInfoEnabled()) {
              s_logger.info("Executing resource AttachIsoCommand: " + _gson.toJson(cmd));

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/57bafc8f/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/57bafc8f/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java
----------------------------------------------------------------------
diff --cc plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java
index c947af6,e07df0b..a079fe2
--- a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java
+++ b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java
@@@ -262,85 -298,8 +298,9 @@@ import com.cloud.utils.exception.CloudR
  import com.cloud.utils.net.NetUtils;
  import com.cloud.vm.DiskProfile;
  import com.cloud.vm.VirtualMachine;
 +import com.cloud.vm.VirtualMachine.PowerState;
  import com.cloud.vm.VirtualMachine.State;
  import com.cloud.vm.snapshot.VMSnapshot;
- import com.trilead.ssh2.SCPClient;
- import com.xensource.xenapi.Bond;
- import com.xensource.xenapi.Connection;
- import com.xensource.xenapi.Console;
- import com.xensource.xenapi.Host;
- import com.xensource.xenapi.HostCpu;
- import com.xensource.xenapi.HostMetrics;
- import com.xensource.xenapi.Network;
- import com.xensource.xenapi.PBD;
- import com.xensource.xenapi.PIF;
- import com.xensource.xenapi.PIF.Record;
- import com.xensource.xenapi.Pool;
- import com.xensource.xenapi.SR;
- import com.xensource.xenapi.Session;
- import com.xensource.xenapi.Task;
- import com.xensource.xenapi.Types;
- import com.xensource.xenapi.Types.BadAsyncResult;
- import com.xensource.xenapi.Types.BadServerResponse;
- import com.xensource.xenapi.Types.ConsoleProtocol;
- import com.xensource.xenapi.Types.IpConfigurationMode;
- import com.xensource.xenapi.Types.OperationNotAllowed;
- import com.xensource.xenapi.Types.SrFull;
- import com.xensource.xenapi.Types.VbdType;
- import com.xensource.xenapi.Types.VmBadPowerState;
- import com.xensource.xenapi.Types.VmPowerState;
- import com.xensource.xenapi.Types.XenAPIException;
- import com.xensource.xenapi.VBD;
- import com.xensource.xenapi.VBDMetrics;
- import com.xensource.xenapi.VDI;
- import com.xensource.xenapi.VIF;
- import com.xensource.xenapi.VLAN;
- import com.xensource.xenapi.VM;
- import com.xensource.xenapi.VMGuestMetrics;
- import com.xensource.xenapi.XenAPIObject;
- import org.apache.cloudstack.storage.command.StorageSubSystemCommand;
- import org.apache.log4j.Logger;
- import org.apache.xmlrpc.XmlRpcException;
- import org.w3c.dom.Document;
- import org.w3c.dom.Node;
- import org.w3c.dom.NodeList;
- import org.xml.sax.InputSource;
- 
- import javax.ejb.Local;
- import javax.naming.ConfigurationException;
- import javax.xml.parsers.DocumentBuilderFactory;
- import java.beans.BeanInfo;
- import java.beans.IntrospectionException;
- import java.beans.Introspector;
- import java.beans.PropertyDescriptor;
- import java.io.BufferedReader;
- import java.io.File;
- import java.io.FileInputStream;
- import java.io.IOException;
- import java.io.InputStream;
- import java.io.InputStreamReader;
- import java.io.StringReader;
- import java.lang.reflect.InvocationTargetException;
- import java.net.URI;
- import java.net.URISyntaxException;
- import java.net.URL;
- import java.net.URLConnection;
- import java.util.ArrayList;
- import java.util.Arrays;
- import java.util.Collections;
- import java.util.Date;
- import java.util.HashMap;
- import java.util.HashSet;
- import java.util.Iterator;
- import java.util.LinkedList;
- import java.util.List;
- import java.util.Map;
- import java.util.Properties;
- import java.util.Queue;
- import java.util.Random;
- import java.util.Set;
- import java.util.UUID;
  
  /**
   * CitrixResourceBase encapsulates the calls to the XenServer Xapi process
@@@ -3041,16 -3006,13 +3007,13 @@@ public abstract class CitrixResourceBas
                  try {
                      host_uuid = host.getUuid(conn);
                  } catch (BadServerResponse e) {
-                     // TODO Auto-generated catch block
-                     e.printStackTrace();
+                     s_logger.error("Failed to get host uuid for host " + host.toWireString(), e);
                  } catch (XenAPIException e) {
-                     // TODO Auto-generated catch block
-                     e.printStackTrace();
+                     s_logger.error("Failed to get host uuid for host " + host.toWireString(), e);
                  } catch (XmlRpcException e) {
-                     // TODO Auto-generated catch block
-                     e.printStackTrace();
+                     s_logger.error("Failed to get host uuid for host " + host.toWireString(), e);
                  }
 -                vmStates.put(record.nameLabel, new Pair<String, State>(host_uuid, state));
 +                vmStates.put(record.nameLabel, new Pair<String, PowerState>(host_uuid, state));
              }
          }
  
@@@ -5782,31 -5718,24 +5719,24 @@@
  
      private List<Pair<String, Long>> ovsFullSyncStates() {
          Connection conn = getConnection();
-         try {
 -        String result = callHostPlugin(conn, "ovsgre", "ovs_get_vm_log", "host_uuid", _host.uuid);
 -        String [] logs = result != null ?result.split(";"): new String [0];
 -        List<Pair<String, Long>> states = new ArrayList<Pair<String, Long>>();
 -        for (String log: logs){
 -            String [] info = log.split(",");
 -            if (info.length != 5) {
 -                s_logger.warn("Wrong element number in ovs log(" + log +")");
 -                continue;
 -            }
 +            String result = callHostPlugin(conn, "ovsgre", "ovs_get_vm_log", "host_uuid", _host.uuid);
 +            String [] logs = result != null ?result.split(";"): new String [0];
 +            List<Pair<String, Long>> states = new ArrayList<Pair<String, Long>>();
 +            for (String log: logs){
 +                String [] info = log.split(",");
 +                if (info.length != 5) {
 +                    s_logger.warn("Wrong element number in ovs log(" + log +")");
 +                    continue;
 +                }
  
 -            //','.join([bridge, vmName, vmId, seqno, tag])
 -            try {
 -                states.add(new Pair<String,Long>(info[0], Long.parseLong(info[3])));
 -            } catch (NumberFormatException nfe) {
 -                states.add(new Pair<String,Long>(info[0], -1L));
 +                //','.join([bridge, vmName, vmId, seqno, tag])
 +                try {
 +                    states.add(new Pair<String,Long>(info[0], Long.parseLong(info[3])));
 +                } catch (NumberFormatException nfe) {
 +                    states.add(new Pair<String,Long>(info[0], -1L));
 +                }
              }
- 
 -        }
 -        return states;
 +            return states;
-         } catch (Exception e) {
-             e.printStackTrace();
-         }
- 
-         return null;
      }
  
      private OvsSetTagAndFlowAnswer execute(OvsSetTagAndFlowCommand cmd) {
@@@ -5850,13 -5783,19 +5784,19 @@@
              PIF pif = nw.getPif(conn);
              Record pifRec = pif.getRecord(conn);
              s_logger.debug("PIF object:" + pifRec.uuid + "(" + pifRec.device + ")");
 -            return new OvsFetchInterfaceAnswer(cmd, true, "Interface " + pifRec.device + " retrieved successfully",
 +            return new OvsFetchInterfaceAnswer(cmd, true, "Interface " + pifRec.device + " retrieved successfully", 
                      pifRec.IP, pifRec.netmask, pifRec.MAC);
-         } catch (Exception e) {
-             e.printStackTrace();
+         } catch (BadServerResponse e) {
+             s_logger.error("An error occurred while fetching the interface for " +
+                     label + " on host " + _host.ip , e);
+             return new OvsFetchInterfaceAnswer(cmd, false, "EXCEPTION:" + e.getMessage());
+         } catch (XenAPIException e) {
+             s_logger.error("An error occurred while fetching the interface for " +
+                     label + " on host " + _host.ip , e);
+             return new OvsFetchInterfaceAnswer(cmd, false, "EXCEPTION:" + e.getMessage());
+         } catch (XmlRpcException e) {
              s_logger.error("An error occurred while fetching the interface for " +
-                     label + " on host " + _host.ip + ":" + e.toString() + 
-                     "(" + e.getClass() + ")");
+                     label + " on host " + _host.ip, e);
              return new OvsFetchInterfaceAnswer(cmd, false, "EXCEPTION:" + e.getMessage());
          }
      }
@@@ -6455,20 -6400,31 +6401,31 @@@
                  if( result.indexOf("<UUID>") != -1) {
                      pooluuid = result.substring(result.indexOf("<UUID>") + 6, result.indexOf("</UUID>")).trim();
                  }
-                 if( pooluuid == null || pooluuid.length() != 36) {
-                     sr = SR.create(conn, host, deviceConfig, new Long(0), pool.getUuid(), poolId, type, "user", true,
+ 
+                 if (pooluuid == null || pooluuid.length() != 36)
+                 {
+                     sr = SR.create(conn, host, deviceConfig, new Long(0), srNameLabel, srNameLabel, type, "user", true,
                              smConfig);
+ 
+                     created[0] = true; // note that the SR was created (as opposed to introduced)
                  } else {
-                     sr = SR.introduce(conn, pooluuid, pool.getUuid(), poolId,
+                     sr = SR.introduce(conn, pooluuid, srNameLabel, srNameLabel,
                              type, "user", true, smConfig);
-                     Pool.Record pRec = XenServerConnectionPool.getPoolRecord(conn);
+ 
+                     Set<Host> setHosts = Host.getAll(conn);
+ 
+                     for (Host currentHost : setHosts) {
 -                        PBD.Record rec = new PBD.Record();
 +                    PBD.Record rec = new PBD.Record();
+ 
 -                        rec.deviceConfig = deviceConfig;
 +                    rec.deviceConfig = deviceConfig;
-                     rec.host = pRec.master;
+                         rec.host = currentHost;
 -                        rec.SR = sr;
 +                    rec.SR = sr;
+ 
 -                        PBD pbd = PBD.create(conn, rec);
 +                    PBD pbd = PBD.create(conn, rec);
+ 
 -                        pbd.plug(conn);
 -                    }
 +                    pbd.plug(conn);
 +                }
+                 }
                  sr.scan(conn);
                  return sr;
              } catch (XenAPIException e) {


[03/50] [abbrv] git commit: updated refs/heads/vmsync to f737019

Posted by ah...@apache.org.
CLOUDSTACK-3255: UI > VPC section - IP Address - Load Balancing - autoscale - fix a bug that failed to get zoneid for createAutoScaleVmProfile API.


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/dfb2e1d4
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/dfb2e1d4
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/dfb2e1d4

Branch: refs/heads/vmsync
Commit: dfb2e1d4f5bbb4e1be0160a4a65dc324ead27fbb
Parents: 7892258
Author: Jessica Wang <je...@apache.org>
Authored: Fri Jun 28 12:16:22 2013 -0700
Committer: Jessica Wang <je...@apache.org>
Committed: Fri Jun 28 12:19:28 2013 -0700

----------------------------------------------------------------------
 ui/scripts/autoscaler.js | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/dfb2e1d4/ui/scripts/autoscaler.js
----------------------------------------------------------------------
diff --git a/ui/scripts/autoscaler.js b/ui/scripts/autoscaler.js
index 05011a3..15a9dac 100644
--- a/ui/scripts/autoscaler.js
+++ b/ui/scripts/autoscaler.js
@@ -1069,7 +1069,7 @@
 					var apiCmd, apiCmdRes;							
 					if(!('multiRules' in args.context)) { //from a new LB 	
             var data = {
-						  zoneid: args.context.networks[0].zoneid,
+						  zoneid: args.context.ipAddresses[0].zoneid, //args.context.networks[0] doesn't have zoneid property, so use args.context.ipAddresses[0] instead
 							serviceofferingid: args.data.serviceOfferingId,
 							templateid: args.data.templateNames,
 							destroyvmgraceperiod: args.data.destroyVMgracePeriod,


[20/50] [abbrv] git commit: updated refs/heads/vmsync to f737019

Posted by ah...@apache.org.
Group storage subsystem components for spring

Breaking down storage components among oss, nonoss and simulator
contexts. The default components are loaded by

OSS - applicationContext + componentContext
NonOSS - applicationContext + nonossComponentContext
Simulator - applicationContext + simulatorComponentContext

provider beans are are selectively overridden for simpler configuration.
Where possible beans are loaded by local reference.

<list merge=true> does not unfortunately work perfectly for bean merging
the providers causing a bit of bloat. Explore for later.

Signed-off-by: Prasanna Santhanam <ts...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/f1134da8
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/f1134da8
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/f1134da8

Branch: refs/heads/vmsync
Commit: f1134da8df122aeb95cb59f3aa47438709efd30f
Parents: fd867d5
Author: Prasanna Santhanam <ts...@apache.org>
Authored: Wed Jun 26 18:59:59 2013 +0530
Committer: Prasanna Santhanam <ts...@apache.org>
Committed: Sat Jun 29 13:53:41 2013 +0530

----------------------------------------------------------------------
 client/pom.xml                                  |   7 +-
 client/tomcatconf/applicationContext.xml.in     | 179 ++++++++++++-------
 client/tomcatconf/componentContext.xml.in       |  27 ++-
 client/tomcatconf/nonossComponentContext.xml.in |  31 +++-
 .../tomcatconf/simulatorComponentContext.xml.in |  49 +++--
 developer/pom.xml                               |   6 +
 .../storage/motion/DataMotionServiceImpl.java   |  18 +-
 .../test/resource/storageContext.xml            |   3 +-
 .../storage/datastore/DataStoreManagerImpl.java |  29 +--
 .../provider/DataStoreProviderManagerImpl.java  |  36 ++--
 10 files changed, 255 insertions(+), 130 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/f1134da8/client/pom.xml
----------------------------------------------------------------------
diff --git a/client/pom.xml b/client/pom.xml
index d1eeb3b..222c520 100644
--- a/client/pom.xml
+++ b/client/pom.xml
@@ -273,7 +273,12 @@
         <groupId>org.apache.cloudstack</groupId>
         <artifactId>cloud-plugin-storage-image-swift</artifactId>
         <version>${project.version}</version>
-    </dependency>            
+    </dependency>
+    <dependency>
+      <groupId>org.apache.cloudstack</groupId>
+      <artifactId>cloud-plugin-storage-image-simulator</artifactId>
+      <version>${project.version}</version>
+    </dependency>
     <dependency>
       <groupId>org.apache.cloudstack</groupId>
       <artifactId>cloud-plugin-syslog-alerts</artifactId>

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/f1134da8/client/tomcatconf/applicationContext.xml.in
----------------------------------------------------------------------
diff --git a/client/tomcatconf/applicationContext.xml.in b/client/tomcatconf/applicationContext.xml.in
index 5c61b4e2..3d5d4fa 100644
--- a/client/tomcatconf/applicationContext.xml.in
+++ b/client/tomcatconf/applicationContext.xml.in
@@ -17,16 +17,14 @@
   under the License.
 -->
 <beans xmlns="http://www.springframework.org/schema/beans"
-  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" 
-  xmlns:context="http://www.springframework.org/schema/context"
-  xmlns:tx="http://www.springframework.org/schema/tx" 
-  xmlns:aop="http://www.springframework.org/schema/aop"
-  xsi:schemaLocation="http://www.springframework.org/schema/beans
+       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+       xmlns:context="http://www.springframework.org/schema/context"
+       xsi:schemaLocation="http://www.springframework.org/schema/beans
                       http://www.springframework.org/schema/beans/spring-beans-3.0.xsd
-                      http://www.springframework.org/schema/tx 
-                      http://www.springframework.org/schema/tx/spring-tx-3.0.xsd
-                      http://www.springframework.org/schema/aop
-                      http://www.springframework.org/schema/aop/spring-aop-3.0.xsd
+
+
+
+
                       http://www.springframework.org/schema/context
                       http://www.springframework.org/schema/context/spring-context-3.0.xsd">                     
 
@@ -613,7 +611,96 @@
   <bean id="OvmGuru" class="com.cloud.ovm.hypervisor.OvmGuru">
     <property name="name" value="OvmGuru"/>
   </bean>
-  
+
+
+  <!--=====================================================================================================-->
+  <!--                                                                                                     -->
+  <!--                           Storage Subsystem Components and Helpers                                  -->
+  <!--                                                                                                     -->
+  <!--=====================================================================================================-->
+
+  <!--Filesystem types-->
+  <bean id="iSCSI" class="org.apache.cloudstack.storage.datastore.type.ISCSI" />
+  <bean id="networkFileSystem" class="org.apache.cloudstack.storage.datastore.type.NetworkFileSystem" />
+
+  <!--Image formats-->
+  <bean id="ISO" class="org.apache.cloudstack.storage.image.format.ISO" />
+  <bean id="OVA" class="org.apache.cloudstack.storage.image.format.OVA" />
+  <bean id="QCOW2" class="org.apache.cloudstack.storage.image.format.QCOW2" />
+  <bean id="VHD" class="org.apache.cloudstack.storage.image.format.VHD" />
+  <bean id="unknown" class="org.apache.cloudstack.storage.image.format.Unknown" />
+
+  <!--Data Store Services -->
+  <bean id="snapshotServiceImpl" class="org.apache.cloudstack.storage.snapshot.SnapshotServiceImpl"
+        depends-on="snapshotStateMachineManagerImpl, snapshotDataFactoryImpl, dataStoreManagerImpl, dataMotionServiceImpl, objectInDataStoreManagerImpl"/>
+  <bean id="templateServiceImpl" class="org.apache.cloudstack.storage.image.TemplateServiceImpl"
+        depends-on="dataObjectManagerImpl, dataStoreManagerImpl, dataMotionServiceImpl, objectInDataStoreManagerImpl, defaultEndPointSelector, templateDataFactoryImpl"/>
+  <bean id="volumeServiceImpl" class="org.apache.cloudstack.storage.volume.VolumeServiceImpl"
+        depends-on="snapshotManagerImpl, dataMotionServiceImpl"/>
+
+  <bean id="xenserverSnapshotStrategy" class="org.apache.cloudstack.storage.snapshot.XenserverSnapshotStrategy" />
+
+  <!--Data Store Factory-->
+  <bean id="templateDataFactoryImpl" class="org.apache.cloudstack.storage.image.TemplateDataFactoryImpl" />
+  <bean id="snapshotDataFactoryImpl" class="org.apache.cloudstack.storage.snapshot.SnapshotDataFactoryImpl"
+        depends-on="dataStoreManagerImpl, snapshotDataStoreDaoImpl, volumeDataFactoryImpl"/>
+  <bean id="volumeDataFactoryImpl" class="org.apache.cloudstack.storage.volume.VolumeDataFactoryImpl" />
+
+  <bean id="objectInDataStoreManagerImpl" class="org.apache.cloudstack.storage.datastore.ObjectInDataStoreManagerImpl" />
+  <bean id="dataObjectManagerImpl" class="org.apache.cloudstack.storage.datastore.DataObjectManagerImpl" />
+
+  <!--Data Store Helpers-->
+  <bean id="primaryDataStoreHelper" class="org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper" />
+  <bean id="imageStoreHelper" class="org.apache.cloudstack.storage.image.datastore.ImageStoreHelper" />
+  <bean id="imageFormatHelper" class="org.apache.cloudstack.storage.image.format.ImageFormatHelper" />
+
+  <bean id="storageCacheRandomAllocator" class="org.apache.cloudstack.storage.cache.allocator.StorageCacheRandomAllocator" />
+  <bean id="storageCacheManagerImpl" class="org.apache.cloudstack.storage.cache.manager.StorageCacheManagerImpl"  />
+  <bean id="StorageCacheReplacementAlgorithm" class="org.apache.cloudstack.storage.cache.manager.StorageCacheReplacementAlgorithmLRU" />
+
+  <bean id="snapshotStateMachineManagerImpl" class="org.apache.cloudstack.storage.snapshot.SnapshotStateMachineManagerImpl" />
+  <bean id="defaultEndPointSelector" class="org.apache.cloudstack.storage.endpoint.DefaultEndPointSelector" />
+
+
+  <bean id="ancientDataMotionStrategy" class="org.apache.cloudstack.storage.motion.AncientDataMotionStrategy" />
+  <bean id="xenserverStorageMotionStrategy" class="org.apache.cloudstack.storage.motion.XenServerStorageMotionStrategy" />
+
+  <!--Data Motion Services-->
+  <bean id="dataMotionServiceImpl" class="org.apache.cloudstack.storage.motion.DataMotionServiceImpl">
+    <property name="strategies">
+      <list>
+        <ref local="ancientDataMotionStrategy"/>
+        <ref local="xenserverStorageMotionStrategy"/>
+      </list>
+    </property>
+  </bean>
+
+  <!--
+    Data Store Provider Manager
+  -->
+  <bean id="primaryDataStoreProviderMgr"
+        class="org.apache.cloudstack.storage.datastore.manager.PrimaryDataStoreProviderManagerImpl"/>
+  <bean id="imageStoreProviderMgr" class="org.apache.cloudstack.storage.image.manager.ImageStoreProviderManagerImpl"/>
+
+  <bean id="dataStoreManagerImpl" class="org.apache.cloudstack.storage.datastore.DataStoreManagerImpl"
+        depends-on="dataStoreProviderManager">
+    <property name="primaryStoreMgr" ref="primaryDataStoreProviderMgr"/>
+    <property name="imageDataStoreMgr" ref="imageStoreProviderMgr"/>
+  </bean>
+
+  <bean id="CloudStackPrimaryDataStoreProviderImpl"
+        class="org.apache.cloudstack.storage.datastore.provider.CloudStackPrimaryDataStoreProviderImpl"/>
+
+  <bean id="dataStoreProviderManager"
+        class="org.apache.cloudstack.storage.datastore.provider.DataStoreProviderManagerImpl">
+    <property name="providers">
+      <list>
+        <!--Data Store Providers-->
+        <ref bean="CloudStackPrimaryDataStoreProviderImpl"/>
+      </list>
+    </property>
+  </bean>
+
   <!--
     Managers
   -->
@@ -622,15 +709,15 @@
     <property name="UserPasswordEncoders" value="#{userPasswordEncoders.Adapters}" />
     <property name="SecurityCheckers" value="#{securityCheckers.Adapters}" />
   </bean>
-  
+
   <bean id="managementServerImpl" class ="com.cloud.server.ManagementServerImpl">
     <property name="UserAuthenticators" value="#{userAuthenticators.Adapters}" />
     <property name="UserPasswordEncoders" value="#{userPasswordEncoders.Adapters}" />
     <property name="HostAllocators" value="#{hostAllocators.Adapters}" />
-	<property name="AffinityGroupProcessors" value="#{affinityProcessors.Adapters}" />
-	<property name="Planners" value="#{deploymentPlanners.Adapters}" />
+    <property name="AffinityGroupProcessors" value="#{affinityProcessors.Adapters}" />
+    <property name="Planners" value="#{deploymentPlanners.Adapters}" />
   </bean>
-  
+
   <bean id="storageManagerImpl" class="com.cloud.storage.StorageManagerImpl">
     <property name="StoragePoolAllocators" value="#{storagePoolAllocators.Adapters}" />
   </bean>
@@ -638,7 +725,7 @@
   <bean id="FirstFitPlanner" class="com.cloud.deploy.FirstFitPlanner">
     <property name="name" value="FirstFitPlanner"/>
   </bean>
-  
+
   <bean id="resourceManagerImpl" class="com.cloud.resource.ResourceManagerImpl" >
     <property name="PodAllocators" value="#{podAllocators.Adapters}" />
     <property name="Discoverers" value="#{resourceDiscoverers.Adapters}" />
@@ -653,7 +740,7 @@
     <property name="HostAllocators" value="#{hostAllocators.Adapters}" />
     <property name="Planners" value="#{deploymentPlanners.Adapters}" />
   </bean>
-  
+
   <bean id="networkManagerImpl" class="com.cloud.network.NetworkManagerImpl" >
     <property name="NetworkGurus" value="#{networkGurus.Adapters}" />
     <property name="NetworkElements" value="#{networkElements.Adapters}" />
@@ -664,9 +751,9 @@
   <bean id="networkModelImpl" class="com.cloud.network.NetworkModelImpl">
     <property name="NetworkElements" value="#{networkElements.Adapters}" />
   </bean>
-   
+
   <bean id="configurationServerImpl" class="com.cloud.server.ConfigurationServerImpl" />
-   
+
   <bean id="clusterManagerImpl" class="com.cloud.cluster.ClusterManagerImpl" />
   <bean id="clusteredAgentManagerImpl" class="com.cloud.agent.manager.ClusteredAgentManagerImpl" />
 
@@ -685,7 +772,7 @@
   <bean id="capacityManagerImpl" class="com.cloud.capacity.CapacityManagerImpl" />
   <bean id="clusterFenceManagerImpl" class="com.cloud.cluster.ClusterFenceManagerImpl" />
   <bean id="configurationManagerImpl" class="com.cloud.configuration.ConfigurationManagerImpl" />
-  <bean id="dataStoreProviderManagerImpl" class="org.apache.cloudstack.storage.datastore.provider.DataStoreProviderManagerImpl" />
+
   <bean id="elasticLoadBalancerManagerImpl" class="com.cloud.network.lb.ElasticLoadBalancerManagerImpl" />
   <bean id="entityManagerImpl" class="com.cloud.dao.EntityManagerImpl" />
   <bean id="externalDeviceUsageManagerImpl" class="com.cloud.network.ExternalDeviceUsageManagerImpl" />
@@ -724,17 +811,14 @@
   <bean id="vpcManagerImpl" class="com.cloud.network.vpc.VpcManagerImpl" />
   <bean id="vpcVirtualNetworkApplianceManagerImpl" class="com.cloud.network.router.VpcVirtualNetworkApplianceManagerImpl" />
 
+
+
   <!--
     Misc components
   -->
   <bean id="actionEventUtils" class="com.cloud.event.ActionEventUtils" />
   <bean id="agentMonitor" class="com.cloud.agent.manager.AgentMonitor" />
   <bean id="alertGenerator" class="com.cloud.event.AlertGenerator" />
-  <bean id="ancientDataMotionStrategy" class="org.apache.cloudstack.storage.motion.AncientDataMotionStrategy" />
-  <bean id="storageCacheManagerImpl" class="org.apache.cloudstack.storage.cache.manager.StorageCacheManagerImpl"  />
-  <bean id="storageCacheRandomAllocator" class="org.apache.cloudstack.storage.cache.allocator.StorageCacheRandomAllocator" />
-  <bean id="xenserverSnapshotStrategy" class="org.apache.cloudstack.storage.snapshot.XenserverSnapshotStrategy" />
-  <bean id="xenserverStorageMotionStrategy" class="org.apache.cloudstack.storage.motion.XenServerStorageMotionStrategy" />
   <bean id="apiDBUtils" class="com.cloud.api.ApiDBUtils" />
   <bean id="apiDiscoveryServiceImpl" class="org.apache.cloudstack.discovery.ApiDiscoveryServiceImpl" />
   <bean id="apiDispatcher" class="com.cloud.api.ApiDispatcher" />
@@ -747,46 +831,24 @@
   <bean id="consoleProxyServlet" class="com.cloud.servlet.ConsoleProxyServlet" />
   <bean id="dataCenterResourceManagerImpl" class="org.apache.cloudstack.engine.datacenter.entity.api.DataCenterResourceManagerImpl" />
   <bean id="dataDisk" class="org.apache.cloudstack.engine.subsystem.api.storage.type.DataDisk" />
-  <bean id="dataMotionServiceImpl" class="org.apache.cloudstack.storage.motion.DataMotionServiceImpl" />
-  <bean id="dataObjectManagerImpl" class="org.apache.cloudstack.storage.datastore.DataObjectManagerImpl" />
-  <bean id="dataStoreManagerImpl" class="org.apache.cloudstack.storage.datastore.DataStoreManagerImpl" />
-  <bean id="defaultEndPointSelector" class="org.apache.cloudstack.storage.endpoint.DefaultEndPointSelector" />
-  <bean id="primaryDataStoreProviderManagerImpl" class="org.apache.cloudstack.storage.datastore.manager.PrimaryDataStoreProviderManagerImpl" />
-  <bean id="imageStoreProviderManagerImpl" class="org.apache.cloudstack.storage.image.manager.ImageStoreProviderManagerImpl" />  
+
+
   <bean id="eventUtils" class="com.cloud.event.EventUtils" />
-  <bean id="iSCSI" class="org.apache.cloudstack.storage.datastore.type.ISCSI" />
-  <bean id="ISO" class="org.apache.cloudstack.storage.image.format.ISO" />
-  <bean id="templateDataFactoryImpl" class="org.apache.cloudstack.storage.image.TemplateDataFactoryImpl" />
-  <bean id="imageStoreHelper" class="org.apache.cloudstack.storage.image.datastore.ImageStoreHelper" />
-  <bean id="imageFormatHelper" class="org.apache.cloudstack.storage.image.format.ImageFormatHelper" />
-  <bean id="templateServiceImpl" class="org.apache.cloudstack.storage.image.TemplateServiceImpl" />
+  <bean id="podRestService" class="org.apache.cloudstack.engine.rest.service.api.PodRestService" />
   <bean id="iso" class="org.apache.cloudstack.engine.subsystem.api.storage.type.Iso" />
-  <bean id="networkFileSystem" class="org.apache.cloudstack.storage.datastore.type.NetworkFileSystem" />
   <bean id="networkRestService" class="org.apache.cloudstack.engine.rest.service.api.NetworkRestService" />
-  <bean id="OVA" class="org.apache.cloudstack.storage.image.format.OVA" />
-  <bean id="objectInDataStoreManagerImpl" class="org.apache.cloudstack.storage.datastore.ObjectInDataStoreManagerImpl" />
-  <bean id="podRestService" class="org.apache.cloudstack.engine.rest.service.api.PodRestService" />
-  <bean id="primaryDataStoreHelper" class="org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper" />
   <bean id="provisioningServiceImpl" class="org.apache.cloudstack.engine.service.api.ProvisioningServiceImpl" />
-  <bean id="QCOW2" class="org.apache.cloudstack.storage.image.format.QCOW2" />
-  <bean id="registerCompleteServlet" class="com.cloud.servlet.RegisterCompleteServlet" />
   <bean id="rootDisk" class="org.apache.cloudstack.engine.subsystem.api.storage.type.RootDisk" />
-  <bean id="snapshotDataFactoryImpl" class="org.apache.cloudstack.storage.snapshot.SnapshotDataFactoryImpl" />
-  <bean id="snapshotServiceImpl" class="org.apache.cloudstack.storage.snapshot.SnapshotServiceImpl" />
-  <bean id="snapshotStateMachineManagerImpl" class="org.apache.cloudstack.storage.snapshot.SnapshotStateMachineManagerImpl" />
+  <bean id="registerCompleteServlet" class="com.cloud.servlet.RegisterCompleteServlet" />
   <bean id="statsCollector" class="com.cloud.server.StatsCollector" />
   <bean id="storagePoolAutomationImpl" class="com.cloud.storage.StoragePoolAutomationImpl" />
-  <bean id="unknown" class="org.apache.cloudstack.storage.image.format.Unknown" />
   <bean id="usageEventUtils" class="com.cloud.event.UsageEventUtils" />
   <bean id="userContextInitializer" class="com.cloud.user.UserContextInitializer" />
-  <bean id="VHD" class="org.apache.cloudstack.storage.image.format.VHD" />
   <bean id="vMEntityManagerImpl" class="org.apache.cloudstack.engine.cloud.entity.api.VMEntityManagerImpl" />
   <bean id="virtualMachineEntityFactory" class="org.apache.cloudstack.engine.cloud.entity.api.VirtualMachineEntityFactory" />
   <bean id="virtualMachineEntityImpl" class="org.apache.cloudstack.engine.cloud.entity.api.VirtualMachineEntityImpl" />
   <bean id="virtualMachineRestService" class="org.apache.cloudstack.engine.rest.service.api.VirtualMachineRestService" />
-  <bean id="volumeDataFactoryImpl" class="org.apache.cloudstack.storage.volume.VolumeDataFactoryImpl" />
   <bean id="volumeRestService" class="org.apache.cloudstack.engine.rest.service.api.VolumeRestService" />
-  <bean id="volumeServiceImpl" class="org.apache.cloudstack.storage.volume.VolumeServiceImpl" />
   <bean id="volumeTypeHelper" class="org.apache.cloudstack.engine.subsystem.api.storage.type.VolumeTypeHelper" />
   <bean id="zoneRestService" class="org.apache.cloudstack.engine.rest.service.api.ZoneRestService" />
   <bean id="cloudZonesStartupProcessor" class="com.cloud.hypervisor.CloudZonesStartupProcessor" />
@@ -796,21 +858,18 @@
   <bean id="downloadMonitorImpl" class="com.cloud.storage.download.DownloadMonitorImpl" />
   <bean id="lBHealthCheckManagerImpl" class="com.cloud.network.lb.LBHealthCheckManagerImpl" />
   <bean id="mockAgentManagerImpl" class="com.cloud.agent.manager.MockAgentManagerImpl" />
+
+  <bean id="volumeManagerImpl" class="com.cloud.storage.VolumeManagerImpl" />
+  <bean id="ApplicationLoadBalancerService" class="org.apache.cloudstack.network.lb.ApplicationLoadBalancerManagerImpl" />
+  <bean id="InternalLoadBalancerVMManager" class="org.apache.cloudstack.network.lb.InternalLoadBalancerVMManagerImpl" />
+
+  <!--Simulator Components-->
+  <!--<bean id="simulatorImageStoreProvider" class="org.apache.cloudstack.storage.datastore.provider.SimulatorImageStoreProviderImpl" />-->
   <bean id="mockStorageManagerImpl" class="com.cloud.agent.manager.MockStorageManagerImpl" />
   <bean id="mockVmManagerImpl" class="com.cloud.agent.manager.MockVmManagerImpl" />
   <bean id="mockNetworkManagerImpl" class="com.cloud.agent.manager.MockNetworkManagerImpl" />
   <bean id="simulatorManagerImpl" class="com.cloud.agent.manager.SimulatorManagerImpl" />
   <bean id="vMSnapshotManagerImpl" class="com.cloud.vm.snapshot.VMSnapshotManagerImpl" />
-  <bean id="volumeManagerImpl" class="com.cloud.storage.VolumeManagerImpl" />
-  <bean id="ClassicalPrimaryDataStoreProvider" class="org.apache.cloudstack.storage.datastore.provider.CloudStackPrimaryDataStoreProviderImpl" />
-  <bean id="cloudStackImageStoreProviderImpl" class="org.apache.cloudstack.storage.datastore.provider.CloudStackImageStoreProviderImpl" />
-  <bean id="s3ImageStoreProviderImpl" class="org.apache.cloudstack.storage.datastore.provider.S3ImageStoreProviderImpl" />
-  <bean id="swiftImageStoreProviderImpl" class="org.apache.cloudstack.storage.datastore.provider.SwiftImageStoreProviderImpl" />
-  <bean id="solidFireDataStoreProvider" class="org.apache.cloudstack.storage.datastore.provider.SolidfirePrimaryDataStoreProvider" />
-  <bean id="simulatorImageStoreProvider" class="org.apache.cloudstack.storage.datastore.provider.SimulatorImageStoreProviderImpl" />
-  <bean id="ApplicationLoadBalancerService" class="org.apache.cloudstack.network.lb.ApplicationLoadBalancerManagerImpl" />
-  <bean id="InternalLoadBalancerVMManager" class="org.apache.cloudstack.network.lb.InternalLoadBalancerVMManagerImpl" />
-  <bean id="StorageCacheReplacementAlgorithm" class="org.apache.cloudstack.storage.cache.manager.StorageCacheReplacementAlgorithmLRU" />
 
 
 <!--=======================================================================================================-->

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/f1134da8/client/tomcatconf/componentContext.xml.in
----------------------------------------------------------------------
diff --git a/client/tomcatconf/componentContext.xml.in b/client/tomcatconf/componentContext.xml.in
index d2c2dee..a149327 100644
--- a/client/tomcatconf/componentContext.xml.in
+++ b/client/tomcatconf/componentContext.xml.in
@@ -18,9 +18,9 @@
   under the License.
 -->
 <beans xmlns="http://www.springframework.org/schema/beans"
-  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" 
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
   xmlns:context="http://www.springframework.org/schema/context"
-  xmlns:tx="http://www.springframework.org/schema/tx" 
+  xmlns:tx="http://www.springframework.org/schema/tx"
   xmlns:aop="http://www.springframework.org/schema/aop"
   xsi:schemaLocation="http://www.springframework.org/schema/beans
                       http://www.springframework.org/schema/beans/spring-beans-3.0.xsd
@@ -31,6 +31,8 @@
                       http://www.springframework.org/schema/context
                       http://www.springframework.org/schema/context/spring-context-3.0.xsd">
 
+  <context:annotation-config />
+
 
 <!--
 
@@ -95,7 +97,26 @@
 
   It determines whether or not a adapter is activated or how it is loaded in order in its managing provider,
 
---> 
+-->
+  <bean id="CloudStackImageStoreProviderImpl"
+        class="org.apache.cloudstack.storage.datastore.provider.CloudStackImageStoreProviderImpl"/>
+  <bean id="S3ImageStoreProviderImpl"
+        class="org.apache.cloudstack.storage.datastore.provider.S3ImageStoreProviderImpl"/>
+  <bean id="SwiftImageStoreProviderImpl"
+        class="org.apache.cloudstack.storage.datastore.provider.SwiftImageStoreProviderImpl"/>
+
+  <!--Storage Providers-->
+  <bean id="dataStoreProviderManager"
+        class="org.apache.cloudstack.storage.datastore.provider.DataStoreProviderManagerImpl">
+    <property name="providers">
+      <list merge="true">
+        <ref bean="CloudStackPrimaryDataStoreProviderImpl"/>
+        <ref local="CloudStackImageStoreProviderImpl"/>
+        <ref local="S3ImageStoreProviderImpl"/>
+        <ref local="SwiftImageStoreProviderImpl"/>
+      </list>
+    </property>
+  </bean>
 
   <!-- Security adapters -->
   <bean id="userAuthenticators" class="com.cloud.utils.component.AdapterList">

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/f1134da8/client/tomcatconf/nonossComponentContext.xml.in
----------------------------------------------------------------------
diff --git a/client/tomcatconf/nonossComponentContext.xml.in b/client/tomcatconf/nonossComponentContext.xml.in
index 7b65e71..4ea1d31 100644
--- a/client/tomcatconf/nonossComponentContext.xml.in
+++ b/client/tomcatconf/nonossComponentContext.xml.in
@@ -80,7 +80,6 @@
   <bean id="VmwareDatacenterDaoImpl" class="com.cloud.hypervisor.vmware.dao.VmwareDatacenterDaoImpl" />
   <bean id="VmwareDatacenterZoneMapDaoImpl" class="com.cloud.hypervisor.vmware.dao.VmwareDatacenterZoneMapDaoImpl" />
   <bean id="LegacyZoneDaoImpl" class="com.cloud.hypervisor.vmware.dao.LegacyZoneDaoImpl" />
-  <bean id="vmwareStorageMotionStrategy" class="org.apache.cloudstack.storage.motion.VmwareStorageMotionStrategy"/>
 
   <!--
     Nicira support components
@@ -182,7 +181,35 @@
     
   It determines whether or not a adapter is activated or how it is loaded in order in its managing provider,
      
---> 
+-->
+
+
+  <!--Motion Strategies-->
+  <bean id="vmwareStorageMotionStrategy" class="org.apache.cloudstack.storage.motion.VmwareStorageMotionStrategy" />
+  <bean id="dataMotionServiceImpl" class="org.apache.cloudstack.storage.motion.DataMotionServiceImpl">
+    <property name="strategies">
+      <list>
+        <ref bean="ancientDataMotionStrategy"/>
+        <ref bean="xenserverStorageMotionStrategy"/>
+        <ref local="vmwareStorageMotionStrategy"/>
+      </list>
+    </property>
+  </bean>
+
+  <!--<bean id="SolidfirePrimaryDataStoreProvider" class="org.apache.cloudstack.storage.datastore.provider.SolidfirePrimaryDataStoreProvider"/>-->
+  <!--Storage Providers-->
+  <bean id="dataStoreProviderManager"
+        class="org.apache.cloudstack.storage.datastore.provider.DataStoreProviderManagerImpl">
+    <property name="providers">
+      <list merge="true">
+        <ref bean="CloudStackPrimaryDataStoreProviderImpl"/>
+        <ref bean="CloudStackImageStoreProviderImpl"/>
+        <ref bean="S3ImageStoreProviderImpl"/>
+        <ref bean="SwiftImageStoreProviderImpl"/>
+        <!--<ref local="SolidfirePrimaryDataStoreProvider"/>-->
+      </list>
+    </property>
+  </bean>
 
   <!-- Security adapters -->
   <bean id="userAuthenticators" class="com.cloud.utils.component.AdapterList">

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/f1134da8/client/tomcatconf/simulatorComponentContext.xml.in
----------------------------------------------------------------------
diff --git a/client/tomcatconf/simulatorComponentContext.xml.in b/client/tomcatconf/simulatorComponentContext.xml.in
index cfe0a9a..74312fe 100644
--- a/client/tomcatconf/simulatorComponentContext.xml.in
+++ b/client/tomcatconf/simulatorComponentContext.xml.in
@@ -18,17 +18,8 @@
   -->
 <beans xmlns="http://www.springframework.org/schema/beans"
        xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-       xmlns:context="http://www.springframework.org/schema/context"
-       xmlns:tx="http://www.springframework.org/schema/tx"
-       xmlns:aop="http://www.springframework.org/schema/aop"
        xsi:schemaLocation="http://www.springframework.org/schema/beans
-                      http://www.springframework.org/schema/beans/spring-beans-3.0.xsd
-                      http://www.springframework.org/schema/tx
-                      http://www.springframework.org/schema/tx/spring-tx-3.0.xsd
-                      http://www.springframework.org/schema/aop
-                      http://www.springframework.org/schema/aop/spring-aop-3.0.xsd
-                      http://www.springframework.org/schema/context
-                      http://www.springframework.org/schema/context/spring-context-3.0.xsd">
+                      http://www.springframework.org/schema/beans/spring-beans-3.0.xsd">
 
   <!--
     OSS deployment component configuration
@@ -36,10 +27,6 @@
   <bean id="databaseUpgradeChecker" class="com.cloud.upgrade.DatabaseUpgradeChecker"/>
   <bean id="configurationDaoImpl" class="com.cloud.configuration.dao.ConfigurationDaoImpl"/>
 
-  <!-- simulator components -->
-  <!--<bean id="SimulatorSecondaryDiscoverer" class="com.cloud.resource.SimulatorSecondaryDiscoverer">-->
-    <!--<property name="name" value="SecondaryStorage"/>-->
-  <!--</bean>-->
   <bean id="SimulatorDiscoverer" class="com.cloud.resource.SimulatorDiscoverer">
     <property name="name" value="Simulator Agent"/>
   </bean>
@@ -47,6 +34,23 @@
     <property name="name" value="Simulator Guru"/>
   </bean>
 
+  <bean id="SimulatorImageStoreProviderImpl"
+        class="org.apache.cloudstack.storage.datastore.provider.SimulatorImageStoreProviderImpl"/>
+
+  <!--Storage Providers-->
+  <!--<bean id="dataStoreProviderManagerChild" parent="dataStoreProviderManager">-->
+  <bean id="dataStoreProviderManager"
+        class="org.apache.cloudstack.storage.datastore.provider.DataStoreProviderManagerImpl">
+    <property name="providers">
+      <!--Override the cloudstack default image store provider to use simulator defined provider-->
+      <list>
+        <!--Data Store Providers-->
+        <ref bean="CloudStackPrimaryDataStoreProviderImpl"/>
+        <ref bean="SimulatorImageStoreProviderImpl"/>
+      </list>
+    </property>
+  </bean>
+
   <!--
     Managers & pluggable adapters configuration under OSS deployment
   -->
@@ -85,7 +89,6 @@
     <property name="Adapters">
       <list>
         <ref bean="SimulatorDiscoverer"/>
-        <ref bean="SimulatorSecondaryDiscoverer"/>
         <ref bean="XcpServerDiscoverer"/>
         <ref bean="SecondaryStorageDiscoverer"/>
         <ref bean="KvmServerDiscoverer"/>
@@ -124,10 +127,6 @@
         <ref bean="FirstFitPlanner"/>
         <ref bean="UserDispersingPlanner"/>
         <ref bean="UserConcentratedPodPlanner"/>
-
-        <!--
-                  <ref bean="BareMetalPlanner" />
-        -->
       </list>
     </property>
   </bean>
@@ -175,9 +174,6 @@
       <list>
         <ref bean="VirtualRouter"/>
         <ref bean="VpcVirtualRouter"/>
-        <!--
-                  <ref bean="BareMetalDhcp"/>
-        -->
       </list>
     </property>
   </bean>
@@ -206,11 +202,6 @@
         <ref bean="SecurityGroupProvider"/>
         <ref bean="VpcVirtualRouter"/>
         <ref bean="InternalLbVm"/>
-        <!--
-                  <ref bean="BareMetalDhcp"/>
-                  <ref bean="BareMetalPxe"/>
-                  <ref bean="BareMetalUserdata"/>
-        -->
       </list>
     </property>
   </bean>
@@ -218,7 +209,8 @@
   <!--
     Image Store
   -->
-  <!--<bean id="simulatorImageStoreProvider" class="org.apache.cloudstack.storage.datastore.provider.SimulatorImageStoreProviderImpl" />-->
+
+
 
   <bean id="GlobalLoadBalancingRulesServiceImpl"
         class="org.apache.cloudstack.region.gslb.GlobalLoadBalancingRulesServiceImpl"/>
@@ -244,5 +236,4 @@
     <property name="name" value="ExplicitDedicationProcessor"/>
     <property name="type" value="ExplicitDedication"/>
   </bean>
-
 </beans>

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/f1134da8/developer/pom.xml
----------------------------------------------------------------------
diff --git a/developer/pom.xml b/developer/pom.xml
index a680b8a..e9284f4 100644
--- a/developer/pom.xml
+++ b/developer/pom.xml
@@ -58,6 +58,12 @@
       <version>${project.version}</version>
       <scope>compile</scope>
     </dependency>
+    <dependency>
+      <groupId>org.apache.cloudstack</groupId>
+      <artifactId>cloud-plugin-storage-image-simulator</artifactId>
+      <version>${project.version}</version>
+      <scope>compile</scope>
+    </dependency>
   </dependencies>
   <build>
     <defaultGoal>install</defaultGoal>

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/f1134da8/engine/storage/datamotion/src/org/apache/cloudstack/storage/motion/DataMotionServiceImpl.java
----------------------------------------------------------------------
diff --git a/engine/storage/datamotion/src/org/apache/cloudstack/storage/motion/DataMotionServiceImpl.java b/engine/storage/datamotion/src/org/apache/cloudstack/storage/motion/DataMotionServiceImpl.java
index 22de0b2..c1cbdc7 100644
--- a/engine/storage/datamotion/src/org/apache/cloudstack/storage/motion/DataMotionServiceImpl.java
+++ b/engine/storage/datamotion/src/org/apache/cloudstack/storage/motion/DataMotionServiceImpl.java
@@ -18,11 +18,9 @@
  */
 package org.apache.cloudstack.storage.motion;
 
-import java.util.List;
-import java.util.Map;
-
-import javax.inject.Inject;
-
+import com.cloud.agent.api.to.VirtualMachineTO;
+import com.cloud.host.Host;
+import com.cloud.utils.exception.CloudRuntimeException;
 import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionService;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionStrategy;
@@ -32,9 +30,9 @@ import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
 import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
 import org.springframework.stereotype.Component;
 
-import com.cloud.agent.api.to.VirtualMachineTO;
-import com.cloud.host.Host;
-import com.cloud.utils.exception.CloudRuntimeException;
+import javax.inject.Inject;
+import java.util.List;
+import java.util.Map;
 
 @Component
 public class DataMotionServiceImpl implements DataMotionService {
@@ -72,4 +70,8 @@ public class DataMotionServiceImpl implements DataMotionService {
         }
         throw new CloudRuntimeException("can't find strategy to move data");
     }
+
+    public void setStrategies(List<DataMotionStrategy> strategies) {
+        this.strategies = strategies;
+    }
 }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/f1134da8/engine/storage/integration-test/test/resource/storageContext.xml
----------------------------------------------------------------------
diff --git a/engine/storage/integration-test/test/resource/storageContext.xml b/engine/storage/integration-test/test/resource/storageContext.xml
index 9f4f102..f9c891a 100644
--- a/engine/storage/integration-test/test/resource/storageContext.xml
+++ b/engine/storage/integration-test/test/resource/storageContext.xml
@@ -81,7 +81,8 @@
   <bean id="ClassicalPrimaryDataStoreProvider" class="org.apache.cloudstack.storage.datastore.provider.CloudStackPrimaryDataStoreProviderImpl" />
   <bean id="cloudStackImageStoreProviderImpl" class="org.apache.cloudstack.storage.datastore.provider.CloudStackImageStoreProviderImpl" />
   <bean id="s3ImageStoreProviderImpl" class="org.apache.cloudstack.storage.datastore.provider.S3ImageStoreProviderImpl" />
-  <bean id="swiftImageStoreProviderImpl" class="org.apache.cloudstack.storage.datastore.provider.SwiftImageStoreProviderImpl" />  
+  <bean id="swiftImageStoreProviderImpl" class="org.apache.cloudstack.storage.datastore.provider.SwiftImageStoreProviderImpl" />
+  <bean id="simulatorImageStoreProviderImpl" class="org.apache.cloudstack.storage.datastore.provider.SimulatorImageStoreProviderImpl" />
   <bean id="BAREMETAL" class="org.apache.cloudstack.storage.image.format.BAREMETAL" />
   <bean id="storagePoolAutomationImpl" class="com.cloud.storage.StoragePoolAutomationImpl" />
   <bean id="AccountGuestVlanMapDaoImpl" class="com.cloud.network.dao.AccountGuestVlanMapDaoImpl" />

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/f1134da8/engine/storage/src/org/apache/cloudstack/storage/datastore/DataStoreManagerImpl.java
----------------------------------------------------------------------
diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/DataStoreManagerImpl.java b/engine/storage/src/org/apache/cloudstack/storage/datastore/DataStoreManagerImpl.java
index b92f92f..71df262 100644
--- a/engine/storage/src/org/apache/cloudstack/storage/datastore/DataStoreManagerImpl.java
+++ b/engine/storage/src/org/apache/cloudstack/storage/datastore/DataStoreManagerImpl.java
@@ -18,11 +18,9 @@
  */
 package org.apache.cloudstack.storage.datastore;
 
-import java.util.List;
-import java.util.Map;
-
-import javax.inject.Inject;
-
+import com.cloud.storage.DataStoreRole;
+import com.cloud.utils.exception.CloudRuntimeException;
+import edu.emory.mathcs.backport.java.util.Collections;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
 import org.apache.cloudstack.engine.subsystem.api.storage.Scope;
@@ -30,22 +28,20 @@ import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
 import org.apache.cloudstack.storage.image.datastore.ImageStoreProviderManager;
 import org.springframework.stereotype.Component;
 
-import com.cloud.storage.DataStoreRole;
-import com.cloud.utils.exception.CloudRuntimeException;
-
-import edu.emory.mathcs.backport.java.util.Collections;
+import javax.inject.Inject;
+import java.util.List;
 
 @Component
 public class DataStoreManagerImpl implements DataStoreManager {
     @Inject
-    PrimaryDataStoreProviderManager primaryStorMgr;
+    PrimaryDataStoreProviderManager primaryStoreMgr;
     @Inject
     ImageStoreProviderManager imageDataStoreMgr;
 
     @Override
     public DataStore getDataStore(long storeId, DataStoreRole role) {
         if (role == DataStoreRole.Primary) {
-            return primaryStorMgr.getPrimaryDataStore(storeId);
+            return primaryStoreMgr.getPrimaryDataStore(storeId);
         } else if (role == DataStoreRole.Image) {
             return imageDataStoreMgr.getImageStore(storeId);
         } else if (role == DataStoreRole.ImageCache) {
@@ -57,7 +53,7 @@ public class DataStoreManagerImpl implements DataStoreManager {
     @Override
     public DataStore getDataStore(String uuid, DataStoreRole role) {
         if (role == DataStoreRole.Primary) {
-            return primaryStorMgr.getPrimaryDataStore(uuid);
+            return primaryStoreMgr.getPrimaryDataStore(uuid);
         } else if (role == DataStoreRole.Image) {
             return imageDataStoreMgr.getImageStore(uuid);
         }
@@ -81,7 +77,7 @@ public class DataStoreManagerImpl implements DataStoreManager {
 
     @Override
     public DataStore getPrimaryDataStore(long storeId) {
-        return primaryStorMgr.getPrimaryDataStore(storeId);
+        return primaryStoreMgr.getPrimaryDataStore(storeId);
     }
 
     @Override
@@ -94,4 +90,11 @@ public class DataStoreManagerImpl implements DataStoreManager {
         return imageDataStoreMgr.listImageStores();
     }
 
+    public void setPrimaryStoreMgr(PrimaryDataStoreProviderManager primaryStoreMgr) {
+        this.primaryStoreMgr = primaryStoreMgr;
+    }
+
+    public void setImageDataStoreMgr(ImageStoreProviderManager imageDataStoreMgr) {
+        this.imageDataStoreMgr = imageDataStoreMgr;
+    }
 }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/f1134da8/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/DataStoreProviderManagerImpl.java
----------------------------------------------------------------------
diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/DataStoreProviderManagerImpl.java b/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/DataStoreProviderManagerImpl.java
index 50238a8..92b4e7a 100644
--- a/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/DataStoreProviderManagerImpl.java
+++ b/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/DataStoreProviderManagerImpl.java
@@ -18,15 +18,8 @@
  */
 package org.apache.cloudstack.storage.datastore.provider;
 
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import javax.inject.Inject;
-import javax.naming.ConfigurationException;
-
+import com.cloud.exception.InvalidParameterValueException;
+import com.cloud.utils.component.ManagerBase;
 import org.apache.cloudstack.api.response.StorageProviderResponse;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider.DataStoreProviderType;
@@ -38,8 +31,13 @@ import org.apache.cloudstack.storage.image.datastore.ImageStoreProviderManager;
 import org.apache.log4j.Logger;
 import org.springframework.stereotype.Component;
 
-import com.cloud.exception.InvalidParameterValueException;
-import com.cloud.utils.component.ManagerBase;
+import javax.inject.Inject;
+import javax.naming.ConfigurationException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
 
 @Component
 public class DataStoreProviderManagerImpl extends ManagerBase implements DataStoreProviderManager {
@@ -50,7 +48,7 @@ public class DataStoreProviderManagerImpl extends ManagerBase implements DataSto
     @Inject
     PrimaryDataStoreProviderManager primaryDataStoreProviderMgr;
     @Inject
-    ImageStoreProviderManager imageDataStoreProviderMgr;
+    ImageStoreProviderManager imageStoreProviderMgr;
 
     @Override
     public DataStoreProvider getDataStoreProvider(String name) {
@@ -125,7 +123,7 @@ public class DataStoreProviderManagerImpl extends ManagerBase implements DataSto
                             (PrimaryDataStoreDriver) provider.getDataStoreDriver());
                     primaryDataStoreProviderMgr.registerHostListener(provider.getName(), provider.getHostListener());
                 } else if (types.contains(DataStoreProviderType.IMAGE)) {
-                    imageDataStoreProviderMgr.registerDriver(provider.getName(),
+                    imageStoreProviderMgr.registerDriver(provider.getName(),
                             (ImageStoreDriver) provider.getDataStoreDriver());
                 }
             } catch (Exception e) {
@@ -168,4 +166,16 @@ public class DataStoreProviderManagerImpl extends ManagerBase implements DataSto
             throw new InvalidParameterValueException("Invalid parameter: " + type);
         }
     }
+
+    public void setProviders(List<DataStoreProvider> providers) {
+        this.providers = providers;
+    }
+
+    public void setPrimaryDataStoreProviderMgr(PrimaryDataStoreProviderManager primaryDataStoreProviderMgr) {
+        this.primaryDataStoreProviderMgr = primaryDataStoreProviderMgr;
+    }
+
+    public void setImageStoreProviderMgr(ImageStoreProviderManager imageDataStoreProviderMgr) {
+        this.imageStoreProviderMgr = imageDataStoreProviderMgr;
+    }
 }


[02/50] [abbrv] git commit: updated refs/heads/vmsync to f737019

Posted by ah...@apache.org.
CLOUDSTACK-2385: template download fails with Unexpected failure in Vmware.

Description:

    Putting in a fix to allow download of guest VM templates that are available
    across zones.

Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/78922589
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/78922589
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/78922589

Branch: refs/heads/vmsync
Commit: 78922589bbdc7914b9d4ce3b97a9fcf03d4b7b57
Parents: 1c924e5
Author: Vijayendra Bhamidipati <vi...@citrix.com>
Authored: Thu Jun 27 12:37:20 2013 -0700
Committer: Devdeep Singh <de...@gmail.com>
Committed: Fri Jun 28 23:08:36 2013 +0530

----------------------------------------------------------------------
 .../template/HypervisorTemplateAdapter.java     | 104 ++++++++++---------
 1 file changed, 55 insertions(+), 49 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/78922589/server/src/com/cloud/template/HypervisorTemplateAdapter.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/template/HypervisorTemplateAdapter.java b/server/src/com/cloud/template/HypervisorTemplateAdapter.java
index 92148c3..569d947 100755
--- a/server/src/com/cloud/template/HypervisorTemplateAdapter.java
+++ b/server/src/com/cloud/template/HypervisorTemplateAdapter.java
@@ -308,60 +308,66 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase {
 
     @Override
     public TemplateProfile prepareExtractTemplate(ExtractTemplateCmd extractcmd) {
-             TemplateProfile profile = super.prepareExtractTemplate(extractcmd);
-             VMTemplateVO template = profile.getTemplate();
-             Long zoneId = profile.getZoneId();
-             Long templateId = template.getId();
-
-             if (template.getHypervisorType() == HypervisorType.VMware) {
-                PrepareOVAPackingCommand cmd = null;
-                String zoneName="";
-                List<HostVO> secondaryStorageHosts;
-                if (!template.isCrossZones() && zoneId != null) {
-                        DataCenterVO zone = _dcDao.findById(zoneId);
-                        zoneName = zone.getName();
-                List<DataStore> imageStores = this.storeMgr.getImageStoresByScope(new ZoneScope(profile.getZoneId()));
-                if (imageStores == null || imageStores.size() == 0) {
-                    throw new CloudRuntimeException("Unable to find image store to download template " + profile.getTemplate());
+        TemplateProfile profile = super.prepareExtractTemplate(extractcmd);
+        VMTemplateVO template = profile.getTemplate();
+        Long zoneId = profile.getZoneId();
+        Long templateId = template.getId();
+
+        // Simply return profile if non-ESX hypervisor.
+        if (template.getHypervisorType() == HypervisorType.VMware) {
+            PrepareOVAPackingCommand cmd = null;
+            String zoneName="";
+            List<DataStore> imageStores = null;
+
+            if (!template.isCrossZones()) {
+                if (zoneId == null) {
+                    throw new CloudRuntimeException("ZoneId cannot be null for a template that is not available across zones");
                 }
+                // Else get the list of image stores in this zone's scope.
+                DataCenterVO zone = _dcDao.findById(zoneId);
+                zoneName = zone.getName();
+                imageStores = this.storeMgr.getImageStoresByScope(new ZoneScope(profile.getZoneId()));
+            } else {
+                // template is available across zones. Get a list of all image stores.
+                imageStores = this.storeMgr.listImageStores();
+            }
+
+            if (imageStores == null || imageStores.size() == 0) {
+                throw new CloudRuntimeException("Unable to find an image store zone when trying to download template " + profile.getTemplate());
+            }
 
-                    s_logger.debug("Attempting to mark template host refs for template: " + template.getName() + " as destroyed in zone: " + zoneName);
+            s_logger.debug("Attempting to mark template host refs for template: " + template.getName() + " as destroyed in zone: " + zoneName);
 
-                // Make sure the template is downloaded to all the necessary secondary storage hosts
+            // Make sure the template is downloaded to all the necessary secondary storage hosts
+
+            for (DataStore store : imageStores) {
+                long storeId = store.getId();
+                List<TemplateDataStoreVO> templateStoreVOs = _tmpltStoreDao.listByTemplateStore(templateId, storeId);
+                for (TemplateDataStoreVO templateStoreVO : templateStoreVOs) {
+                    if (templateStoreVO.getDownloadState() == Status.DOWNLOAD_IN_PROGRESS) {
+                        String errorMsg = "Please specify a template that is not currently being downloaded.";
+                        s_logger.debug("Template: " + template.getName() + " is currently being downloaded to secondary storage host: " + store.getName() + ".");
+                        throw new CloudRuntimeException(errorMsg);
+                    }
+                    String installPath = templateStoreVO.getInstallPath();
+                    if (installPath != null) {
+                        EndPoint ep = _epSelector.select(store);
+                        if (ep == null) {
+                            s_logger.warn("prepareOVAPacking (hyervisorTemplateAdapter): There is no secondary storage VM for secondary storage host " + store.getName());
+                            throw new CloudRuntimeException("PrepareExtractTemplate: can't locate ssvm for SecStorage Host.");
+                        }
+                        cmd = new PrepareOVAPackingCommand(store.getUri(), installPath);
+                        cmd.setContextParam("hypervisor", HypervisorType.VMware.toString());
+                        Answer answer = ep.sendMessage(cmd);
 
-                for (DataStore store : imageStores) {
-                    long storeId = store.getId();
-                    List<TemplateDataStoreVO> templateStoreVOs = _tmpltStoreDao.listByTemplateStore(templateId, storeId);
-                    for (TemplateDataStoreVO templateStoreVO : templateStoreVOs) {
-                        if (templateStoreVO.getDownloadState() == Status.DOWNLOAD_IN_PROGRESS) {
-                                 String errorMsg = "Please specify a template that is not currently being downloaded.";
-                            s_logger.debug("Template: " + template.getName() + " is currently being downloaded to secondary storage host: " + store.getName() + ".");
-                                 throw new CloudRuntimeException(errorMsg);
+                        if (answer == null || !answer.getResult()) {
+                            s_logger.debug("Failed to create OVA for template " + templateStoreVO + " due to " + ((answer == null) ? "answer is null" : answer.getDetails()));
+                            throw new CloudRuntimeException("PrepareExtractTemplate: Failed to create OVA for template extraction. ");
                         }
-                        String installPath = templateStoreVO.getInstallPath();
-                        if (installPath != null) {
-                            EndPoint ep = _epSelector.select(store);
-                            if (ep == null) {
-                                s_logger.warn("prepareOVAPacking (hyervisorTemplateAdapter): There is no secondary storage VM for secondary storage host " + store.getName());
-                                 throw new CloudRuntimeException("PrepareExtractTemplate: can't locate ssvm for SecStorage Host.");
-                              }
-                           //Answer answer = _agentMgr.sendToSecStorage(secondaryStorageHost, new PrepareOVAPackingCommand(secondaryStorageHost.getStorageUrl(), installPath));
-                            cmd = new PrepareOVAPackingCommand(store.getUri(), installPath);
-                            cmd.setContextParam("hypervisor", HypervisorType.VMware.toString());
-                            Answer answer = ep.sendMessage(cmd);
-
-                                  if (answer == null || !answer.getResult()) {
-                                      s_logger.debug("Failed to create OVA for template " + templateStoreVO + " due to " + ((answer == null) ? "answer is null" : answer.getDetails()));
-                                      throw new CloudRuntimeException("PrepareExtractTemplate: Failed to create OVA for template extraction. ");
-                                  }
-                       }
-              }
-           }
-         }  else {
-            s_logger.debug("Failed to create OVA for template " + template + " due to zone non-existing.");
-                        throw new CloudRuntimeException("PrepareExtractTemplate: Failed to create OVA for template extraction. ");
+                    }
+                }
+            }
         }
-         }
         return profile;
-        }
+    }
 }


[29/50] [abbrv] git commit: updated refs/heads/vmsync to f737019

Posted by ah...@apache.org.
Reduced resource usage in vpc test cases

Signed-off-by: Prasanna Santhanam <ts...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/e14f355a
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/e14f355a
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/e14f355a

Branch: refs/heads/vmsync
Commit: e14f355a0bfd7d76ff953e39e1c5b4fee597f917
Parents: e8ea6b1
Author: rayeesn <ra...@citrix.com>
Authored: Sat Jun 29 15:07:15 2013 -0700
Committer: Prasanna Santhanam <ts...@apache.org>
Committed: Sun Jun 30 11:15:32 2013 +0530

----------------------------------------------------------------------
 test/integration/component/test_vpc_network_lbrules.py       | 4 ++--
 test/integration/component/test_vpc_network_pfrules.py       | 4 ++--
 test/integration/component/test_vpc_network_staticnatrule.py | 4 ++--
 test/integration/component/test_vpc_routers.py               | 2 +-
 4 files changed, 7 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/e14f355a/test/integration/component/test_vpc_network_lbrules.py
----------------------------------------------------------------------
diff --git a/test/integration/component/test_vpc_network_lbrules.py b/test/integration/component/test_vpc_network_lbrules.py
index b0357fa..865cf0e 100644
--- a/test/integration/component/test_vpc_network_lbrules.py
+++ b/test/integration/component/test_vpc_network_lbrules.py
@@ -65,8 +65,8 @@ class Services:
                                     "name": "Tiny Instance",
                                     "displaytext": "Tiny Instance",
                                     "cpunumber": 1,
-                                    "cpuspeed": 1000,
-                                    "memory": 512,
+                                    "cpuspeed": 100,
+                                    "memory": 128,
                                     },
                         "network_offering": {
                                     "name": 'VPC Network offering',

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/e14f355a/test/integration/component/test_vpc_network_pfrules.py
----------------------------------------------------------------------
diff --git a/test/integration/component/test_vpc_network_pfrules.py b/test/integration/component/test_vpc_network_pfrules.py
index b478b6a..ad7bbae 100644
--- a/test/integration/component/test_vpc_network_pfrules.py
+++ b/test/integration/component/test_vpc_network_pfrules.py
@@ -62,8 +62,8 @@ class Services:
                 "name": "Tiny Instance",
                 "displaytext": "Tiny Instance",
                 "cpunumber": 1,
-                "cpuspeed": 1000,
-                "memory": 512,
+                "cpuspeed": 100,
+                "memory": 128,
             },
             "network_offering": {
                 "name": 'VPC Network offering',

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/e14f355a/test/integration/component/test_vpc_network_staticnatrule.py
----------------------------------------------------------------------
diff --git a/test/integration/component/test_vpc_network_staticnatrule.py b/test/integration/component/test_vpc_network_staticnatrule.py
index c5d9e57..1410f5e 100644
--- a/test/integration/component/test_vpc_network_staticnatrule.py
+++ b/test/integration/component/test_vpc_network_staticnatrule.py
@@ -61,8 +61,8 @@ class Services:
                                                 "name": "Tiny Instance",
                                                 "displaytext": "Tiny Instance",
                                                 "cpunumber": 1,
-                                                "cpuspeed": 1000,
-                                                "memory": 512,
+                                                "cpuspeed": 100,
+                                                "memory": 128,
                                                 },
                                 "network_offering": {
                                                 "name": 'VPC Network offering',

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/e14f355a/test/integration/component/test_vpc_routers.py
----------------------------------------------------------------------
diff --git a/test/integration/component/test_vpc_routers.py b/test/integration/component/test_vpc_routers.py
index a8559e5..043a01b 100644
--- a/test/integration/component/test_vpc_routers.py
+++ b/test/integration/component/test_vpc_routers.py
@@ -49,7 +49,7 @@ class Services:
                                     "displaytext": "Tiny Instance",
                                     "cpunumber": 1,
                                     "cpuspeed": 100,
-                                    "memory": 64,
+                                    "memory": 128,
                                     },
                          "service_offering_new": {
                                     "name": "Small Instance",


[45/50] [abbrv] git commit: updated refs/heads/vmsync to f737019

Posted by ah...@apache.org.
CLOUDSTACK-2288: NPE while creating volume from snapshot when the primary storage is in maintenance state.

Signed-off-by: Prasanna Santhanam <ts...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/ffd5f1a7
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/ffd5f1a7
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/ffd5f1a7

Branch: refs/heads/vmsync
Commit: ffd5f1a777ac9af5b65b1fcd357684d7dafcce87
Parents: e07a8b3
Author: Sanjay Tripathi <sa...@citrix.com>
Authored: Thu Jun 6 16:43:04 2013 +0530
Committer: Prasanna Santhanam <ts...@apache.org>
Committed: Mon Jul 1 20:17:11 2013 +0530

----------------------------------------------------------------------
 server/src/com/cloud/storage/VolumeManagerImpl.java | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/ffd5f1a7/server/src/com/cloud/storage/VolumeManagerImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/storage/VolumeManagerImpl.java b/server/src/com/cloud/storage/VolumeManagerImpl.java
index a293da5..30fb322 100644
--- a/server/src/com/cloud/storage/VolumeManagerImpl.java
+++ b/server/src/com/cloud/storage/VolumeManagerImpl.java
@@ -527,7 +527,7 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager {
 
     @DB
     protected VolumeInfo createVolumeFromSnapshot(VolumeVO volume,
-            SnapshotVO snapshot) {
+            SnapshotVO snapshot) throws StorageUnavailableException {
         Account account = _accountDao.findById(volume.getAccountId());
 
         final HashSet<StoragePool> poolsToAvoid = new HashSet<StoragePool>();
@@ -555,6 +555,12 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager {
             }
         }
 
+        if (pool == null) {
+            String msg = "There are no available storage pools to store the volume in";
+            s_logger.info(msg);
+            throw new StorageUnavailableException(msg, -1);
+        }
+
         VolumeInfo vol = volFactory.getVolume(volume.getId());
         DataStore store = dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);
         SnapshotInfo snapInfo = snapshotFactory.getSnapshot(snapshot.getId(), DataStoreRole.Image);
@@ -605,7 +611,7 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager {
         }
     }
 
-    protected VolumeVO createVolumeFromSnapshot(VolumeVO volume, long snapshotId) {
+    protected VolumeVO createVolumeFromSnapshot(VolumeVO volume, long snapshotId) throws StorageUnavailableException {
         VolumeInfo createdVolume = null;
         SnapshotVO snapshot = _snapshotDao.findById(snapshotId);
         createdVolume = createVolumeFromSnapshot(volume,


[38/50] [abbrv] git commit: updated refs/heads/vmsync to f737019

Posted by ah...@apache.org.
Fix injection of datastoreproviders in nonoss context

The datastore provider references are no longer in the application context
and have been moved to their respective componentContexts. Plug them in by
default so that the nonoss server starts up successfully.

Signed-off-by: Prasanna Santhanam <ts...@apache.org>
(cherry picked from commit 01debd59d3a21c4164e4ef7e6f4d9e279933e34f)


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/9f12a251
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/9f12a251
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/9f12a251

Branch: refs/heads/vmsync
Commit: 9f12a251919f2223b17fdd6b10ea5bc99096a1d1
Parents: b4f6b57
Author: Prasanna Santhanam <ts...@apache.org>
Authored: Mon Jul 1 13:47:16 2013 +0530
Committer: Prasanna Santhanam <ts...@apache.org>
Committed: Mon Jul 1 13:52:53 2013 +0530

----------------------------------------------------------------------
 client/tomcatconf/applicationContext.xml.in      |  4 ++--
 client/tomcatconf/componentContext.xml.in        |  2 +-
 client/tomcatconf/nonossComponentContext.xml.in  | 19 +++++++++++++------
 .../tomcatconf/simulatorComponentContext.xml.in  |  2 +-
 4 files changed, 17 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/9f12a251/client/tomcatconf/applicationContext.xml.in
----------------------------------------------------------------------
diff --git a/client/tomcatconf/applicationContext.xml.in b/client/tomcatconf/applicationContext.xml.in
index 3d5d4fa..14255c1 100644
--- a/client/tomcatconf/applicationContext.xml.in
+++ b/client/tomcatconf/applicationContext.xml.in
@@ -688,7 +688,7 @@
     <property name="imageDataStoreMgr" ref="imageStoreProviderMgr"/>
   </bean>
 
-  <bean id="CloudStackPrimaryDataStoreProviderImpl"
+  <bean id="cloudStackPrimaryDataStoreProviderImpl"
         class="org.apache.cloudstack.storage.datastore.provider.CloudStackPrimaryDataStoreProviderImpl"/>
 
   <bean id="dataStoreProviderManager"
@@ -696,7 +696,7 @@
     <property name="providers">
       <list>
         <!--Data Store Providers-->
-        <ref bean="CloudStackPrimaryDataStoreProviderImpl"/>
+        <ref bean="cloudStackPrimaryDataStoreProviderImpl"/>
       </list>
     </property>
   </bean>

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/9f12a251/client/tomcatconf/componentContext.xml.in
----------------------------------------------------------------------
diff --git a/client/tomcatconf/componentContext.xml.in b/client/tomcatconf/componentContext.xml.in
index 966faf5..1fbec61 100644
--- a/client/tomcatconf/componentContext.xml.in
+++ b/client/tomcatconf/componentContext.xml.in
@@ -112,7 +112,7 @@
         class="org.apache.cloudstack.storage.datastore.provider.DataStoreProviderManagerImpl">
     <property name="providers">
       <list merge="true">
-        <ref bean="CloudStackPrimaryDataStoreProviderImpl"/>
+        <ref bean="cloudStackPrimaryDataStoreProviderImpl"/>
         <ref local="cloudStackImageStoreProviderImpl"/>
         <ref local="s3ImageStoreProviderImpl"/>
         <ref local="swiftImageStoreProviderImpl"/>

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/9f12a251/client/tomcatconf/nonossComponentContext.xml.in
----------------------------------------------------------------------
diff --git a/client/tomcatconf/nonossComponentContext.xml.in b/client/tomcatconf/nonossComponentContext.xml.in
index e7828e1..ffa6281 100644
--- a/client/tomcatconf/nonossComponentContext.xml.in
+++ b/client/tomcatconf/nonossComponentContext.xml.in
@@ -195,17 +195,24 @@
     </property>
   </bean>
 
-  <!--<bean id="SolidfirePrimaryDataStoreProvider" class="org.apache.cloudstack.storage.datastore.provider.SolidfirePrimaryDataStoreProvider"/>-->
+  <bean id="cloudStackImageStoreProviderImpl"
+        class="org.apache.cloudstack.storage.datastore.provider.CloudStackImageStoreProviderImpl"/>
+  <bean id="s3ImageStoreProviderImpl"
+        class="org.apache.cloudstack.storage.datastore.provider.S3ImageStoreProviderImpl"/>
+  <bean id="swiftImageStoreProviderImpl"
+        class="org.apache.cloudstack.storage.datastore.provider.SwiftImageStoreProviderImpl"/>
+  <bean id="solidFireDataStoreProvider"
+        class="org.apache.cloudstack.storage.datastore.provider.SolidfirePrimaryDataStoreProvider"/>
+
   <!--Storage Providers-->
   <bean id="dataStoreProviderManager"
         class="org.apache.cloudstack.storage.datastore.provider.DataStoreProviderManagerImpl">
     <property name="providers">
       <list merge="true">
-        <ref bean="CloudStackPrimaryDataStoreProviderImpl"/>
-        <ref bean="CloudStackImageStoreProviderImpl"/>
-        <ref bean="S3ImageStoreProviderImpl"/>
-        <ref bean="SwiftImageStoreProviderImpl"/>
-        <!--<ref local="SolidfirePrimaryDataStoreProvider"/>-->
+        <ref bean="cloudStackPrimaryDataStoreProviderImpl"/>
+        <ref bean="cloudStackImageStoreProviderImpl"/>
+        <ref bean="s3ImageStoreProviderImpl"/>
+        <ref bean="solidFireDataStoreProvider"/>
       </list>
     </property>
   </bean>

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/9f12a251/client/tomcatconf/simulatorComponentContext.xml.in
----------------------------------------------------------------------
diff --git a/client/tomcatconf/simulatorComponentContext.xml.in b/client/tomcatconf/simulatorComponentContext.xml.in
index 579ae1b..92278a4 100644
--- a/client/tomcatconf/simulatorComponentContext.xml.in
+++ b/client/tomcatconf/simulatorComponentContext.xml.in
@@ -45,7 +45,7 @@
       <!--Override the cloudstack default image store provider to use simulator defined provider-->
       <list>
         <!--Data Store Providers-->
-        <ref bean="CloudStackPrimaryDataStoreProviderImpl"/>
+        <ref bean="cloudStackPrimaryDataStoreProviderImpl"/>
         <ref bean="SimulatorImageStoreProviderImpl"/>
       </list>
     </property>


[31/50] [abbrv] git commit: updated refs/heads/vmsync to f737019

Posted by ah...@apache.org.
Fix tab/space issues

Several test failures occurred due to tab issues

Signed-off-by: Prasanna Santhanam <ts...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/c7315975
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/c7315975
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/c7315975

Branch: refs/heads/vmsync
Commit: c7315975d22e95f48eee929583f8ec3c30f65094
Parents: 2a51c3e
Author: Prasanna Santhanam <ts...@apache.org>
Authored: Sun Jun 30 13:20:27 2013 +0530
Committer: Prasanna Santhanam <ts...@apache.org>
Committed: Sun Jun 30 13:20:27 2013 +0530

----------------------------------------------------------------------
 test/integration/component/test_accounts.py     |   2 +-
 .../component/test_affinity_groups.py           | 135 ++---
 .../component/test_netscaler_configs.py         |   8 +-
 .../component/test_shared_networks.py           |   2 +-
 test/integration/component/test_vpc_routers.py  |  76 ++-
 ...deploy_vms_with_varied_deploymentplanners.py |  48 +-
 test/integration/smoke/test_network.py          | 175 +++----
 test/integration/smoke/test_vm_snapshots.py     | 519 ++++++++++---------
 8 files changed, 446 insertions(+), 519 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/c7315975/test/integration/component/test_accounts.py
----------------------------------------------------------------------
diff --git a/test/integration/component/test_accounts.py b/test/integration/component/test_accounts.py
index ee38c6d..65c0c6f 100644
--- a/test/integration/component/test_accounts.py
+++ b/test/integration/component/test_accounts.py
@@ -753,7 +753,7 @@ class TestServiceOfferingHierarchy(cloudstackTestCase):
                             domainid=cls.domain_2.id
                             )
 
-       cls._cleanup = [
+        cls._cleanup = [
                        cls.account_2,
                        cls.domain_2,
                        cls.service_offering,

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/c7315975/test/integration/component/test_affinity_groups.py
----------------------------------------------------------------------
diff --git a/test/integration/component/test_affinity_groups.py b/test/integration/component/test_affinity_groups.py
index 79e35e0..44bf90c 100644
--- a/test/integration/component/test_affinity_groups.py
+++ b/test/integration/component/test_affinity_groups.py
@@ -304,15 +304,14 @@ class TestListAffinityGroups(cloudstackTestCase):
 
     def tearDown(self):
         try:
-            cls.api_client = super(TestListAffinityGroups, cls).getClsTestClient().getApiClient()
+            self.api_client = super(TestListAffinityGroups, self).getClsTestClient().getApiClient()
             #Clean up, terminate the created templates
-            cleanup_resources(cls.api_client, cls.cleanup)
+            cleanup_resources(self.api_client, self.cleanup)
         except Exception as e:
             raise Exception("Warning: Exception during cleanup : %s" % e)
 
     @classmethod
     def tearDownClass(cls):
-
         try:
             cls.api_client = super(TestListAffinityGroups, cls).getClsTestClient().getApiClient()
             #Clean up, terminate the created templates
@@ -327,10 +326,6 @@ class TestListAffinityGroups(cloudstackTestCase):
             api_client = self.api_client
         if aff_grp == None:
             self.services["host_anti_affinity_0"]
-        #if acc == None:
-        #    acc = self.account.name
-        #if domainid == None:
-        #    domainid = self.domain.id
 
         try:
             self.aff_grp.append(AffinityGroup.create(api_client,
@@ -339,34 +334,25 @@ class TestListAffinityGroups(cloudstackTestCase):
             raise Exception("Error: Creation of Affinity Group failed : %s" %e)
 
     def create_vm_in_aff_grps(self, ag_list):
-   #try:
-   self.debug('Creating VM in AffinityGroup=%s' % ag_list[0])
-   vm = VirtualMachine.create(
-               self.api_client,
-               self.services["virtual_machine"],
-               templateid=self.template.id,
-               #accountid=self.account.name,
-               #domainid=self.account.domainid,
-               serviceofferingid=self.service_offering.id,
-               affinitygroupnames=ag_list
-            )
-   self.debug('Created VM=%s in Affinity Group=%s' %
-           (vm.id, ag_list[0]))
-   #except Exception:
-       #self.debug('Unable to create VM in a Affinity Group=%s'
-       #                 % ag_list[0])
-
-   list_vm = list_virtual_machines(self.api_client, id=vm.id)
-
+        self.debug('Creating VM in AffinityGroup=%s' % ag_list[0])
+        vm = VirtualMachine.create(
+                   self.api_client,
+                   self.services["virtual_machine"],
+                   templateid=self.template.id,
+                   serviceofferingid=self.service_offering.id,
+                   affinitygroupnames=ag_list
+                )
+        self.debug('Created VM=%s in Affinity Group=%s' %
+               (vm.id, ag_list[0]))
+
+        list_vm = list_virtual_machines(self.api_client, id=vm.id)
         self.assertEqual(isinstance(list_vm, list), True,
                          "Check list response returns a valid list")
         self.assertNotEqual(len(list_vm),0,
                             "Check VM available in List Virtual Machines")
-
         vm_response = list_vm[0]
         self.assertEqual(vm_response.state, 'Running',
                          msg="VM is not in Running state")
-
         return vm, vm_response.hostid
 
     def test_01_list_aff_grps_for_vm(self):
@@ -543,11 +529,6 @@ class TestDeleteAffinityGroups(cloudstackTestCase):
             api_client = self.api_client
         if aff_grp == None:
             self.services["host_anti_affinity_0"]
-        #if acc == None:
-        #    acc = self.account.name
-        #if domainid == None:
-        #    domainid = self.domain.id
-
         try:
             self.aff_grp.append(AffinityGroup.create(api_client,
                                                      aff_grp, acc, domainid))
@@ -555,24 +536,18 @@ class TestDeleteAffinityGroups(cloudstackTestCase):
             raise Exception("Error: Creation of Affinity Group failed : %s" %e)
 
     def create_vm_in_aff_grps(self, ag_list):
-   #try:
-   self.debug('Creating VM in AffinityGroup=%s' % ag_list[0])
-   vm = VirtualMachine.create(
-               self.api_client,
-               self.services["virtual_machine"],
-               templateid=self.template.id,
-               #accountid=self.account.name,
-               #domainid=self.account.domainid,
-               serviceofferingid=self.service_offering.id,
-               affinitygroupnames=ag_list
-            )
-   self.debug('Created VM=%s in Affinity Group=%s' %
-           (vm.id, ag_list[0]))
-   #except Exception:
-       #self.debug('Unable to create VM in a Affinity Group=%s'
-       #                 % ag_list[0])
-
-   list_vm = list_virtual_machines(self.api_client, id=vm.id)
+        self.debug('Creating VM in AffinityGroup=%s' % ag_list[0])
+        vm = VirtualMachine.create(
+                   self.api_client,
+                   self.services["virtual_machine"],
+                   templateid=self.template.id,
+                   serviceofferingid=self.service_offering.id,
+                   affinitygroupnames=ag_list
+                )
+        self.debug('Created VM=%s in Affinity Group=%s' %
+               (vm.id, ag_list[0]))
+
+        list_vm = list_virtual_machines(self.api_client, id=vm.id)
 
         self.assertEqual(isinstance(list_vm, list), True,
                          "Check list response returns a valid list")
@@ -817,11 +792,6 @@ class TestUpdateVMAffinityGroups(cloudstackTestCase):
             api_client = self.api_client
         if aff_grp == None:
             self.services["host_anti_affinity_0"]
-        #if acc == None:
-        #    acc = self.account.name
-        #if domainid == None:
-        #    domainid = self.domain.id
-
         try:
             self.aff_grp.append(AffinityGroup.create(api_client,
                                                      aff_grp, acc, domainid))
@@ -829,24 +799,18 @@ class TestUpdateVMAffinityGroups(cloudstackTestCase):
             raise Exception("Error: Creation of Affinity Group failed : %s" %e)
 
     def create_vm_in_aff_grps(self, ag_list):
-   #try:
-   self.debug('Creating VM in AffinityGroup=%s' % ag_list[0])
-   vm = VirtualMachine.create(
-               self.api_client,
+        self.debug('Creating VM in AffinityGroup=%s' % ag_list[0])
+        vm = VirtualMachine.create(
+                self.api_client,
                self.services["virtual_machine"],
                templateid=self.template.id,
-               #accountid=self.account.name,
-               #domainid=self.account.domainid,
                serviceofferingid=self.service_offering.id,
                affinitygroupnames=ag_list
             )
-   self.debug('Created VM=%s in Affinity Group=%s' %
-           (vm.id, ag_list[0]))
-   #except Exception:
-       #self.debug('Unable to create VM in a Affinity Group=%s'
-       #                 % ag_list[0])
+        self.debug('Created VM=%s in Affinity Group=%s' %
+                   (vm.id, ag_list[0]))
 
-   list_vm = list_virtual_machines(self.api_client, id=vm.id)
+        list_vm = list_virtual_machines(self.api_client, id=vm.id)
 
         self.assertEqual(isinstance(list_vm, list), True,
                          "Check list response returns a valid list")
@@ -996,7 +960,7 @@ class TestUpdateVMAffinityGroups(cloudstackTestCase):
 
         vm1.start(self.api_client)
         list_aff_grps = AffinityGroup.list(self.api_client,
-                                           virtualmachineid=vm.id)
+                                           virtualmachineid=vm1.id)
         self.assertEqual(list_aff_grps, [], "The affinity groups list is not empyty")
 
         vm1.delete(self.api_client)
@@ -1096,10 +1060,6 @@ class TestDeployVMAffinityGroups(cloudstackTestCase):
             api_client = self.api_client
         if aff_grp == None:
             self.services["host_anti_affinity_0"]
-        #if acc == None:
-        #    acc = self.account.name
-        #if domainid == None:
-        #    domainid = self.domain.id
 
         try:
             self.aff_grp.append(AffinityGroup.create(api_client,
@@ -1111,21 +1071,19 @@ class TestDeployVMAffinityGroups(cloudstackTestCase):
 
         if api_client == None:
             api_client = self.api_client
-   self.debug('Creating VM in AffinityGroup=%s' % ag_list)
-   vm = VirtualMachine.create(
+        self.debug('Creating VM in AffinityGroup=%s' % ag_list)
+        vm = VirtualMachine.create(
                api_client,
                self.services["virtual_machine"],
                templateid=self.template.id,
-               #accountid=self.account.name,
-               #domainid=self.account.domainid,
                serviceofferingid=self.service_offering.id,
                affinitygroupnames=ag_list,
                 affinitygroupids=ag_ids
             )
-   self.debug('Created VM=%s in Affinity Group=%s' %
-           (vm.id, ag_list))
+        self.debug('Created VM=%s in Affinity Group=%s' %
+                    (vm.id, ag_list))
 
-   list_vm = list_virtual_machines(self.api_client, id=vm.id)
+        list_vm = list_virtual_machines(self.api_client, id=vm.id)
 
         self.assertEqual(isinstance(list_vm, list), True,
                          "Check list response returns a valid list")
@@ -1143,7 +1101,6 @@ class TestDeployVMAffinityGroups(cloudstackTestCase):
         """
             Deploy VM without affinity group
         """
-
         vm1, hostid1 = self.create_vm_in_aff_grps()
 
         vm1.delete(self.api_client)
@@ -1441,10 +1398,6 @@ class TestAffinityGroupsAdminUser(cloudstackTestCase):
             api_client = self.api_client
         if aff_grp == None:
             self.services["host_anti_affinity_0"]
-        #if acc == None:
-        #    acc = self.account.name
-        #if domainid == None:
-        #    domainid = self.domain.id
 
         try:
             self.aff_grp.append(AffinityGroup.create(api_client,
@@ -1456,21 +1409,19 @@ class TestAffinityGroupsAdminUser(cloudstackTestCase):
 
         if api_client == None:
             api_client = self.api_client
-   self.debug('Creating VM in AffinityGroup=%s' % ag_list)
-   vm = VirtualMachine.create(
+        self.debug('Creating VM in AffinityGroup=%s' % ag_list)
+        vm = VirtualMachine.create(
                api_client,
                self.services["virtual_machine"],
                templateid=self.template.id,
-               #accountid=self.account.name,
-               #domainid=self.account.domainid,
                serviceofferingid=self.service_offering.id,
                affinitygroupnames=ag_list,
                 affinitygroupids=ag_ids
             )
-   self.debug('Created VM=%s in Affinity Group=%s' %
-           (vm.id, ag_list))
+        self.debug('Created VM=%s in Affinity Group=%s' %
+                   (vm.id, ag_list))
 
-   list_vm = list_virtual_machines(self.api_client, id=vm.id)
+        list_vm = list_virtual_machines(self.api_client, id=vm.id)
 
         self.assertEqual(isinstance(list_vm, list), True,
                          "Check list response returns a valid list")

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/c7315975/test/integration/component/test_netscaler_configs.py
----------------------------------------------------------------------
diff --git a/test/integration/component/test_netscaler_configs.py b/test/integration/component/test_netscaler_configs.py
index 5de0843..d26da47 100644
--- a/test/integration/component/test_netscaler_configs.py
+++ b/test/integration/component/test_netscaler_configs.py
@@ -730,9 +730,9 @@ class TestNetScalerDedicated(cloudstackTestCase):
                             networkofferingid=self.network_offering.id,
                             zoneid=self.zone.id
                         )
-	self.debug("Deploying an instance in account: %s" % self.account_2.account.name)
+        self.debug("Deploying an instance in account: %s" % self.account_2.account.name)
         with self.assertRaises(Exception):
-        	VirtualMachine.create(
+            VirtualMachine.create(
                                   self.apiclient,
                                   self.services["virtual_machine"],
                                   accountid=self.account_2.account.name,
@@ -740,7 +740,7 @@ class TestNetScalerDedicated(cloudstackTestCase):
                                   serviceofferingid=self.service_offering.id,
                                   networkids=[str(self.network.id)]
                                   )
-	self.debug("Deply instacne in dedicated Network offering mode failed")
+        self.debug("Deply instance in dedicated Network offering mode failed")
         return
 
 
@@ -1285,7 +1285,7 @@ class TestNetScalerNoCapacity(cloudstackTestCase):
                                                  )
         if isinstance(physical_networks, list):
             physical_network = physical_networks[0]
-	cls.services["netscaler"]["lbdevicecapacity"] = 2
+        cls.services["netscaler"]["lbdevicecapacity"] = 2
         cls.netscaler = NetScaler.add(
                                   cls.api_client,
                                   cls.services["netscaler"],

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/c7315975/test/integration/component/test_shared_networks.py
----------------------------------------------------------------------
diff --git a/test/integration/component/test_shared_networks.py b/test/integration/component/test_shared_networks.py
index 6bcfbfd..8f59dfe 100644
--- a/test/integration/component/test_shared_networks.py
+++ b/test/integration/component/test_shared_networks.py
@@ -2098,7 +2098,7 @@ class TestSharedNetworks(cloudstackTestCase):
                          networkofferingid=self.shared_network_offering.id,
                          zoneid=self.zone.id,
                          )
-	    self.cleanup_networks.append(self.network1)
+            self.cleanup_networks.append(self.network1)
             self.fail("Network got created with used vlan id, which is invalid")
         except Exception as e:
             self.debug("Network creation failed because the valn id being used by another network.")

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/c7315975/test/integration/component/test_vpc_routers.py
----------------------------------------------------------------------
diff --git a/test/integration/component/test_vpc_routers.py b/test/integration/component/test_vpc_routers.py
index 043a01b..3501110 100644
--- a/test/integration/component/test_vpc_routers.py
+++ b/test/integration/component/test_vpc_routers.py
@@ -424,9 +424,9 @@ class TestVPCRoutersBasic(cloudstackTestCase):
 
     @attr(tags=["advanced", "intervlan"])
     def test_02_reboot_router_after_creating_vpc(self):
-	""" Test to reboot the router after creating a VPC
-	"""
-	    # Validate the following 
+        """ Test to reboot the router after creating a VPC
+        """
+        # Validate the following
 	    # 1. Create a VPC with cidr - 10.1.1.1/16
 	    # 2. Reboot the VPC Virtual Router which is created as a result of VPC creation.
         	    # Stop the VPC Router
@@ -473,9 +473,9 @@ class TestVPCRoutersBasic(cloudstackTestCase):
 
     @attr(tags=["advanced", "intervlan"])
     def test_03_destroy_router_after_creating_vpc(self):
-	""" Test to destroy the router after creating a VPC
-	"""
-	    # Validate the following 
+        """ Test to destroy the router after creating a VPC
+	    """
+        # Validate the following
 	    # 1. Create a VPC with cidr - 10.1.1.1/16
 	    # 2. Destroy the VPC Virtual Router which is created as a result of VPC creation.
         self.validate_vpc_offering(self.vpc_off)
@@ -528,15 +528,15 @@ class TestVPCRoutersBasic(cloudstackTestCase):
                          "List Routers should return a valid list"
                          )
         self.migrate_router(routers[0])
-    	return
+        return
 
     @attr(tags=["advanced", "intervlan"])
     def test_05_change_service_offerring_vpc(self):
-	""" Tests to change service offering of the Router after 
-	    creating a vpc
-	"""
+        """ Tests to change service offering of the Router after
+            creating a vpc
+        """
         
-	    # Validate the following 
+        # Validate the following
 	    # 1. Create a VPC with cidr - 10.1.1.1/16
 	    # 2. Change the service offerings of the VPC Virtual Router which is created as a result of VPC creation.
         
@@ -568,7 +568,7 @@ class TestVPCRoutersBasic(cloudstackTestCase):
                                                  )
         self.debug("Changing service offering for the Router %s" % router.id)
         try: 
-	        router = Router.change_service_offering(self.apiclient,
+            router = Router.change_service_offering(self.apiclient,
 				                           router.id,
 				                           service_offering.id
 				                          )
@@ -589,7 +589,7 @@ class TestVPCRoutersBasic(cloudstackTestCase):
                          "Changing service offering failed as id is %s and expected"
                          "is %s" % (router.serviceofferingid, service_offering.id)
                         ) 
-    	return
+        return
 
 class TestVPCRouterOneNetwork(cloudstackTestCase):
 
@@ -748,18 +748,6 @@ class TestVPCRouterOneNetwork(cloudstackTestCase):
                                     account=cls.account.name,
                                     domainid=cls.account.domainid
                                   )
-#        cls.assertEqual(
-#                         isinstance(public_ips, list),
-#                         True,
-#                         "List public Ip for network should list the Ip addr"
-#                         )
-#        cls.assertEqual(
-#                         public_ips[0].ipaddress,
-#                         public_ip_2.ipaddress.ipaddress,
-#                         "List public Ip for network should list the Ip addr"
-#                         )
-#
-
         public_ip_3 = PublicIPAddress.create(
                                 cls.apiclient,
                                 accountid=cls.account.name,
@@ -917,8 +905,8 @@ class TestVPCRouterOneNetwork(cloudstackTestCase):
         return
 
     def validate_network_rules(self):
-	""" Validate network rules
-	"""
+        """ Validate network rules
+        """
         vms = VirtualMachine.list(
                                   self.apiclient,
                                   account=self.account.name,
@@ -1014,8 +1002,8 @@ class TestVPCRouterOneNetwork(cloudstackTestCase):
 
     @attr(tags=["advanced", "intervlan"])
     def test_01_start_stop_router_after_addition_of_one_guest_network(self):
-	""" Test start/stop of router after addition of one guest network
-	"""
+        """ Test start/stop of router after addition of one guest network
+	    """
         # Validations
 	    #1. Create a VPC with cidr - 10.1.1.1/16
         #2. Add network1(10.1.1.1/24) to this VPC. 
@@ -1031,7 +1019,6 @@ class TestVPCRouterOneNetwork(cloudstackTestCase):
 
         self.validate_vpc_offering(self.vpc_off)
         self.validate_vpc_network(self.vpc)
-        #self.validate_network_rules()
         self.assertEqual(
                         isinstance(self.gateways, list),
                         True,
@@ -1063,7 +1050,7 @@ class TestVPCRouterOneNetwork(cloudstackTestCase):
         cmd.id = router.id
         self.apiclient.stopRouter(cmd)
 	
-	    #List routers to check state of router
+        #List routers to check state of router
         router_response = list_routers(
                                     self.apiclient,
                                     id=router.id
@@ -1082,13 +1069,13 @@ class TestVPCRouterOneNetwork(cloudstackTestCase):
 
         self.debug("Stopped the router with ID: %s" % router.id)
 
-	    # Start The Router
+        # Start The Router
         self.debug("Starting the router with ID: %s" % router.id)
         cmd = startRouter.startRouterCmd()
         cmd.id = router.id
         self.apiclient.startRouter(cmd)
 
-	    #List routers to check state of router
+        #List routers to check state of router
         router_response = list_routers(
                                     self.apiclient,
                                     id=router.id
@@ -1110,8 +1097,8 @@ class TestVPCRouterOneNetwork(cloudstackTestCase):
 
     @attr(tags=["advanced", "intervlan"])
     def test_02_reboot_router_after_addition_of_one_guest_network(self):
-	""" Test reboot of router after addition of one guest network
-	"""
+        """ Test reboot of router after addition of one guest network
+	    """
         # Validations
 	    #1. Create a VPC with cidr - 10.1.1.1/16
         #2. Add network1(10.1.1.1/24) to this VPC. 
@@ -1177,8 +1164,8 @@ class TestVPCRouterOneNetwork(cloudstackTestCase):
 
     @attr(tags=["advanced", "intervlan"])
     def test_03_destroy_router_after_addition_of_one_guest_network(self):
-	""" Test destroy of router after addition of one guest network
-	"""
+        """ Test destroy of router after addition of one guest network
+        """
         # Validations
 	    #1. Create a VPC with cidr - 10.1.1.1/16
         #2. Add network1(10.1.1.1/24) to this VPC. 
@@ -1236,8 +1223,8 @@ class TestVPCRouterOneNetwork(cloudstackTestCase):
 
     @attr(tags=["advanced", "intervlan"])
     def test_04_migrate_router_after_addition_of_one_guest_network(self):
-	""" Test migrate of router after addition of one guest network
-	"""
+        """ Test migrate of router after addition of one guest network
+	    """
         # Validations
 	    #1. Create a VPC with cidr - 10.1.1.1/16
         #2. Add network1(10.1.1.1/24) to this VPC. 
@@ -1275,12 +1262,12 @@ class TestVPCRouterOneNetwork(cloudstackTestCase):
                          "List Routers should return a valid list"
                          )
         self.migrate_router(routers[0])
-    	return
+        return
 
     @attr(tags=["advanced", "intervlan"])
     def test_05_chg_srv_off_router_after_addition_of_one_guest_network(self):
-	""" Test to change service offering of router after addition of one guest network
-	"""
+        """ Test to change service offering of router after addition of one guest network
+	    """
         # Validations
 	    #1. Create a VPC with cidr - 10.1.1.1/16
         #2. Add network1(10.1.1.1/24) to this VPC. 
@@ -1332,7 +1319,7 @@ class TestVPCRouterOneNetwork(cloudstackTestCase):
                                                  )
         self.debug("Changing service offering for the Router %s" % router.id)
         try: 
-	        router = Router.change_service_offering(self.apiclient,
+            router = Router.change_service_offering(self.apiclient,
 				                           router.id,
 				                           service_offering.id
 				                          )
@@ -1353,5 +1340,4 @@ class TestVPCRouterOneNetwork(cloudstackTestCase):
                          "Changing service offering failed as id is %s and expected"
                          "is %s" % (router.serviceofferingid, service_offering.id)
                         ) 
-    	return
-
+        return

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/c7315975/test/integration/smoke/test_deploy_vms_with_varied_deploymentplanners.py
----------------------------------------------------------------------
diff --git a/test/integration/smoke/test_deploy_vms_with_varied_deploymentplanners.py b/test/integration/smoke/test_deploy_vms_with_varied_deploymentplanners.py
index fc8e716..ab44a2b 100644
--- a/test/integration/smoke/test_deploy_vms_with_varied_deploymentplanners.py
+++ b/test/integration/smoke/test_deploy_vms_with_varied_deploymentplanners.py
@@ -23,30 +23,30 @@ from nose.plugins.attrib import attr
 
 class Services:
     def __init__(self):
-	self.services = {
-	    "account": {
-		"email": "test@test.com",
-		"firstname": "Test",
-		"lastname": "User",
-		"username": "test",
-		# Random characters are appended for unique
-		# username
-		"password": "password",
-	    },
-	    "service_offering": {
-		"name": "Planner Service Offering",
-		"displaytext": "Planner Service Offering",
-		"cpunumber": 1,
-		"cpuspeed": 100,
-		# in MHz
-		"memory": 128,
-		# In MBs
-	    },
-	    "ostype": 'CentOS 5.3 (64-bit)',
-	    "virtual_machine": {
-		"hypervisor": "XenServer",
-	    }
-	}
+        self.services = {
+            "account": {
+                "email": "test@test.com",
+                "firstname": "Test",
+                "lastname": "User",
+                "username": "test",
+                # Random characters are appended for unique
+                # username
+                "password": "password",
+            },
+            "service_offering": {
+                "name": "Planner Service Offering",
+                "displaytext": "Planner Service Offering",
+                "cpunumber": 1,
+                "cpuspeed": 100,
+                # in MHz
+                "memory": 128,
+                # In MBs
+            },
+            "ostype": 'CentOS 5.3 (64-bit)',
+            "virtual_machine": {
+                "hypervisor": "XenServer",
+            }
+        }
 
 
 class TestDeployVmWithVariedPlanners(cloudstackTestCase):

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/c7315975/test/integration/smoke/test_network.py
----------------------------------------------------------------------
diff --git a/test/integration/smoke/test_network.py b/test/integration/smoke/test_network.py
index 121bda0..6788dca 100644
--- a/test/integration/smoke/test_network.py
+++ b/test/integration/smoke/test_network.py
@@ -669,7 +669,7 @@ class TestLoadBalancingRule(cloudstackTestCase):
             self.debug(
                 "SSH into VM (IPaddress: %s) & NAT Rule (Public IP: %s)" %
                 (self.vm_1.ipaddress, src_nat_ip_addr.ipaddress)
-                )
+            )
 
             ssh_1 = remoteSSHClient(
                                     src_nat_ip_addr.ipaddress,
@@ -804,20 +804,20 @@ class TestLoadBalancingRule(cloudstackTestCase):
             )
 
 
-	hostnames = []
-	self.try_ssh(src_nat_ip_addr, hostnames)
-	self.try_ssh(src_nat_ip_addr, hostnames)
-	self.try_ssh(src_nat_ip_addr, hostnames)
-	self.try_ssh(src_nat_ip_addr, hostnames)
-	self.try_ssh(src_nat_ip_addr, hostnames)
+        hostnames = []
+        self.try_ssh(src_nat_ip_addr, hostnames)
+        self.try_ssh(src_nat_ip_addr, hostnames)
+        self.try_ssh(src_nat_ip_addr, hostnames)
+        self.try_ssh(src_nat_ip_addr, hostnames)
+        self.try_ssh(src_nat_ip_addr, hostnames)
 
-	self.debug("Hostnames: %s" % str(hostnames))
-	self.assertIn(
+        self.debug("Hostnames: %s" % str(hostnames))
+        self.assertIn(
               self.vm_1.name,
               hostnames,
               "Check if ssh succeeded for server1"
             )
-	self.assertIn(
+        self.assertIn(
               self.vm_2.name,
               hostnames,
               "Check if ssh succeeded for server2"
@@ -826,8 +826,8 @@ class TestLoadBalancingRule(cloudstackTestCase):
         #SSH should pass till there is a last VM associated with LB rule
         lb_rule.remove(self.apiclient, [self.vm_2])
 
-	# making hostnames list empty
-	hostnames[:] = []
+        # making hostnames list empty
+        hostnames[:] = []
 
         try:
             self.debug("SSHing into IP address: %s after removing VM (ID: %s)" %
@@ -837,13 +837,11 @@ class TestLoadBalancingRule(cloudstackTestCase):
                                              ))
 
             self.try_ssh(src_nat_ip_addr, hostnames)
-
-	    self.assertIn(
-                      self.vm_1.name,
-                      hostnames,
-                      "Check if ssh succeeded for server1"
-                      )
-
+            self.assertIn(
+                          self.vm_1.name,
+                          hostnames,
+                          "Check if ssh succeeded for server1"
+                          )
         except Exception as e:
             self.fail("%s: SSH failed for VM with IP Address: %s" %
                                         (e, src_nat_ip_addr.ipaddress))
@@ -958,23 +956,23 @@ class TestLoadBalancingRule(cloudstackTestCase):
         )
         try:
             hostnames = []
-	    self.try_ssh(self.non_src_nat_ip, hostnames)
-	    self.try_ssh(self.non_src_nat_ip, hostnames)
-	    self.try_ssh(self.non_src_nat_ip, hostnames)
-	    self.try_ssh(self.non_src_nat_ip, hostnames)
-	    self.try_ssh(self.non_src_nat_ip, hostnames)
-
-	    self.debug("Hostnames: %s" % str(hostnames))
-	    self.assertIn(
-                self.vm_1.name,
-                hostnames,
-                "Check if ssh succeeded for server1"
-                )
-	    self.assertIn(
-                self.vm_2.name,
-                hostnames,
-                "Check if ssh succeeded for server2"
-                )
+            self.try_ssh(self.non_src_nat_ip, hostnames)
+            self.try_ssh(self.non_src_nat_ip, hostnames)
+            self.try_ssh(self.non_src_nat_ip, hostnames)
+            self.try_ssh(self.non_src_nat_ip, hostnames)
+            self.try_ssh(self.non_src_nat_ip, hostnames)
+
+            self.debug("Hostnames: %s" % str(hostnames))
+            self.assertIn(
+                    self.vm_1.name,
+                    hostnames,
+                    "Check if ssh succeeded for server1"
+                    )
+            self.assertIn(
+                    self.vm_2.name,
+                    hostnames,
+                    "Check if ssh succeeded for server2"
+                    )
 
             #SSH should pass till there is a last VM associated with LB rule
             lb_rule.remove(self.apiclient, [self.vm_2])
@@ -984,19 +982,16 @@ class TestLoadBalancingRule(cloudstackTestCase):
                            self.non_src_nat_ip.ipaddress.ipaddress,
                            self.vm_2.id
                            ))
-	    # Making host list empty
+            # Making host list empty
             hostnames[:] = []
 
-	    self.try_ssh(self.non_src_nat_ip, hostnames)
-
-	    self.assertIn(
-		self.vm_1.name,
-		hostnames,
-		"Check if ssh succeeded for server1"
-		)
-
+            self.try_ssh(self.non_src_nat_ip, hostnames)
+            self.assertIn(
+            self.vm_1.name,
+            hostnames,
+            "Check if ssh succeeded for server1"
+            )
             self.debug("Hostnames after removing VM2: %s" % str(hostnames))
-
         except Exception as e:
             self.fail("%s: SSH failed for VM with IP Address: %s" %
                       (e, self.non_src_nat_ip.ipaddress.ipaddress))
@@ -1017,7 +1012,6 @@ class TestLoadBalancingRule(cloudstackTestCase):
             ssh_1.execute("hostname")[0]
         return
 
-
 class TestRebootRouter(cloudstackTestCase):
 
     def setUp(self):
@@ -1336,31 +1330,29 @@ class TestAssignRemoveLB(cloudstackTestCase):
                               )
         lb_rule.assign(self.apiclient, [self.vm_1, self.vm_2])
 
-	hostnames = []
-	self.try_ssh(self.non_src_nat_ip, hostnames)
-	self.try_ssh(self.non_src_nat_ip, hostnames)
-	self.try_ssh(self.non_src_nat_ip, hostnames)
-	self.try_ssh(self.non_src_nat_ip, hostnames)
-	self.try_ssh(self.non_src_nat_ip, hostnames)
-
-	self.debug("Hostnames: %s" % str(hostnames))
-	self.assertIn(
-              self.vm_1.name,
-              hostnames,
-              "Check if ssh succeeded for server1"
-            )
-	self.assertIn(
-              self.vm_2.name,
-              hostnames,
-              "Check if ssh succeeded for server2"
-              )
-
+        hostnames = []
+        self.try_ssh(self.non_src_nat_ip, hostnames)
+        self.try_ssh(self.non_src_nat_ip, hostnames)
+        self.try_ssh(self.non_src_nat_ip, hostnames)
+        self.try_ssh(self.non_src_nat_ip, hostnames)
+        self.try_ssh(self.non_src_nat_ip, hostnames)
 
+        self.debug("Hostnames: %s" % str(hostnames))
+        self.assertIn(
+                  self.vm_1.name,
+                  hostnames,
+                  "Check if ssh succeeded for server1"
+                )
+        self.assertIn(
+                  self.vm_2.name,
+                  hostnames,
+                  "Check if ssh succeeded for server2"
+                  )
         #Removing VM and assigning another VM to LB rule
         lb_rule.remove(self.apiclient, [self.vm_2])
 
-	# making hostnames list empty
-	hostnames[:] = []
+        # making hostnames list empty
+        hostnames[:] = []
 
         try:
             self.debug("SSHing again into IP address: %s with VM (ID: %s) added to LB rule" %
@@ -1370,38 +1362,35 @@ class TestAssignRemoveLB(cloudstackTestCase):
                                              ))
             self.try_ssh(self.non_src_nat_ip, hostnames)
 
-	    self.assertIn(
-                      self.vm_1.name,
-                      hostnames,
-                      "Check if ssh succeeded for server1"
-                      )
-
+            self.assertIn(
+                          self.vm_1.name,
+                          hostnames,
+                          "Check if ssh succeeded for server1"
+                          )
         except Exception as e:
             self.fail("SSH failed for VM with IP: %s" %
                                     self.non_src_nat_ip.ipaddress)
 
         lb_rule.assign(self.apiclient, [self.vm_3])
 
-	# Making hostnames list empty
+        # Making hostnames list empty
         hostnames[:] = []
-
-	self.try_ssh(self.non_src_nat_ip, hostnames)
-	self.try_ssh(self.non_src_nat_ip, hostnames)
-	self.try_ssh(self.non_src_nat_ip, hostnames)
-	self.try_ssh(self.non_src_nat_ip, hostnames)
-	self.try_ssh(self.non_src_nat_ip, hostnames)
-
-	self.debug("Hostnames: %s" % str(hostnames))
-	self.assertIn(
-              self.vm_1.name,
-              hostnames,
-              "Check if ssh succeeded for server1"
-            )
-	self.assertIn(
-              self.vm_3.name,
-              hostnames,
-              "Check if ssh succeeded for server3"
-              )
+        self.try_ssh(self.non_src_nat_ip, hostnames)
+        self.try_ssh(self.non_src_nat_ip, hostnames)
+        self.try_ssh(self.non_src_nat_ip, hostnames)
+        self.try_ssh(self.non_src_nat_ip, hostnames)
+        self.try_ssh(self.non_src_nat_ip, hostnames)
+        self.debug("Hostnames: %s" % str(hostnames))
+        self.assertIn(
+                  self.vm_1.name,
+                  hostnames,
+                  "Check if ssh succeeded for server1"
+                )
+        self.assertIn(
+                  self.vm_3.name,
+                  hostnames,
+                  "Check if ssh succeeded for server3"
+                  )
         return
 
 class TestReleaseIP(cloudstackTestCase):

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/c7315975/test/integration/smoke/test_vm_snapshots.py
----------------------------------------------------------------------
diff --git a/test/integration/smoke/test_vm_snapshots.py b/test/integration/smoke/test_vm_snapshots.py
index cca4cfb..dd70982 100644
--- a/test/integration/smoke/test_vm_snapshots.py
+++ b/test/integration/smoke/test_vm_snapshots.py
@@ -29,282 +29,283 @@ class Services:
     """
 
     def __init__(self):
-	self.services = {
-			"account": {
-				    "email": "test@test.com",
-				    "firstname": "Test",
-				    "lastname": "User",
-				    "username": "test",
-				    # Random characters are appended for unique
-				    # username
-				    "password": "password",
-			 },
-			 "service_offering": {
-				    "name": "Tiny Instance",
-				    "displaytext": "Tiny Instance",
-				    "cpunumber": 1,
-				    "cpuspeed": 200,  # in MHz
-				    "memory": 256,  # In MBs
-			},
-			"server": {
-				    "displayname": "TestVM",
-				    "username": "root",
-				    "password": "password",
-				    "ssh_port": 22,
-				    "hypervisor": 'XenServer',
-				    "privateport": 22,
-				    "publicport": 22,
-				    "protocol": 'TCP',
-				},
-			 "mgmt_server": {
-				    "ipaddress": '1.2.2.152',
-				    "username": "root",
-				    "password": "password",
-				    "port": 22,
-				},
-			"templates": {
-				    "displaytext": 'Template',
-				    "name": 'Template',
-				    "ostype": "CentOS 5.3 (64-bit)",
-				    "templatefilter": 'self',
-				},
-			"test_dir": "/tmp",
-			"random_data": "random.data",
-			"snapshot_name":"TestSnapshot",
-			"snapshot_displaytext":"Test",
-			"ostype": "CentOS 5.3 (64-bit)",
-			"sleep": 60,
-			"timeout": 10,
-			"mode": 'advanced',  # Networking mode: Advanced, Basic
-		    }
+        self.services = {
+            "account": {
+                "email": "test@test.com",
+                "firstname": "Test",
+                "lastname": "User",
+                "username": "test",
+                # Random characters are appended for unique
+                # username
+                "password": "password",
+            },
+            "service_offering": {
+                "name": "Tiny Instance",
+                "displaytext": "Tiny Instance",
+                "cpunumber": 1,
+                "cpuspeed": 200, # in MHz
+                "memory": 256, # In MBs
+            },
+            "server": {
+                "displayname": "TestVM",
+                "username": "root",
+                "password": "password",
+                "ssh_port": 22,
+                "hypervisor": 'XenServer',
+                "privateport": 22,
+                "publicport": 22,
+                "protocol": 'TCP',
+            },
+            "mgmt_server": {
+                "ipaddress": '1.2.2.152',
+                "username": "root",
+                "password": "password",
+                "port": 22,
+            },
+            "templates": {
+                "displaytext": 'Template',
+                "name": 'Template',
+                "ostype": "CentOS 5.3 (64-bit)",
+                "templatefilter": 'self',
+            },
+            "test_dir": "/tmp",
+            "random_data": "random.data",
+            "snapshot_name": "TestSnapshot",
+            "snapshot_displaytext": "Test",
+            "ostype": "CentOS 5.3 (64-bit)",
+            "sleep": 60,
+            "timeout": 10,
+            "mode": 'advanced', # Networking mode: Advanced, Basic
+        }
 
 class TestVmSnapshot(cloudstackTestCase):
+
     @classmethod
     def setUpClass(cls):
-	cls.api_client = super(TestVmSnapshot, cls).getClsTestClient().getApiClient()
-	cls.services = Services().services
-	# Get Zone, Domain and templates
-	cls.domain = get_domain(cls.api_client, cls.services)
-	cls.zone = get_zone(cls.api_client, cls.services)
-
-	template = get_template(
-			    cls.api_client,
-			    cls.zone.id,
-			    cls.services["ostype"]
-			    )
-	cls.services["domainid"] = cls.domain.id
-	cls.services["server"]["zoneid"] = cls.zone.id
-	cls.services["templates"]["ostypeid"] = template.ostypeid
-	cls.services["zoneid"] = cls.zone.id
-
-	# Create VMs, NAT Rules etc
-	cls.account = Account.create(
-			    cls.api_client,
-			    cls.services["account"],
-			    domainid=cls.domain.id
-			    )
-
-	cls.services["account"] = cls.account.name
-
-	cls.service_offering = ServiceOffering.create(
-					    cls.api_client,
-					    cls.services["service_offering"]
-					    )
-	cls.virtual_machine = VirtualMachine.create(
-				cls.api_client,
-				cls.services["server"],
-				templateid=template.id,
-				accountid=cls.account.name,
-				domainid=cls.account.domainid,
-				serviceofferingid=cls.service_offering.id,
-				mode=cls.services["mode"]
-				)
-	cls.random_data_0 = random_gen(100)
-	cls._cleanup = [
-			cls.service_offering,
-			cls.account,
-			]
-	return
+        cls.api_client = super(TestVmSnapshot, cls).getClsTestClient().getApiClient()
+        cls.services = Services().services
+        # Get Zone, Domain and templates
+        cls.domain = get_domain(cls.api_client, cls.services)
+        cls.zone = get_zone(cls.api_client, cls.services)
+
+        template = get_template(
+                    cls.api_client,
+                    cls.zone.id,
+                    cls.services["ostype"]
+                    )
+        cls.services["domainid"] = cls.domain.id
+        cls.services["server"]["zoneid"] = cls.zone.id
+        cls.services["templates"]["ostypeid"] = template.ostypeid
+        cls.services["zoneid"] = cls.zone.id
+
+        # Create VMs, NAT Rules etc
+        cls.account = Account.create(
+                    cls.api_client,
+                    cls.services["account"],
+                    domainid=cls.domain.id
+                    )
+
+        cls.services["account"] = cls.account.name
+
+        cls.service_offering = ServiceOffering.create(
+                            cls.api_client,
+                            cls.services["service_offering"]
+                            )
+        cls.virtual_machine = VirtualMachine.create(
+                    cls.api_client,
+                    cls.services["server"],
+                    templateid=template.id,
+                    accountid=cls.account.name,
+                    domainid=cls.account.domainid,
+                    serviceofferingid=cls.service_offering.id,
+                    mode=cls.services["mode"]
+                    )
+        cls.random_data_0 = random_gen(100)
+        cls._cleanup = [
+                cls.service_offering,
+                cls.account,
+                ]
+        return
 
     @classmethod
     def tearDownClass(cls):
-	try:
-	    # Cleanup resources used
-	    cleanup_resources(cls.api_client, cls._cleanup)
-	except Exception as e:
-	    raise Exception("Warning: Exception during cleanup : %s" % e)
-	return
+        try:
+            # Cleanup resources used
+            cleanup_resources(cls.api_client, cls._cleanup)
+        except Exception as e:
+            raise Exception("Warning: Exception during cleanup : %s" % e)
+        return
 
     def setUp(self):
-	self.apiclient = self.testClient.getApiClient()
-	self.dbclient = self.testClient.getDbConnection()
-	self.cleanup = []
-	return
+        self.apiclient = self.testClient.getApiClient()
+        self.dbclient = self.testClient.getDbConnection()
+        self.cleanup = []
+        return
 
     def tearDown(self):
-	try:
-	    # Clean up, terminate the created instance, volumes and snapshots
-	    cleanup_resources(self.apiclient, self.cleanup)
-	except Exception as e:
-	    raise Exception("Warning: Exception during cleanup : %s" % e)
-	return
+        try:
+            # Clean up, terminate the created instance, volumes and snapshots
+            cleanup_resources(self.apiclient, self.cleanup)
+        except Exception as e:
+            raise Exception("Warning: Exception during cleanup : %s" % e)
+        return
 
     @attr(tags=["advanced", "advancedns", "smoke"])
     def test_01_create_vm_snapshots(self):
-	"""Test to create VM snapshots
-	"""
-
-	try:
-	    # Login to VM and write data to file system
-	    ssh_client = self.virtual_machine.get_ssh_client()
-
-	    cmds = [
-		"echo %s > %s/%s" % (self.random_data_0, self.services["test_dir"], self.services["random_data"]),
-		"cat %s/%s" % (self.services["test_dir"], self.services["random_data"])
-		]
-
-	    for c in cmds:
-		self.debug(c)
-		result = ssh_client.execute(c)
-		self.debug(result)
-
-	except Exception:
-	    self.fail("SSH failed for Virtual machine: %s" %
-			    self.virtual_machine.ipaddress)
-	self.assertEqual(
-			self.random_data_0,
-			result[0],
-			"Check the random data has be write into temp file!"
-			)
-
-	time.sleep(self.services["sleep"])
-
-	vm_snapshot = VmSnapshot.create(
-					self.apiclient,
-					self.virtual_machine.id,
-					"false",
-					self.services["snapshot_name"],
-					self.services["snapshot_displaytext"]
-					)
-	self.assertEqual(
-			vm_snapshot.state,
-			"Ready",
-			"Check the snapshot of vm is ready!"
-			)
-	return
+        """Test to create VM snapshots
+        """
+
+        try:
+            # Login to VM and write data to file system
+            ssh_client = self.virtual_machine.get_ssh_client()
+
+            cmds = [
+                "echo %s > %s/%s" % (self.random_data_0, self.services["test_dir"], self.services["random_data"]),
+                "cat %s/%s" % (self.services["test_dir"], self.services["random_data"])
+            ]
+
+            for c in cmds:
+                self.debug(c)
+                result = ssh_client.execute(c)
+                self.debug(result)
+
+        except Exception:
+            self.fail("SSH failed for Virtual machine: %s" %
+                      self.virtual_machine.ipaddress)
+        self.assertEqual(
+            self.random_data_0,
+            result[0],
+            "Check the random data has be write into temp file!"
+        )
+
+        time.sleep(self.services["sleep"])
+
+        vm_snapshot = VmSnapshot.create(
+            self.apiclient,
+            self.virtual_machine.id,
+            "false",
+            self.services["snapshot_name"],
+            self.services["snapshot_displaytext"]
+        )
+        self.assertEqual(
+            vm_snapshot.state,
+            "Ready",
+            "Check the snapshot of vm is ready!"
+        )
+        return
 
     @attr(tags=["advanced", "advancedns", "smoke"])
     def test_02_revert_vm_snapshots(self):
-	"""Test to revert VM snapshots
-	"""
-
-	try:
-	    ssh_client = self.virtual_machine.get_ssh_client()
-
-	    cmds = [
-		"rm -rf %s/%s" % (self.services["test_dir"], self.services["random_data"]),
-		"ls %s/%s" % (self.services["test_dir"], self.services["random_data"])
-		]
-
-	    for c in cmds:
-		self.debug(c)
-		result = ssh_client.execute(c)
-		self.debug(result)
-
-	except Exception:
-	    self.fail("SSH failed for Virtual machine: %s" %
-			    self.virtual_machine.ipaddress)
-
-	if str(result[0]).index("No such file or directory") == -1:
-	    self.fail("Check the random data has be delete from temp file!")
-
-	time.sleep(self.services["sleep"])
-
-	list_snapshot_response = VmSnapshot.list(self.apiclient,vmid=self.virtual_machine.id,listall=True)
-
-	self.assertEqual(
-			isinstance(list_snapshot_response, list),
-			True,
-			"Check list response returns a valid list"
-			)
-	self.assertNotEqual(
-			    list_snapshot_response,
-			    None,
-			    "Check if snapshot exists in ListSnapshot"
-			    )
-
-	self.assertEqual(
-			list_snapshot_response[0].state,
-			"Ready",
-			"Check the snapshot of vm is ready!"
-			)
-
-	VmSnapshot.revertToSnapshot(self.apiclient,list_snapshot_response[0].id)
-
-	list_vm_response = list_virtual_machines(
-					    self.apiclient,
-					    id=self.virtual_machine.id
-					    )
-
-	self.assertEqual(
-			list_vm_response[0].state,
-			"Stopped",
-			"Check the state of vm is Stopped!"
-			)
-
-	cmd = startVirtualMachine.startVirtualMachineCmd()
-	cmd.id = list_vm_response[0].id
-	self.apiclient.startVirtualMachine(cmd)
-
-	time.sleep(self.services["sleep"])
-
-	try:
-	    ssh_client = self.virtual_machine.get_ssh_client(reconnect=True)
-
-	    cmds = [
-		"cat %s/%s" % (self.services["test_dir"], self.services["random_data"])
-		]
-
-	    for c in cmds:
-		self.debug(c)
-		result = ssh_client.execute(c)
-		self.debug(result)
-
-	except Exception:
-	    self.fail("SSH failed for Virtual machine: %s" %
-			    self.virtual_machine.ipaddress)
-
-	self.assertEqual(
-			self.random_data_0,
-			result[0],
-			"Check the random data is equal with the ramdom file!"
-			)
+        """Test to revert VM snapshots
+        """
+
+        try:
+            ssh_client = self.virtual_machine.get_ssh_client()
+
+            cmds = [
+                "rm -rf %s/%s" % (self.services["test_dir"], self.services["random_data"]),
+                "ls %s/%s" % (self.services["test_dir"], self.services["random_data"])
+            ]
+
+            for c in cmds:
+                self.debug(c)
+                result = ssh_client.execute(c)
+                self.debug(result)
+
+        except Exception:
+            self.fail("SSH failed for Virtual machine: %s" %
+                      self.virtual_machine.ipaddress)
+
+        if str(result[0]).index("No such file or directory") == -1:
+            self.fail("Check the random data has be delete from temp file!")
+
+        time.sleep(self.services["sleep"])
+
+        list_snapshot_response = VmSnapshot.list(self.apiclient, vmid=self.virtual_machine.id, listall=True)
+
+        self.assertEqual(
+            isinstance(list_snapshot_response, list),
+            True,
+            "Check list response returns a valid list"
+        )
+        self.assertNotEqual(
+            list_snapshot_response,
+            None,
+            "Check if snapshot exists in ListSnapshot"
+        )
+
+        self.assertEqual(
+            list_snapshot_response[0].state,
+            "Ready",
+            "Check the snapshot of vm is ready!"
+        )
+
+        VmSnapshot.revertToSnapshot(self.apiclient, list_snapshot_response[0].id)
+
+        list_vm_response = list_virtual_machines(
+            self.apiclient,
+            id=self.virtual_machine.id
+        )
+
+        self.assertEqual(
+            list_vm_response[0].state,
+            "Stopped",
+            "Check the state of vm is Stopped!"
+        )
+
+        cmd = startVirtualMachine.startVirtualMachineCmd()
+        cmd.id = list_vm_response[0].id
+        self.apiclient.startVirtualMachine(cmd)
+
+        time.sleep(self.services["sleep"])
+
+        try:
+            ssh_client = self.virtual_machine.get_ssh_client(reconnect=True)
+
+            cmds = [
+                "cat %s/%s" % (self.services["test_dir"], self.services["random_data"])
+            ]
+
+            for c in cmds:
+                self.debug(c)
+                result = ssh_client.execute(c)
+                self.debug(result)
+
+        except Exception:
+            self.fail("SSH failed for Virtual machine: %s" %
+                      self.virtual_machine.ipaddress)
+
+        self.assertEqual(
+            self.random_data_0,
+            result[0],
+            "Check the random data is equal with the ramdom file!"
+        )
 
     @attr(tags=["advanced", "advancedns", "smoke"])
     def test_03_delete_vm_snapshots(self):
-	"""Test to delete vm snapshots
-	"""
-
-	list_snapshot_response = VmSnapshot.list(self.apiclient,vmid=self.virtual_machine.id,listall=True)
-
-	self.assertEqual(
-			isinstance(list_snapshot_response, list),
-			True,
-			"Check list response returns a valid list"
-			)
-	self.assertNotEqual(
-			    list_snapshot_response,
-			    None,
-			    "Check if snapshot exists in ListSnapshot"
-			    )
-	VmSnapshot.deleteVMSnapshot(self.apiclient,list_snapshot_response[0].id)
-
-	time.sleep(self.services["sleep"]*3)
-
-	list_snapshot_response = VmSnapshot.list(self.apiclient,vmid=self.virtual_machine.id,listall=True)
-
-	self.assertEqual(
-			list_snapshot_response,
-			None,
-			"Check list vm snapshot has be deleted"
-			)
+        """Test to delete vm snapshots
+        """
+
+        list_snapshot_response = VmSnapshot.list(self.apiclient, vmid=self.virtual_machine.id, listall=True)
+
+        self.assertEqual(
+            isinstance(list_snapshot_response, list),
+            True,
+            "Check list response returns a valid list"
+        )
+        self.assertNotEqual(
+            list_snapshot_response,
+            None,
+            "Check if snapshot exists in ListSnapshot"
+        )
+        VmSnapshot.deleteVMSnapshot(self.apiclient, list_snapshot_response[0].id)
+
+        time.sleep(self.services["sleep"] * 3)
+
+        list_snapshot_response = VmSnapshot.list(self.apiclient, vmid=self.virtual_machine.id, listall=True)
+
+        self.assertEqual(
+            list_snapshot_response,
+            None,
+            "Check list vm snapshot has be deleted"
+        )


[19/50] [abbrv] git commit: updated refs/heads/vmsync to f737019

Posted by ah...@apache.org.
Don't report back resource state to ResourceManagerImpl

On adding secondary storage there is no need to report back to the ResourceManager, since
the secondary storage is no longer a directly connected host.

Signed-off-by: Prasanna Santhanam <ts...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/0dc5b0d2
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/0dc5b0d2
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/0dc5b0d2

Branch: refs/heads/vmsync
Commit: 0dc5b0d29af13786626e43907894877fdbc708f9
Parents: f1134da
Author: Prasanna Santhanam <ts...@apache.org>
Authored: Wed Jun 26 19:03:24 2013 +0530
Committer: Prasanna Santhanam <ts...@apache.org>
Committed: Sat Jun 29 13:53:41 2013 +0530

----------------------------------------------------------------------
 .../agent/manager/MockAgentManagerImpl.java     | 42 ++++++++------------
 .../agent/manager/MockStorageManagerImpl.java   |  6 +--
 2 files changed, 18 insertions(+), 30 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/0dc5b0d2/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockAgentManagerImpl.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockAgentManagerImpl.java b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockAgentManagerImpl.java
index 8542de3..69efc83 100755
--- a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockAgentManagerImpl.java
+++ b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockAgentManagerImpl.java
@@ -16,24 +16,6 @@
 // under the License.
 package com.cloud.agent.manager;
 
-import java.security.NoSuchAlgorithmException;
-import java.security.SecureRandom;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-import java.util.regex.PatternSyntaxException;
-
-import javax.ejb.Local;
-import javax.inject.Inject;
-import javax.naming.ConfigurationException;
-
-import org.apache.log4j.Logger;
-
 import com.cloud.agent.AgentManager;
 import com.cloud.agent.api.Answer;
 import com.cloud.agent.api.CheckHealthCommand;
@@ -45,7 +27,6 @@ import com.cloud.agent.api.HostStatsEntry;
 import com.cloud.agent.api.MaintainAnswer;
 import com.cloud.agent.api.PingTestCommand;
 import com.cloud.dc.dao.HostPodDao;
-import com.cloud.host.Host;
 import com.cloud.resource.AgentResourceBase;
 import com.cloud.resource.AgentRoutingResource;
 import com.cloud.resource.AgentStorageResource;
@@ -62,8 +43,24 @@ import com.cloud.utils.db.DB;
 import com.cloud.utils.db.Transaction;
 import com.cloud.utils.exception.CloudRuntimeException;
 import com.cloud.utils.net.NetUtils;
+import org.apache.log4j.Logger;
 import org.springframework.stereotype.Component;
 
+import javax.ejb.Local;
+import javax.inject.Inject;
+import javax.naming.ConfigurationException;
+import java.security.NoSuchAlgorithmException;
+import java.security.SecureRandom;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import java.util.regex.PatternSyntaxException;
+
 @Component
 @Local(value = { MockAgentManager.class })
 public class MockAgentManagerImpl extends ManagerBase implements MockAgentManager {
@@ -195,9 +192,6 @@ public class MockAgentManagerImpl extends ManagerBase implements MockAgentManage
             random = SecureRandom.getInstance("SHA1PRNG");
             _executor = new ThreadPoolExecutor(1, 5, 1, TimeUnit.DAYS, new LinkedBlockingQueue<Runnable>(),
                     new NamedThreadFactory("Simulator-Agent-Mgr"));
-            // ComponentLocator locator = ComponentLocator.getCurrentLocator();
-            // _simulatorMgr = (SimulatorManager)
-            // locator.getComponent(SimulatorManager.Name);
         } catch (NoSuchAlgorithmException e) {
             s_logger.debug("Failed to initialize random:" + e.toString());
             return false;
@@ -330,10 +324,6 @@ public class MockAgentManagerImpl extends ManagerBase implements MockAgentManage
                     details.put("guid", this.guid);
                     storageResource.configure("secondaryStorage", params);
                     storageResource.start();
-                    // on the simulator the ssvm is as good as a direct
-                    // agent
-                    _resourceMgr.addHost(mockHost.getDataCenterId(), storageResource, Host.Type.SecondaryStorageVM,
-                            details);
                     _resources.put(this.guid, storageResource);
                 } catch (ConfigurationException e) {
                     s_logger.debug("Failed to load secondary storage resource: " + e.toString());

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/0dc5b0d2/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockStorageManagerImpl.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockStorageManagerImpl.java b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockStorageManagerImpl.java
index bc5aa58..c81f079 100644
--- a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockStorageManagerImpl.java
+++ b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockStorageManagerImpl.java
@@ -889,13 +889,12 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa
     @Override
     public void preinstallTemplates(String url, long zoneId) {
         MockSecStorageVO storage = null;
-        Transaction txn = Transaction.open(Transaction.SIMULATOR_DB);
         try {
             storage = _mockSecStorageDao.findByUrl(url);
         } catch (Exception ex) {
             throw new CloudRuntimeException("Unable to find sec storage at " + url, ex);
         } finally {
-            txn = Transaction.open(Transaction.CLOUD_DB);
+            Transaction txn = Transaction.open(Transaction.CLOUD_DB);
             txn.close();
         }
         if (storage == null) {
@@ -916,7 +915,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa
             storage.setCapacity(DEFAULT_HOST_STORAGE_SIZE);
 
             storage.setMountPoint(dir);
-            txn = Transaction.open(Transaction.SIMULATOR_DB);
+            Transaction txn = Transaction.open(Transaction.SIMULATOR_DB);
             try {
                 txn.start();
                 storage = _mockSecStorageDao.persist(storage);
@@ -974,7 +973,6 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa
                 txn.close();
             }
         }
-
     }
 
     @Override


[40/50] [abbrv] git commit: updated refs/heads/vmsync to f737019

Posted by ah...@apache.org.
CLOUDSTACK-3300: Adding patches from cloudstack-1313


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/b68cc334
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/b68cc334
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/b68cc334

Branch: refs/heads/vmsync
Commit: b68cc3346a8b7c4e49c20d5dbe8e973cbbd6cbce
Parents: 0304034
Author: Radhika PC <ra...@citrix.com>
Authored: Mon Feb 18 16:55:35 2013 +0530
Committer: Sebastien Goasguen <ru...@gmail.com>
Committed: Mon Jul 1 12:01:39 2013 +0200

----------------------------------------------------------------------
 docs/en-US/attaching-volume.xml                 | 57 +++++++----
 docs/en-US/creating-new-volumes.xml             | 99 ++++++++++++--------
 docs/en-US/detach-move-volumes.xml              | 58 +++++++-----
 docs/en-US/storage.xml                          | 15 ++-
 docs/en-US/upload-existing-volume-to-vm.xml     | 90 +++++++++++++++++-
 docs/en-US/vm-storage-migration.xml             | 27 +++---
 .../volume-deletion-garbage-collection.xml      | 29 ++++--
 docs/en-US/working-with-volumes.xml             | 51 +++++-----
 8 files changed, 291 insertions(+), 135 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b68cc334/docs/en-US/attaching-volume.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/attaching-volume.xml b/docs/en-US/attaching-volume.xml
index 360555e..7511ec3 100644
--- a/docs/en-US/attaching-volume.xml
+++ b/docs/en-US/attaching-volume.xml
@@ -21,24 +21,41 @@
  specific language governing permissions and limitations
  under the License.
 -->
-
 <section id="attaching-volume">
-    <title>Attaching a Volume</title>
-    <para>You can attach a volume to a guest VM to provide extra disk storage. Attach a volume when you first create a new volume, when you are moving an existing volume from one VM to another, or after you have migrated a volume from one storage pool to another.</para>
-        <orderedlist>
-            <listitem><para>Log in to the &PRODUCT; UI as a user or admin.</para></listitem>
-            <listitem><para>In the left navigation, click Storage.</para></listitem>
-            <listitem><para>In Select View, choose Volumes.</para></listitem>
-            <listitem><para>4.    Click the volume name in the Volumes list, then click the Attach Disk button<inlinemediaobject>
-                <imageobject>
-                    <imagedata fileref="./images/attach-disk-icon.png" />
-                </imageobject>
-                <textobject><phrase>AttachDiskButton.png: button to attach a volume</phrase></textobject>
-            </inlinemediaobject>
-                </para></listitem>
-            <listitem><para>In the Instance popup, choose the VM to which you want to attach the volume. You will only see instances to which you are allowed to attach volumes; for example, a user will see only instances created by that user, but the administrator will have more choices.</para>
-                <!-- <para>If the VM is running in the OVM hypervisor, the VM must be stopped before a new volume can be attached to it.</para> -->
-            </listitem> 
-            <listitem><para>When the volume has been attached, you should be able to see it by clicking Instances, the instance name, and View Volumes.</para></listitem>
-        </orderedlist>
-    </section>
+  <title>Attaching a Volume</title>
+  <para>You can attach a volume to a guest VM to provide extra disk storage. Attach a volume when
+    you first create a new volume, when you are moving an existing volume from one VM to another, or
+    after you have migrated a volume from one storage pool to another.</para>
+  <orderedlist>
+    <listitem>
+      <para>Log in to the &PRODUCT; UI as a user or admin.</para>
+    </listitem>
+    <listitem>
+      <para>In the left navigation, click Storage.</para>
+    </listitem>
+    <listitem>
+      <para>In Select View, choose Volumes.</para>
+    </listitem>
+    <listitem>
+      <para>Click the volume name in the Volumes list, then click the Attach Disk button<inlinemediaobject>
+          <imageobject>
+            <imagedata fileref="./images/attach-disk-icon.png"/>
+          </imageobject>
+          <textobject>
+            <phrase>AttachDiskButton.png: button to attach a volume</phrase>
+          </textobject>
+        </inlinemediaobject>
+      </para>
+    </listitem>
+    <listitem>
+      <para>In the Instance popup, choose the VM to which you want to attach the volume. You will
+        only see instances to which you are allowed to attach volumes; for example, a user will see
+        only instances created by that user, but the administrator will have more choices.</para>
+      <!-- <para>If the VM is running in the OVM hypervisor, the VM must be stopped before a new volume can be attached to it.</para> -->
+    </listitem>
+    <listitem>
+      <para>When the volume has been attached, you should be able to see it by clicking Instances,
+        the instance name, and View Volumes.</para>
+    </listitem>
+  </orderedlist>
+</section>

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b68cc334/docs/en-US/creating-new-volumes.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/creating-new-volumes.xml b/docs/en-US/creating-new-volumes.xml
index 5a12d7f..5440dc5 100644
--- a/docs/en-US/creating-new-volumes.xml
+++ b/docs/en-US/creating-new-volumes.xml
@@ -20,44 +20,65 @@
 	KIND, either express or implied.  See the License for the
 	specific language governing permissions and limitations
 	under the License.
--->	
+-->
 <section id="creating-new-volumes">
-	<title>Creating a New Volume</title>
-	<para>You can add more data disk volumes to a guest VM at any time, up to the limits of your storage capacity. Both &PRODUCT; administrators and users can add volumes to VM instances. When you create a new volume, it is stored as an entity in &PRODUCT;, but the actual storage resources are not allocated on the physical storage device until you attach the volume. This optimization allows the &PRODUCT; to provision the volume nearest to the guest that will use it when the first attachment is made.</para>
-	<section id="local-storage-data-volumes">
-		<title>Using Local Storage for Data Volumes</title>
-	    <para>You can  create data volumes on local storage (supported with XenServer, KVM, and VMware).
-		    The data volume is placed on the same
-			host as the VM instance that is attached to the data volume. These
-			local data volumes can be attached to virtual machines, detached, re-attached,
-			and deleted just as with the other types of data volume.</para>
-		<para>Local storage is ideal for scenarios where persistence of data volumes and HA
-			is not required. Some of the benefits include reduced disk I/O latency and cost
-			reduction from using inexpensive local disks.</para>
-		<para>In order for local volumes to be used, the feature must be enabled for the
-			zone.</para>
-		<para>You can create a data disk offering for local storage. When a user creates a
-			new VM, they can select this disk offering in order to cause the data disk
-			volume to be placed in local storage.</para>
-		<para>You can not migrate a VM that has a volume in local storage to a different
-			host, nor migrate the volume itself away to a different host. If you want to put
-			a host into maintenance mode, you must first stop any VMs with local data
-			volumes on that host.</para>
-	</section>
-	<section id="creating-new-volume-steps">
-		<title>To Create a New Volume</title>
-				<orderedlist>
-				<listitem><para>Log in to the &PRODUCT; UI as a user or admin.</para></listitem>
-				<listitem><para>In the left navigation bar, click Storage.</para></listitem>
-				<listitem><para>In Select View, choose Volumes.</para></listitem>
-				<listitem><para>To create a new volume, click Add Volume, provide the following details, and click OK.</para>
-				<itemizedlist>
-					<listitem><para>Name. Give the volume a unique name so you can find it later.</para></listitem>
-					<listitem><para>Availability Zone. Where do you want the storage to reside? This should be close to the VM that will use the volume.</para></listitem>
-					<listitem><para>Disk Offering. Choose the characteristics of the storage.</para></listitem>
-				</itemizedlist>
-				<para>The new volume appears in the list of volumes with the state “Allocated.” The volume data is stored in &PRODUCT;, but the volume is not yet ready for use</para></listitem>
-				<listitem><para>To start using the volume, continue to Attaching a Volume </para></listitem>
-			</orderedlist>
-	</section>
+  <title>Creating a New Volume</title>
+  <para>You can add more data disk volumes to a guest VM at any time, up to the limits of your
+    storage capacity. Both &PRODUCT; administrators and users can add volumes to VM instances. When
+    you create a new volume, it is stored as an entity in &PRODUCT;, but the actual storage
+    resources are not allocated on the physical storage device until you attach the volume. This
+    optimization allows the &PRODUCT; to provision the volume nearest to the guest that will use it
+    when the first attachment is made.</para>
+  <section id="local-storage-data-volumes">
+    <title>Using Local Storage for Data Volumes</title>
+    <para>You can create data volumes on local storage (supported with XenServer, KVM, and VMware).
+      The data volume is placed on the same host as the VM instance that is attached to the data
+      volume. These local data volumes can be attached to virtual machines, detached, re-attached,
+      and deleted just as with the other types of data volume.</para>
+    <para>Local storage is ideal for scenarios where persistence of data volumes and HA is not
+      required. Some of the benefits include reduced disk I/O latency and cost reduction from using
+      inexpensive local disks.</para>
+    <para>In order for local volumes to be used, the feature must be enabled for the zone.</para>
+    <para>You can create a data disk offering for local storage. When a user creates a new VM, they
+      can select this disk offering in order to cause the data disk volume to be placed in local
+      storage.</para>
+    <para>You can not migrate a VM that has a volume in local storage to a different host, nor
+      migrate the volume itself away to a different host. If you want to put a host into maintenance
+      mode, you must first stop any VMs with local data volumes on that host.</para>
+  </section>
+  <section id="creating-new-volume-steps">
+    <title>To Create a New Volume</title>
+    <orderedlist>
+      <listitem>
+        <para>Log in to the &PRODUCT; UI as a user or admin.</para>
+      </listitem>
+      <listitem>
+        <para>In the left navigation bar, click Storage.</para>
+      </listitem>
+      <listitem>
+        <para>In Select View, choose Volumes.</para>
+      </listitem>
+      <listitem>
+        <para>To create a new volume, click Add Volume, provide the following details, and click
+          OK.</para>
+        <itemizedlist>
+          <listitem>
+            <para>Name. Give the volume a unique name so you can find it later.</para>
+          </listitem>
+          <listitem>
+            <para>Availability Zone. Where do you want the storage to reside? This should be close
+              to the VM that will use the volume.</para>
+          </listitem>
+          <listitem>
+            <para>Disk Offering. Choose the characteristics of the storage.</para>
+          </listitem>
+        </itemizedlist>
+        <para>The new volume appears in the list of volumes with the state “Allocated.” The volume
+          data is stored in &PRODUCT;, but the volume is not yet ready for use.</para>
+      </listitem>
+      <listitem>
+        <para>To start using the volume, continue to Attaching a Volume.</para>
+      </listitem>
+    </orderedlist>
+  </section>
 </section>

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b68cc334/docs/en-US/detach-move-volumes.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/detach-move-volumes.xml b/docs/en-US/detach-move-volumes.xml
index fda6e66..7103c30 100644
--- a/docs/en-US/detach-move-volumes.xml
+++ b/docs/en-US/detach-move-volumes.xml
@@ -22,25 +22,39 @@
 	under the License.
 -->
 <section id="detach-move-volumes">
-	<title>Detaching and Moving Volumes</title>
-		<note><para>This procedure is different from moving disk volumes from one storage pool to another. See VM Storage Migration </para></note>
-		<para>A volume can be detached from a guest VM and attached to another guest. Both &PRODUCT; administrators and users can detach volumes from VMs and move them to other VMs.</para>
-		<para>If the two VMs are in different clusters, and the volume is large, it may take several minutes for the volume to be moved to the new VM.</para>
-        <!-- <para>If the destination VM is running in the OVM hypervisor, the VM must be stopped before a new volume can be attached to it.</para> -->
-		<orderedlist>
-			<listitem><para>Log in to the &PRODUCT; UI as a user or admin.</para></listitem>
-			<listitem><para>In the left navigation bar, click Storage, and choose Volumes in Select View. Alternatively, if you know which VM the volume is attached to, you can click Instances, click the VM name, and click View Volumes.</para></listitem>
-			<listitem><para>Click the name of the volume you want to detach, then click the Detach Disk button. <inlinemediaobject>
-					<imageobject>
-						<imagedata fileref="./images/detach-disk-icon.png"/>
-					</imageobject>
-					<textobject>
-						<phrase>DetachDiskButton.png: button to detach a volume</phrase>
-					</textobject>
-				</inlinemediaobject>
-			</para></listitem>
-			<listitem><para>To move the volume to another VM, follow the steps in <xref linkend="attaching-volume"
-				/>.</para></listitem>
-		</orderedlist>
-	</section>
-
+  <title>Detaching and Moving Volumes</title>
+  <note>
+    <para>This procedure is different from moving disk volumes from one storage pool to another. See
+      VM Storage Migration </para>
+  </note>
+  <para>A volume can be detached from a guest VM and attached to another guest. Both &PRODUCT;
+    administrators and users can detach volumes from VMs and move them to other VMs.</para>
+  <para>If the two VMs are in different clusters, and the volume is large, it may take several
+    minutes for the volume to be moved to the new VM.</para>
+  <!-- <para>If the destination VM is running in the OVM hypervisor, the VM must be stopped before a new volume can be attached to it.</para> -->
+  <orderedlist>
+    <listitem>
+      <para>Log in to the &PRODUCT; UI as a user or admin.</para>
+    </listitem>
+    <listitem>
+      <para>In the left navigation bar, click Storage, and choose Volumes in Select View.
+        Alternatively, if you know which VM the volume is attached to, you can click Instances,
+        click the VM name, and click View Volumes.</para>
+    </listitem>
+    <listitem>
+      <para>Click the name of the volume you want to detach, then click the Detach Disk button. <inlinemediaobject>
+          <imageobject>
+            <imagedata fileref="./images/detach-disk-icon.png"/>
+          </imageobject>
+          <textobject>
+            <phrase>DetachDiskButton.png: button to detach a volume</phrase>
+          </textobject>
+        </inlinemediaobject>
+      </para>
+    </listitem>
+    <listitem>
+      <para>To move the volume to another VM, follow the steps in <xref linkend="attaching-volume"
+        />.</para>
+    </listitem>
+  </orderedlist>
+</section>

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b68cc334/docs/en-US/storage.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/storage.xml b/docs/en-US/storage.xml
index 580fe59..3ef7324 100644
--- a/docs/en-US/storage.xml
+++ b/docs/en-US/storage.xml
@@ -1,5 +1,5 @@
 <?xml version='1.0' encoding='utf-8' ?>
-<!DOCTYPE section PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd" [
+<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd" [
 <!ENTITY % BOOK_ENTITIES SYSTEM "cloudstack.ent">
 %BOOK_ENTITIES;
 ]>
@@ -21,12 +21,11 @@
     specific language governing permissions and limitations
     under the License.
 -->
-
 <chapter id="storage">
-	<title>Working With Storage</title>
-    <xi:include href="storage-overview.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
-    <xi:include href="primary-storage.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
-    <xi:include href="secondary-storage.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
-    <xi:include href="working-with-volumes.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
-    <xi:include href="working-with-snapshots.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
+  <title>Working With Storage</title>
+  <xi:include href="storage-overview.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
+  <xi:include href="primary-storage.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
+  <xi:include href="secondary-storage.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
+  <xi:include href="working-with-volumes.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
+  <xi:include href="working-with-snapshots.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
 </chapter>

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b68cc334/docs/en-US/upload-existing-volume-to-vm.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/upload-existing-volume-to-vm.xml b/docs/en-US/upload-existing-volume-to-vm.xml
index d2b6571..6be43f8 100644
--- a/docs/en-US/upload-existing-volume-to-vm.xml
+++ b/docs/en-US/upload-existing-volume-to-vm.xml
@@ -21,8 +21,8 @@
  specific language governing permissions and limitations
  under the License.
 -->
-
 <section id="upload-existing-volume-to-vm">
+<<<<<<< HEAD
     <title>Uploading an Existing Volume to a Virtual Machine</title>
         <para>Existing data can be made accessible to a virtual machine. This is called uploading a volume to the VM. For example, this is useful to upload data from a local file system and attach it to a VM. Root administrators, domain administrators, and end users can all upload existing volumes to VMs.</para>
         <para>The upload is performed using HTTP. The uploaded volume is placed in the zone's secondary storage</para>
@@ -73,4 +73,92 @@
         </listitem>
         <listitem><para>Wait until the status of the volume shows that the upload is complete. Click Instances - Volumes, find the name you specified in step <xref linkend="optional-hash"/>, and make sure the status is Uploaded.</para></listitem>
     </orderedlist>
+=======
+  <title>Uploading an Existing Volume to a Virtual Machine</title>
+  <para>Existing data can be made accessible to a virtual machine. This is called uploading a volume
+    to the VM. For example, this is useful to upload data from a local file system and attach it to
+    a VM. Root administrators, domain administrators, and end users can all upload existing volumes
+    to VMs.</para>
+  <para>The upload is performed using HTTP. The uploaded volume is placed in the zone's secondary
+    storage</para>
+  <para>You cannot upload a volume if the preconfigured volume limit has already been reached. The
+    default limit for the cloud is set in the global configuration parameter max.account.volumes,
+    but administrators can also set per-domain limits that are different from the global default.
+    See Setting Usage Limits </para>
+  <para>To upload a volume:</para>
+  <orderedlist>
+    <listitem>
+      <para>(Optional) Create an MD5 hash (checksum) of the disk image file that you are going to
+        upload. After uploading the data disk, &PRODUCT; will use this value to verify that no data
+        corruption has occurred.</para>
+    </listitem>
+    <listitem>
+      <para>Log in to the &PRODUCT; UI as an administrator or user</para>
+    </listitem>
+    <listitem>
+      <para>In the left navigation bar, click Storage.</para>
+    </listitem>
+    <listitem>
+      <para>Click Upload Volume.</para>
+    </listitem>
+    <listitem>
+      <para>Provide the following:</para>
+      <itemizedlist>
+        <listitem>
+          <para>Name and Description. Any desired name and a brief description that can be shown in
+            the UI.</para>
+        </listitem>
+        <listitem>
+          <para>Availability Zone. Choose the zone where you want to store the volume. VMs running
+            on hosts in this zone can attach the volume.</para>
+        </listitem>
+        <listitem>
+          <para>Format. Choose one of the following to indicate the disk image format of the
+            volume.</para>
+          <informaltable>
+            <tgroup cols="2" align="left" colsep="1" rowsep="1">
+              <thead>
+                <row>
+                  <entry><para>Hypervisor</para></entry>
+                  <entry><para>Disk Image Format</para></entry>
+                </row>
+              </thead>
+              <tbody>
+                <row>
+                  <entry><para>XenServer</para></entry>
+                  <entry><para>VHD</para></entry>
+                </row>
+                <row>
+                  <entry><para>VMware</para></entry>
+                  <entry><para>OVA</para></entry>
+                </row>
+                <row>
+                  <entry><para>KVM</para></entry>
+                  <entry><para>QCOW2</para></entry>
+                </row>
+                <!--                        <row>
+                            <entry><para>OVM</para></entry>
+                            <entry><para>RAW</para></entry>
+                        </row> -->
+              </tbody>
+            </tgroup>
+          </informaltable>
+        </listitem>
+        <listitem>
+          <para>URL. The secure HTTP or HTTPS URL that &PRODUCT; can use to access your disk. The
+            type of file at the URL must match the value chosen in Format. For example, if Format is
+            VHD, the URL might look like the following:</para>
+          <para>http://yourFileServerIP/userdata/myDataDisk.vhd</para>
+        </listitem>
+        <listitem>
+          <para>MD5 checksum. (Optional) Use the hash that you created in step 1.</para>
+        </listitem>
+      </itemizedlist>
+    </listitem>
+    <listitem>
+      <para>Wait until the status of the volume shows that the upload is complete. Click Instances -
+        Volumes, find the name you specified in step 5, and make sure the status is Uploaded.</para>
+    </listitem>
+  </orderedlist>
+>>>>>>> 9cb9f45... CLOUDSTACK-1313
 </section>

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b68cc334/docs/en-US/vm-storage-migration.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/vm-storage-migration.xml b/docs/en-US/vm-storage-migration.xml
index 7c3824b..e0dad57 100644
--- a/docs/en-US/vm-storage-migration.xml
+++ b/docs/en-US/vm-storage-migration.xml
@@ -22,14 +22,19 @@
     under the License.
 -->
 <section id="vm-storage-migration">
-    <title>VM Storage Migration</title>
-	<para>Supported in XenServer, KVM, and VMware.</para>
-    <note><para>This procedure is different from moving disk volumes from one VM to another. See Detaching and Moving Volumes <xref linkend="detach-move-volumes" />.</para>
-        </note>
-    <para></para>
-	<para>You can migrate a virtual machine’s root disk volume or any additional data disk volume from one storage pool to another in the same zone.</para>
-	<para>You can use the storage migration feature to achieve some commonly desired administration goals, such as balancing the load on storage pools and increasing the reliability of virtual machines by moving them away from any storage  pool that is experiencing  issues.</para>
-    <xi:include href="migrate-datadisk-volume-new-storage-pool.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
-    <xi:include href="migrate-vm-rootvolume-volume-new-storage-pool.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
-	</section>
-
+  <title>VM Storage Migration</title>
+  <para>Supported in XenServer, KVM, and VMware.</para>
+  <note>
+    <para>This procedure is different from moving disk volumes from one VM to another. See Detaching
+      and Moving Volumes <xref linkend="detach-move-volumes"/>.</para>
+  </note>
+  <para>You can migrate a virtual machine’s root disk volume or any additional data disk volume from
+    one storage pool to another in the same zone.</para>
+  <para>You can use the storage migration feature to achieve some commonly desired administration
+    goals, such as balancing the load on storage pools and increasing the reliability of virtual
+    machines by moving them away from any storage pool that is experiencing issues.</para>
+  <xi:include href="migrate-datadisk-volume-new-storage-pool.xml"
+    xmlns:xi="http://www.w3.org/2001/XInclude"/>
+  <xi:include href="migrate-vm-rootvolume-volume-new-storage-pool.xml"
+    xmlns:xi="http://www.w3.org/2001/XInclude"/>
+</section>

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b68cc334/docs/en-US/volume-deletion-garbage-collection.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/volume-deletion-garbage-collection.xml b/docs/en-US/volume-deletion-garbage-collection.xml
index d162d84..4186438 100644
--- a/docs/en-US/volume-deletion-garbage-collection.xml
+++ b/docs/en-US/volume-deletion-garbage-collection.xml
@@ -21,15 +21,24 @@
  specific language governing permissions and limitations
  under the License.
 -->
-
 <section id="volume-deletion-garbage-collection">
-    <title>Volume Deletion and Garbage Collection</title>
-    <para>The deletion of a volume does not delete the snapshots that have been created from the volume</para>
-    <para>When a VM is destroyed, data disk volumes that are attached to the VM are not deleted.</para>
-    <para>Volumes are permanently destroyed using a garbage collection process.  The global configuration variables expunge.delay and expunge.interval determine when the physical deletion of volumes will occur.</para>
-    <itemizedlist>
-        <listitem><para>expunge.delay: determines how old the volume must be before it is destroyed, in seconds</para></listitem>
-        <listitem><para>expunge.interval: determines how often to run the garbage collection check</para></listitem>
-    </itemizedlist>
-    <para>Administrators should adjust these values depending on site policies around data retention.</para>
+  <title>Volume Deletion and Garbage Collection</title>
+  <para>The deletion of a volume does not delete the snapshots that have been created from the
+    volume</para>
+  <para>When a VM is destroyed, data disk volumes that are attached to the VM are not
+    deleted.</para>
+  <para>Volumes are permanently destroyed using a garbage collection process. The global
+    configuration variables expunge.delay and expunge.interval determine when the physical deletion
+    of volumes will occur.</para>
+  <itemizedlist>
+    <listitem>
+      <para>expunge.delay: determines how old the volume must be before it is destroyed, in
+        seconds</para>
+    </listitem>
+    <listitem>
+      <para>expunge.interval: determines how often to run the garbage collection check</para>
+    </listitem>
+  </itemizedlist>
+  <para>Administrators should adjust these values depending on site policies around data
+    retention.</para>
 </section>

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b68cc334/docs/en-US/working-with-volumes.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/working-with-volumes.xml b/docs/en-US/working-with-volumes.xml
index ab567d2..6832cff 100644
--- a/docs/en-US/working-with-volumes.xml
+++ b/docs/en-US/working-with-volumes.xml
@@ -21,29 +21,32 @@
     specific language governing permissions and limitations
     under the License.
 -->
-
 <section id="working-with-volumes">
-    <title>Using Swift for Secondary Storage</title>
-    <para>A volume provides storage to a guest VM.  The volume can provide for
-      a root disk or an additional data disk.  &PRODUCT; supports additional
-      volumes for guest VMs.
-    </para>
-    <para>Volumes are created for a specific hypervisor type. A volume that has
-      been attached to guest using one hypervisor type (e.g, XenServer) may not
-      be attached to a guest that is using another hypervisor type (e.g. 
-      vSphere, KVM).  This is because the different hypervisors use
-      different disk image formats.
-    </para>
-    <para>&PRODUCT; defines a volume as a unit of storage available to a guest
-      VM. Volumes are either root disks or data disks. The root disk has "/"
-      in the file system and is usually the boot device. Data disks provide
-      for additional storage (e.g. As "/opt" or "D:"). Every guest VM has a root
-      disk, and VMs can also optionally have a data disk. End users can mount
-      multiple data disks to guest VMs. Users choose data disks from the disk
-      offerings created by administrators. The user can create a template from
-      a volume as well; this is the standard procedure for private template
-      creation. Volumes are hypervisor-specific: a volume from one hypervisor
-      type may not be used on a guest of another hypervisor type.
-    </para>    
+  <title>Working With Volumes</title>
+  <para>A volume provides storage to a guest VM. The volume can provide for a root disk or an
+    additional data disk. &PRODUCT; supports additional volumes for guest VMs. </para>
+  <para>Volumes are created for a specific hypervisor type. A volume that has been attached to a guest
+    using one hypervisor type (e.g., XenServer) may not be attached to a guest that is using another
+    hypervisor type, for example: vSphere, KVM. This is because the different hypervisors use different
+    disk image formats. </para>
+  <para>&PRODUCT; defines a volume as a unit of storage available to a guest VM. Volumes are either
+    root disks or data disks. The root disk has "/" in the file system and is usually the boot
+    device. Data disks provide for additional storage, for example: "/opt" or "D:". Every guest VM
+    has a root disk, and VMs can also optionally have a data disk. End users can mount multiple data
+    disks to guest VMs. Users choose data disks from the disk offerings created by administrators.
+    The user can create a template from a volume as well; this is the standard procedure for private
+    template creation. Volumes are hypervisor-specific: a volume from one hypervisor type may not be
+    used on a guest of another hypervisor type. </para>
+  <note>
+    <para>&PRODUCT; supports attaching up to 13 data disks to a VM on XenServer hypervisor versions
+      6.0 and above. For the VMs on other hypervisor types, the data disk limit is 6.</para>
+  </note>
+  <xi:include href="creating-new-volumes.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
+  <xi:include href="upload-existing-volume-to-vm.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
+  <xi:include href="attaching-volume.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
+  <xi:include href="detach-move-volumes.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
+  <xi:include href="vm-storage-migration.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
+  <xi:include href="resizing-volumes.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
+  <xi:include href="volume-deletion-garbage-collection.xml"
+    xmlns:xi="http://www.w3.org/2001/XInclude"/>
 </section>
-


[48/50] [abbrv] Another merge from master. This is just getting laborious

Posted by ah...@apache.org.
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/57bafc8f/plugins/network-elements/stratosphere-ssp/src/org/apache/cloudstack/api/commands/AddSspCmd.java
----------------------------------------------------------------------
diff --cc plugins/network-elements/stratosphere-ssp/src/org/apache/cloudstack/api/commands/AddSspCmd.java
index 0000000,d42ee67..8aa0761
mode 000000,100644..100644
--- a/plugins/network-elements/stratosphere-ssp/src/org/apache/cloudstack/api/commands/AddSspCmd.java
+++ b/plugins/network-elements/stratosphere-ssp/src/org/apache/cloudstack/api/commands/AddSspCmd.java
@@@ -1,0 -1,138 +1,139 @@@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements.  See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership.  The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License.  You may obtain a copy of the License at
+ //
+ //   http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied.  See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+ package org.apache.cloudstack.api.commands;
+ import javax.inject.Inject;
+ 
++import org.apache.log4j.Logger;
++
+ import org.apache.cloudstack.api.APICommand;
+ import org.apache.cloudstack.api.ApiConstants;
+ import org.apache.cloudstack.api.BaseCmd;
++import org.apache.cloudstack.api.Parameter;
+ import org.apache.cloudstack.api.response.SspResponse;
+ import org.apache.cloudstack.api.response.ZoneResponse;
 -import org.apache.cloudstack.api.Parameter;
++import org.apache.cloudstack.context.CallContext;
+ import org.apache.cloudstack.network.element.SspService;
 -import org.apache.log4j.Logger;
+ 
+ import com.cloud.dc.dao.DataCenterDao;
+ import com.cloud.exception.ConcurrentOperationException;
+ import com.cloud.exception.InsufficientCapacityException;
+ import com.cloud.exception.NetworkRuleConflictException;
+ import com.cloud.exception.ResourceAllocationException;
+ import com.cloud.exception.ResourceUnavailableException;
+ import com.cloud.host.Host;
 -import com.cloud.user.UserContext;
+ 
+ 
+ @APICommand(name="addStratosphereSsp", responseObject=SspResponse.class, description="Adds stratosphere ssp server")
+ public class AddSspCmd extends BaseCmd {
+     private static final Logger s_logger = Logger.getLogger(AddSspCmd.class.getName());
+     @Inject
+     SspService _service;
+     @Inject
+     DataCenterDao _dcDao;
+ 
+     @Parameter(name=ApiConstants.ZONE_ID, type=CommandType.UUID, entityType=ZoneResponse.class,
+             required=true, description="the zone ID")
+     private Long zoneId;
+ 
+     @Parameter(name=ApiConstants.URL, type=CommandType.STRING, required=true, description="stratosphere ssp server url")
+     private String url;
+ 
+     @Parameter(name=ApiConstants.USERNAME, type=CommandType.STRING, required=false, description="stratosphere ssp api username")
+     private String username;
+ 
+     @Parameter(name=ApiConstants.PASSWORD, type=CommandType.STRING, required=false, description="stratosphere ssp api password")
+     private String password;
+ 
+     @Parameter(name=ApiConstants.NAME, type=CommandType.STRING, required=true, description="stratosphere ssp api name")
+     private String name; // required because HostVO name field defined as NOT NULL.
+ 
+     @Parameter(name="tenantuuid", type=CommandType.STRING, required=false, description="stratosphere ssp tenant uuid")
+     private String tenantUuid; // required in creating ssp tenant_network
+ 
+     @Override
+     public String getCommandName() {
+         return getClass().getAnnotation(APICommand.class).name();
+     }
+ 
+     @Override
+     public long getEntityOwnerId() {
 -        return UserContext.current().getCaller().getId();
++        return CallContext.current().getCallingAccountId();
+     }
+ 
+     @Override
+     public void execute() throws ResourceUnavailableException,
+     InsufficientCapacityException, ConcurrentOperationException,
+     ResourceAllocationException, NetworkRuleConflictException {
+         s_logger.trace("execute");
+         Host host = _service.addSspHost(this);
+         SspResponse response = new SspResponse();
+         response.setResponseName(getCommandName());
+         response.setObjectName("ssphost");
 -        response.setUrl(this.getUrl());
++        response.setUrl(getUrl());
+         response.setZoneId(_dcDao.findById(getZoneId()).getUuid());
+         response.setHostId(host.getUuid());
 -        this.setResponseObject(response);
++        setResponseObject(response);
+     }
+ 
+     public Long getZoneId() {
+         return zoneId;
+     }
+ 
+     public void setZoneId(Long zoneId) {
+         this.zoneId = zoneId;
+     }
+ 
+     public String getUrl() {
+         return url;
+     }
+ 
+     public void setUrl(String url) {
+         this.url = url;
+     }
+ 
+     public String getUsername() {
+         return username;
+     }
+ 
+     public void setUsername(String username) {
+         this.username = username;
+     }
+ 
+     public String getPassword() {
+         return password;
+     }
+ 
+     public void setPassword(String password) {
+         this.password = password;
+     }
+ 
+     public String getName() {
+         return name;
+     }
+ 
+     public void setName(String name) {
+         this.name = name;
+     }
+ 
+     public String getTenantUuid() {
+         return tenantUuid;
+     }
+ 
+     public void setTenantUuid(String tenantUuid) {
+         this.tenantUuid = tenantUuid;
+     }
+ }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/57bafc8f/plugins/network-elements/stratosphere-ssp/src/org/apache/cloudstack/api/commands/DeleteSspCmd.java
----------------------------------------------------------------------
diff --cc plugins/network-elements/stratosphere-ssp/src/org/apache/cloudstack/api/commands/DeleteSspCmd.java
index 0000000,bfbd5d9..5177dfb
mode 000000,100644..100644
--- a/plugins/network-elements/stratosphere-ssp/src/org/apache/cloudstack/api/commands/DeleteSspCmd.java
+++ b/plugins/network-elements/stratosphere-ssp/src/org/apache/cloudstack/api/commands/DeleteSspCmd.java
@@@ -1,0 -1,74 +1,75 @@@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements.  See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership.  The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License.  You may obtain a copy of the License at
+ //
+ //   http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied.  See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+ package org.apache.cloudstack.api.commands;
+ 
+ import javax.inject.Inject;
+ 
 -import org.apache.cloudstack.api.response.SuccessResponse;
 -import org.apache.cloudstack.api.response.HostResponse;
++import org.apache.log4j.Logger;
++
+ import org.apache.cloudstack.api.APICommand;
+ import org.apache.cloudstack.api.ApiConstants;
+ import org.apache.cloudstack.api.BaseCmd;
+ import org.apache.cloudstack.api.Parameter;
++import org.apache.cloudstack.api.response.HostResponse;
++import org.apache.cloudstack.api.response.SuccessResponse;
++import org.apache.cloudstack.context.CallContext;
+ import org.apache.cloudstack.network.element.SspService;
 -import org.apache.log4j.Logger;
+ 
+ import com.cloud.exception.ConcurrentOperationException;
+ import com.cloud.exception.InsufficientCapacityException;
+ import com.cloud.exception.NetworkRuleConflictException;
+ import com.cloud.exception.ResourceAllocationException;
+ import com.cloud.exception.ResourceUnavailableException;
 -import com.cloud.user.UserContext;
+ 
+ @APICommand(name="deleteStratosphereSsp", responseObject=SuccessResponse.class, description="Removes stratosphere ssp server")
+ public class DeleteSspCmd extends BaseCmd {
+     private static final Logger s_logger = Logger.getLogger(AddSspCmd.class.getName());
+     @Inject
+     SspService _service;
+ 
+     @Parameter(name=ApiConstants.HOST_ID, type=CommandType.UUID, entityType=HostResponse.class,
+             required=true, description="the host ID of ssp server")
+     private Long hostId;
+ 
+     @Override
+     public String getCommandName() {
+         return getClass().getAnnotation(APICommand.class).name();
+     }
+ 
+     @Override
+     public long getEntityOwnerId() {
 -        return UserContext.current().getCaller().getId();
++        return CallContext.current().getCallingAccountId();
+     }
+ 
+     @Override
+     public void execute() throws ResourceUnavailableException,
+     InsufficientCapacityException, ConcurrentOperationException,
+     ResourceAllocationException, NetworkRuleConflictException {
+         s_logger.trace("execute");
+         SuccessResponse resp = new SuccessResponse();
+         resp.setSuccess(_service.deleteSspHost(this));
 -        this.setResponseObject(resp);
++        setResponseObject(resp);
+     }
+ 
+     public Long getHostId() {
+         return hostId;
+     }
+ 
+     public void setHostId(Long hostId) {
+         this.hostId = hostId;
+     }
+ }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/57bafc8f/plugins/network-elements/stratosphere-ssp/src/org/apache/cloudstack/network/dao/SspCredentialVO.java
----------------------------------------------------------------------
diff --cc plugins/network-elements/stratosphere-ssp/src/org/apache/cloudstack/network/dao/SspCredentialVO.java
index 0000000,9c6cf14..d05b270
mode 000000,100644..100644
--- a/plugins/network-elements/stratosphere-ssp/src/org/apache/cloudstack/network/dao/SspCredentialVO.java
+++ b/plugins/network-elements/stratosphere-ssp/src/org/apache/cloudstack/network/dao/SspCredentialVO.java
@@@ -1,0 -1,67 +1,67 @@@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements.  See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership.  The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License.  You may obtain a copy of the License at
+ //
+ //   http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied.  See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+ package org.apache.cloudstack.network.dao;
+ 
+ import javax.persistence.Column;
+ import javax.persistence.Entity;
+ import javax.persistence.GeneratedValue;
+ import javax.persistence.GenerationType;
+ import javax.persistence.Id;
+ import javax.persistence.Table;
+ 
+ @Entity
+ @Table(name="external_stratosphere_ssp_credentials")
+ public class SspCredentialVO {
+     @Id
+     @GeneratedValue(strategy = GenerationType.IDENTITY)
+     @Column(name="id")
+     private long id;
+ 
+     @Column(name="data_center_id")
 -    private long dataCenterId; // Actually, this is zoneId
++    private long zoneId; // the zone id; the underlying column is named data_center_id for historical reasons
+ 
+     // XXX: We might want to restrict access to this by cloudstack privileges.
+     @Column(name="username")
+     private String username;
+ 
+     @Column(name="password")
+     private String password;
+ 
+     public long getZoneId() {
 -        return dataCenterId;
++        return zoneId;
+     }
+ 
+     public void setZoneId(long zoneId) {
 -        this.dataCenterId = zoneId;
++        this.zoneId = zoneId;
+     }
+ 
+     public String getUsername() {
+         return username;
+     }
+ 
+     public void setUsername(String username) {
+         this.username = username;
+     }
+ 
+     public String getPassword() {
+         return password;
+     }
+ 
+     public void setPassword(String password) {
+         this.password = password;
+     }
+ }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/57bafc8f/plugins/network-elements/stratosphere-ssp/src/org/apache/cloudstack/network/element/SspElement.java
----------------------------------------------------------------------
diff --cc plugins/network-elements/stratosphere-ssp/src/org/apache/cloudstack/network/element/SspElement.java
index 0000000,823c16b..d145117
mode 000000,100644..100644
--- a/plugins/network-elements/stratosphere-ssp/src/org/apache/cloudstack/network/element/SspElement.java
+++ b/plugins/network-elements/stratosphere-ssp/src/org/apache/cloudstack/network/element/SspElement.java
@@@ -1,0 -1,619 +1,622 @@@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements.  See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership.  The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License.  You may obtain a copy of the License at
+ //
+ //   http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied.  See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+ package org.apache.cloudstack.network.element;
+ 
+ import java.net.MalformedURLException;
+ import java.net.URL;
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.HashMap;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Set;
+ import java.util.UUID;
+ 
+ import javax.ejb.Local;
+ import javax.inject.Inject;
+ import javax.naming.ConfigurationException;
+ 
+ import org.apache.cloudstack.api.commands.AddSspCmd;
+ import org.apache.cloudstack.api.commands.DeleteSspCmd;
+ import org.apache.cloudstack.network.dao.SspCredentialDao;
+ import org.apache.cloudstack.network.dao.SspCredentialVO;
+ import org.apache.cloudstack.network.dao.SspTenantDao;
+ import org.apache.cloudstack.network.dao.SspTenantVO;
+ import org.apache.cloudstack.network.dao.SspUuidDao;
+ import org.apache.cloudstack.network.dao.SspUuidVO;
+ import org.apache.log4j.Logger;
+ 
+ import com.cloud.configuration.dao.ConfigurationDao;
+ import com.cloud.dc.dao.DataCenterDao;
+ import com.cloud.deploy.DeployDestination;
+ import com.cloud.exception.ConcurrentOperationException;
+ import com.cloud.exception.InsufficientCapacityException;
+ import com.cloud.exception.InvalidParameterValueException;
+ import com.cloud.exception.ResourceUnavailableException;
+ import com.cloud.host.Host;
+ import com.cloud.host.HostVO;
+ import com.cloud.host.dao.HostDao;
+ import com.cloud.network.Network;
+ import com.cloud.network.Network.Capability;
+ import com.cloud.network.Network.Provider;
+ import com.cloud.network.Network.Service;
+ import com.cloud.network.NetworkManager;
+ import com.cloud.network.NetworkMigrationResponder;
+ import com.cloud.network.NetworkModel;
+ import com.cloud.network.Networks.BroadcastDomainType;
+ import com.cloud.network.PhysicalNetwork;
+ import com.cloud.network.PhysicalNetworkServiceProvider;
+ import com.cloud.network.PhysicalNetworkServiceProvider.State;
+ import com.cloud.network.dao.NetworkServiceMapDao;
+ import com.cloud.network.dao.PhysicalNetworkDao;
+ import com.cloud.network.dao.PhysicalNetworkServiceProviderDao;
+ import com.cloud.network.dao.PhysicalNetworkServiceProviderVO;
+ import com.cloud.network.element.ConnectivityProvider;
+ import com.cloud.network.element.NetworkElement;
+ import com.cloud.offering.NetworkOffering;
++import com.cloud.resource.ResourceManager;
+ import com.cloud.utils.component.AdapterBase;
+ import com.cloud.utils.exception.CloudRuntimeException;
+ import com.cloud.vm.NicProfile;
+ import com.cloud.vm.NicVO;
+ import com.cloud.vm.ReservationContext;
 -import com.cloud.vm.VirtualMachine;
+ import com.cloud.vm.VirtualMachineProfile;
+ import com.cloud.vm.dao.NicDao;
 -import com.cloud.resource.ResourceManager;
+ 
+ /**
+  * Stratosphere sdn platform network element
+  *
+  * This class will be called per network setup operations.
+  * This class also have ssp specific methods.
+  *
+  * Current implementation use HostVO for storage of api endpoint information,
+  * but note this is not necessary. The other way is create our own database
+  * table for that information.
+  */
+ @Local(value={NetworkElement.class, SspManager.class})
+ public class SspElement extends AdapterBase implements ConnectivityProvider, SspManager, SspService, NetworkMigrationResponder {
+     private static final Logger s_logger = Logger.getLogger(SspElement.class);
+     public static final String s_SSP_NAME = "StratosphereSsp";
+ 
+     @Inject
+     NetworkServiceMapDao _ntwkSrvcDao;
+     @Inject
+     NetworkModel _networkModel;
+     @Inject
+     NetworkManager _networkMgr;
+     @Inject
+     ResourceManager _resourceMgr;
+     @Inject
+     PhysicalNetworkDao _physicalNetworkDao;
+     @Inject
+     PhysicalNetworkServiceProviderDao _physicalNetworkServiceProviderDao;
+     @Inject
+     SspCredentialDao _sspCredentialDao;
+     @Inject
+     SspTenantDao _sspTenantDao;
+     @Inject
+     SspUuidDao _sspUuidDao;
+     @Inject
+     DataCenterDao _dcDao;
+     @Inject
+     HostDao _hostDao;
+     @Inject
+     ConfigurationDao _configDao;
+     @Inject
+     NicDao _nicDao = null;
+ 
+     @Override
+     public boolean configure(String name, Map<String, Object> params)
+             throws ConfigurationException {
+         return super.configure(name, params);
+     }
+ 
+     @Override
+     public Map<Service, Map<Capability, String>> getCapabilities() {
+         Map<Service, Map<Capability, String>> capabilities = new HashMap<Service, Map<Capability, String>>();
+         capabilities.put(Service.Connectivity, new HashMap<Capability,String>()); // XXX: we may need more capability categories here.
+         return capabilities;
+     }
+ 
+     @Override
+     public Provider getProvider() {
+         Provider provider = null;
+         synchronized(s_SSP_NAME){
+             provider = Provider.getProvider(s_SSP_NAME);
+             if(provider==null){
+                 provider = new Provider(s_SSP_NAME, true);
+                 s_logger.debug("registering Network.Provider "+s_SSP_NAME);
+             }
+         }
+         return provider;
+     }
+ 
+     private List<SspClient> fetchSspClients(Long physicalNetworkId, Long dataCenterId, boolean enabled_only){
+         ArrayList<SspClient> clients = new ArrayList<SspClient>();
+ 
+         boolean provider_found = false;
+         PhysicalNetworkServiceProviderVO provider = _physicalNetworkServiceProviderDao.findByServiceProvider(physicalNetworkId, s_SSP_NAME);
+         if(enabled_only){
+             if(provider != null && provider.getState() == State.Enabled){
+                 provider_found = true;
+             }
+         }else{
+             provider_found = true;
+         }
+ 
+         if(physicalNetworkId != null && provider_found){
+             SspCredentialVO credential = _sspCredentialDao.findByZone(dataCenterId);
+             List<HostVO> hosts = _resourceMgr.listAllHostsInOneZoneByType(Host.Type.L2Networking, dataCenterId);
+             for(HostVO host : hosts){
+                 assert(credential != null);
+                 _hostDao.loadDetails(host);
+                 if("v1Api".equals(host.getDetail("sspHost"))){
+                     clients.add(new SspClient(host.getDetail("url"), credential.getUsername(), credential.getPassword()));
+                 }
+             }
+         }
+         if(clients.size()==0){
+             String global_apiUrl = _configDao.getValueAndInitIfNotExist("ssp.url", "Network", null);
+             String global_username = _configDao.getValueAndInitIfNotExist("ssp.username", "Network", null);
+             String global_password = _configDao.getValueAndInitIfNotExist("ssp.password", "Network", null);
+             if(global_apiUrl != null && global_username != null && global_password != null){
+                 clients.add(new SspClient(global_apiUrl, global_username, global_password));
+             }
+         }
+         return clients;
+     }
+ 
+     /* (non-Javadoc)
+      * @see org.apache.cloudstack.network.element.NetworkElement#isReady(com.cloud.network.PhysicalNetworkServiceProvider)
+      */
+     @Override
+     public boolean isReady(PhysicalNetworkServiceProvider provider) {
+         PhysicalNetwork physicalNetwork = _physicalNetworkDao.findById(provider.getPhysicalNetworkId());
+         assert(physicalNetwork!=null);
+         if(physicalNetwork != null){
+             if(fetchSspClients(physicalNetwork.getId(), physicalNetwork.getDataCenterId(), false).size() > 0){
+                 return true;
+             }
+             s_logger.warn("Ssp api endpoint not found. "+physicalNetwork.toString());
+         }else{
+             s_logger.warn("PhysicalNetwork is NULL.");
+         }
+         return false;
+     }
+ 
+     /* (non-Javadoc)
+      * If this element is ready, then it can be enabled.
+      * @see org.apache.cloudstack.network.element.SspManager#isEnabled(com.cloud.network.PhysicalNetwork)
+      */
+     @Override
+     public boolean canHandle(PhysicalNetwork physicalNetwork){
+         if(physicalNetwork != null){
+             if(fetchSspClients(physicalNetwork.getId(), physicalNetwork.getDataCenterId(), true).size() > 0){
+                 return true;
+             }
+             s_logger.warn("enabled Ssp api endpoint not found. "+physicalNetwork.toString());
+         }else{
+             s_logger.warn("PhysicalNetwork is NULL.");
+         }
+         return false;
+     }
+ 
+     private boolean canHandle(Network network){
+         if(canHandle(_physicalNetworkDao.findById(network.getPhysicalNetworkId()))){
+             if(!_ntwkSrvcDao.canProviderSupportServiceInNetwork(network.getId(), Service.Connectivity, getProvider())){
+                 s_logger.info("SSP is implicitly active for "+network);
+             }
+             return true;
+         }
+         return false;
+     }
+ 
+     @Override
+     public Host addSspHost(AddSspCmd cmd) {
+         SspClient client = new SspClient(cmd.getUrl(), cmd.getUsername(), cmd.getPassword());
+         if(!client.login()){
+             throw new CloudRuntimeException("Ssp login failed.");
+         }
+ 
+         long zoneId = cmd.getZoneId();
+         SspCredentialVO credential = _sspCredentialDao.findByZone(zoneId);
+         if(credential==null){
+             if(cmd.getUsername()==null || cmd.getPassword()==null){
+                 throw new InvalidParameterValueException("Initial credential required for zone: " + zoneId);
+             }
+             credential = new SspCredentialVO();
+             credential.setZoneId(zoneId);
+             credential.setUsername(cmd.getUsername());
+             credential.setPassword(cmd.getPassword());
+             _sspCredentialDao.persist(credential);
+         }else{
+             if(cmd.getUsername()!=null || cmd.getPassword()!=null){
+                 s_logger.warn("Tenant credential already configured for zone:"+zoneId);
+             }
+         }
+ 
+         String tenantUuid = _sspTenantDao.findUuidByZone(zoneId);
+         if(tenantUuid==null){
+             if(cmd.getTenantUuid()==null){
+                 throw new InvalidParameterValueException("Initial tenant uuid required for zone: " + zoneId);
+             }
+             SspTenantVO tenant = new SspTenantVO();
+             tenant.setZoneId(zoneId);
+             tenant.setUuid(cmd.getTenantUuid());
+             _sspTenantDao.persist(tenant);
+         }else{
+             if(cmd.getTenantUuid()!=null){
+                 s_logger.warn("Tenant uuid already configured for zone:"+zoneId);
+             }
+         }
+ 
+         String normalizedUrl = null;
+         String hostname = null;
+         try {
+             URL url = new URL(cmd.getUrl());
+             normalizedUrl = url.toString();
+             hostname = url.getHost();
+         } catch (MalformedURLException e1) {
+             throw new CloudRuntimeException("Invalid url "+cmd.getUrl());
+         }
+ 
+         List<HostVO> hosts = _resourceMgr.listAllHostsInOneZoneByType(Host.Type.L2Networking, zoneId);
+         for(HostVO host : hosts){
+             assert(credential != null);
+             _hostDao.loadDetails(host);
+             if("v1Api".equals(host.getDetail("sspHost"))){
+                 if(normalizedUrl.equals(host.getDetail("url"))){
+                     s_logger.warn("Ssp host already registered "+normalizedUrl);
+                     return host;
+                 }
+             }
+         }
+         // SspHost HostVO will be created per zone and url.
+         HostVO host = new HostVO(UUID.randomUUID().toString());
+         host.setDataCenterId(zoneId);
+         host.setType(Host.Type.L2Networking);
+         host.setPrivateIpAddress(hostname); // db schema not null. It may be a name, not IP address.
+         //        host.setPrivateMacAddress(""); // db schema nullable
+         //        host.setPrivateNetmask(""); // db schema nullable
+         host.setVersion("1"); // oddly, the db schema declares this column NOT NULL
+         host.setName(cmd.getName());
+ 
+         host.setDetails(new HashMap<String, String>());
+         host.setDetail("sspHost", "v1Api");
+         host.setDetail("url", normalizedUrl);
+         return _hostDao.persist(host);
+     }
+ 
+     @Override
+     public boolean deleteSspHost(DeleteSspCmd cmd) {
+         s_logger.info("deleteStratosphereSsp");
+         return _hostDao.remove(cmd.getHostId());
+     }
+ 
++    @Override
+     public boolean createNetwork(Network network, NetworkOffering offering,
+             DeployDestination dest, ReservationContext context){
+         if(_sspUuidDao.findUuidByNetwork(network) != null){
+             s_logger.info("Network already has ssp TenantNetwork uuid :"+network.toString());
+             return true;
+         }
+         if(!canHandle(network)){
+             return false;
+         }
+ 
+         String tenantUuid = _sspTenantDao.findUuidByZone(network.getDataCenterId());
+         if(tenantUuid==null){
+             tenantUuid = _configDao.getValueAndInitIfNotExist("ssp.tenant", "Network", null);
+         }
+ 
+         boolean processed = false;
+         for(SspClient client : fetchSspClients(network.getPhysicalNetworkId(), network.getDataCenterId(), true)){
+             SspClient.TenantNetwork sspNet = client.createTenantNetwork(tenantUuid, network.getName());
+             if(sspNet != null){
+                 SspUuidVO uuid = new SspUuidVO();
+                 uuid.setUuid(sspNet.uuid);
+                 uuid.setObjClass(SspUuidVO.objClassNetwork);
+                 uuid.setObjId(network.getId());
+                 _sspUuidDao.persist(uuid);
+                 return true;
+             }
+             processed = true;
+         }
+         if(processed){
+             s_logger.error("Could not allocate an uuid for network "+network.toString());
+             return false;
+         }else{
+             s_logger.error("Skipping #createNetwork() for "+network.toString());
+             return true;
+         }
+     }
+ 
++    @Override
+     public boolean deleteNetwork(Network network){
+         String tenantNetworkUuid = _sspUuidDao.findUuidByNetwork(network);
+         if(tenantNetworkUuid != null){
+             boolean processed = false;
+             for(SspClient client : fetchSspClients(network.getPhysicalNetworkId(), network.getDataCenterId(), true)){
+                 if(client.deleteTenantNetwork(tenantNetworkUuid)){
+                     _sspUuidDao.removeUuid(tenantNetworkUuid);
+                     processed = true;
+                     break;
+                 }
+             }
+             if(!processed){
+                 s_logger.error("Ssp api tenant network deletion failed "+network.toString());
+             }
+         }else{
+             s_logger.debug("Silently skipping #deleteNetwork() for "+network.toString());
+         }
+         return true;
+     }
+ 
+     // we use context.reservationId for dedup of guru & element operations.
++    @Override
+     public boolean createNicEnv(Network network, NicProfile nic, DeployDestination dest, ReservationContext context){
+         String tenantNetworkUuid = _sspUuidDao.findUuidByNetwork(network);
+         if(tenantNetworkUuid == null){
+             s_logger.debug("Skipping #createNicEnv() for nic on "+network.toString());
+             return true;
+         }
+ 
+         String reservationId = context.getReservationId();
+         List<SspUuidVO> tenantPortUuidVos = _sspUuidDao.listUUidVoByNicProfile(nic);
+         for(SspUuidVO tenantPortUuidVo : tenantPortUuidVos){
+             if(reservationId.equals(tenantPortUuidVo.getReservationId())){
+                 s_logger.info("Skipping because reservation found "+reservationId);
+                 return true;
+             }
+         }
+ 
+         String tenantPortUuid = null;
+         for(SspClient client : fetchSspClients(network.getPhysicalNetworkId(), network.getDataCenterId(), true)){
+             SspClient.TenantPort sspPort = client.createTenantPort(tenantNetworkUuid);
+             if(sspPort != null){
+                 tenantPortUuid = sspPort.uuid;
+                 nic.setReservationId(reservationId);
+ 
+                 SspUuidVO uuid = new SspUuidVO();
+                 uuid.setUuid(tenantPortUuid);
+                 uuid.setObjClass(SspUuidVO.objClassNicProfile);
+                 uuid.setObjId(nic.getId());
+                 uuid.setReservationId(reservationId);
+                 _sspUuidDao.persist(uuid);
+                 break;
+             }
+         }
+         if(tenantPortUuid == null){
+             s_logger.debug("#createNicEnv() failed for nic on "+network.toString());
+             return false;
+         }
+ 
+         for(SspClient client : fetchSspClients(network.getPhysicalNetworkId(), network.getDataCenterId(), true)){
+             SspClient.TenantPort sspPort = client.updateTenantVifBinding(tenantPortUuid, dest.getHost().getPrivateIpAddress());
+             if(sspPort != null){
+                 if(sspPort.vlanId != null){
+                     nic.setBroadcastType(BroadcastDomainType.Vlan);
+                     nic.setBroadcastUri(BroadcastDomainType.Vlan.toUri(String.valueOf(sspPort.vlanId)));
+                 }
+                 return true;
+             }
+         }
+         s_logger.error("Updating vif failed "+nic.toString());
+         return false;
+     }
+ 
++    @Override
+     public boolean deleteNicEnv(Network network, NicProfile nic, ReservationContext context){
+         if(context==null){
+             s_logger.error("ReservationContext was null for "+nic+" "+network);
+             return false;
+         }
+         String reservationId = context.getReservationId();
+ 
+         SspUuidVO deleteTarget = null;
+         SspUuidVO remainingTarget = null;
+         List<SspUuidVO> tenantPortUuidVos = _sspUuidDao.listUUidVoByNicProfile(nic);
+         for(SspUuidVO tenantPortUuidVo : tenantPortUuidVos){
+             if(reservationId.equals(tenantPortUuidVo.getReservationId())){
+                 deleteTarget = tenantPortUuidVo;
+             }else{
+                 remainingTarget = tenantPortUuidVo;
+             }
+         }
+ 
+         if(deleteTarget != null){ // delete the target ssp uuid (tenant-port)
+             String tenantPortUuid = deleteTarget.getUuid();
+             boolean processed = false;
+             for(SspClient client : fetchSspClients(network.getPhysicalNetworkId(), network.getDataCenterId(), true)){
+                 SspClient.TenantPort sspPort = client.updateTenantVifBinding(tenantPortUuid, null);
+                 if(sspPort != null){
+                     processed = true;
+                     break;
+                 }
+             }
+             if(!processed){
+                 s_logger.warn("Ssp api nic detach failed "+nic.toString());
+             }
+             processed = false;
+             for(SspClient client : fetchSspClients(network.getPhysicalNetworkId(), network.getDataCenterId(), true)){
+                 if(client.deleteTenantPort(tenantPortUuid)){
+                     _sspUuidDao.removeUuid(tenantPortUuid);
+                     processed = true;
+                     break;
+                 }
+             }
+             if(!processed){
+                 s_logger.warn("Ssp api tenant port deletion failed "+nic.toString());
+             }
+             _sspUuidDao.removeUuid(tenantPortUuid);
+         }
+         if(remainingTarget != null){
+             NicVO nicVo = _nicDao.findById(nic.getId());
+             nicVo.setReservationId(remainingTarget.getReservationId());
+             _nicDao.persist(nicVo); // persist the new reservationId
+         }
+         return true;
+     }
+ 
+     /* (non-Javadoc)
+      * Implements a network using ssp element.
+      *
+      * This method will be called right after NetworkGuru#implement().
+      * see also {@link #shutdown(Network, ReservationContext, boolean)}
+      * @see org.apache.cloudstack.network.element.NetworkElement#implement(com.cloud.network.Network, com.cloud.offering.NetworkOffering, com.cloud.deploy.DeployDestination, com.cloud.vm.ReservationContext)
+      */
+     @Override
+     public boolean implement(Network network, NetworkOffering offering,
+             DeployDestination dest, ReservationContext context)
+                     throws ConcurrentOperationException, ResourceUnavailableException,
+                     InsufficientCapacityException {
+         s_logger.info("implement");
+         return createNetwork(network, offering, dest, context);
+     }
+ 
+     /* (non-Javadoc)
+      * Shutdown the network implementation
+      *
+      * This method will be called right BEFORE NetworkGuru#shutdown().
+      * The entities was acquired by {@link #implement(Network, NetworkOffering, DeployDestination, ReservationContext)}
+      * @see org.apache.cloudstack.network.element.NetworkElement#shutdown(com.cloud.network.Network, com.cloud.vm.ReservationContext, boolean)
+      */
+     @Override
+     public boolean shutdown(Network network, ReservationContext context,
+             boolean cleanup) throws ConcurrentOperationException,
+             ResourceUnavailableException {
+         s_logger.trace("shutdown");
+         return deleteNetwork(network);
+     }
+ 
+     /* (non-Javadoc)
+      * Prepares a network environment for a VM nic.
+      *
+      * This method will be called right after NetworkGuru#reserve().
+      * The entities will be released by {@link #release(Network, NicProfile, VirtualMachineProfile, ReservationContext)}
+      * @see org.apache.cloudstack.network.element.NetworkElement#prepare(com.cloud.network.Network, com.cloud.vm.NicProfile, com.cloud.vm.VirtualMachineProfile, com.cloud.deploy.DeployDestination, com.cloud.vm.ReservationContext)
+      */
+     @Override
+     public boolean prepare(Network network, NicProfile nic,
 -            VirtualMachineProfile<? extends VirtualMachine> vm,
++            VirtualMachineProfile vm,
+             DeployDestination dest, ReservationContext context)
+                     throws ConcurrentOperationException, ResourceUnavailableException,
+                     InsufficientCapacityException {
+         s_logger.trace("prepare");
+         return createNicEnv(network, nic, dest, context);
+     }
+ 
+     /* (non-Javadoc)
+      * Release the network environment that was prepared for a VM nic.
+      *
+      * This method will be called right AFTER NetworkGuru#release().
+      * The entities was acquired in {@link #prepare(Network, NicProfile, VirtualMachineProfile, DeployDestination, ReservationContext)}
+      * @see org.apache.cloudstack.network.element.NetworkElement#release(com.cloud.network.Network, com.cloud.vm.NicProfile, com.cloud.vm.VirtualMachineProfile, com.cloud.vm.ReservationContext)
+      */
+     @Override
+     public boolean release(Network network, NicProfile nic,
 -            VirtualMachineProfile<? extends VirtualMachine> vm,
++            VirtualMachineProfile vm,
+             ReservationContext context) throws ConcurrentOperationException,
+             ResourceUnavailableException {
+         s_logger.trace("release");
+         return deleteNicEnv(network, nic, context);
+     }
+ 
+     /* (non-Javadoc)
+      * Destroy a network implementation.
+      *
+      * This method will be called right BEFORE NetworkGuru#trash() in "Expunge" phase.
+      * @see org.apache.cloudstack.network.element.NetworkElement#destroy(com.cloud.network.Network)
+      */
+     @Override
+     public boolean destroy(Network network, ReservationContext context)
+             throws ConcurrentOperationException, ResourceUnavailableException {
+         s_logger.trace("destroy");
+         // nothing to do here.
+         return true;
+     }
+ 
+     @Override
+     public boolean shutdownProviderInstances(
+             PhysicalNetworkServiceProvider provider, ReservationContext context)
+                     throws ConcurrentOperationException, ResourceUnavailableException {
+         s_logger.trace("shutdownProviderInstances");
+         return true;
+     }
+ 
+     @Override
+     public boolean canEnableIndividualServices() {
+         s_logger.trace("canEnableIndividualServices");
+         return true; // because there is only Connectivity
+     }
+ 
+     @Override
+     public boolean verifyServicesCombination(Set<Service> services) {
+         s_logger.trace("verifyServicesCombination "+services.toString());
+         return true;
+     }
+ 
+     @Override
+     public boolean prepareMigration(NicProfile nic, Network network,
 -            VirtualMachineProfile<? extends VirtualMachine> vm,
++            VirtualMachineProfile vm,
+             DeployDestination dest, ReservationContext context) {
+         try {
+             prepare(network, nic, vm, dest, context);
+         } catch (ConcurrentOperationException e) {
+             s_logger.error("prepareForMigration failed.", e);
+             return false;
+         } catch (ResourceUnavailableException e) {
+             s_logger.error("prepareForMigration failed.", e);
+             return false;
+         } catch (InsufficientCapacityException e) {
+             s_logger.error("prepareForMigration failed.", e);
+             return false;
+         }
+         return true;
+     }
+ 
+     @Override
+     public void rollbackMigration(NicProfile nic, Network network,
 -            VirtualMachineProfile<? extends VirtualMachine> vm,
++            VirtualMachineProfile vm,
+             ReservationContext src, ReservationContext dst) {
+         try {
+             release(network, nic, vm, dst);
+         } catch (ConcurrentOperationException e) {
+             s_logger.error("rollbackMigration failed.", e);
+         } catch (ResourceUnavailableException e) {
+             s_logger.error("rollbackMigration failed.", e);
+         }
+     }
+ 
+     @Override
+     public void commitMigration(NicProfile nic, Network network,
 -            VirtualMachineProfile<? extends VirtualMachine> vm,
++            VirtualMachineProfile vm,
+             ReservationContext src, ReservationContext dst) {
+         try {
+             release(network, nic, vm, src);
+         } catch (ConcurrentOperationException e) {
+             s_logger.error("commitMigration failed.", e);
+         } catch (ResourceUnavailableException e) {
+             s_logger.error("commitMigration failed.", e);
+         }
+     }
+ 
+     @Override
+     public List<Class<?>> getCommands() {
+         return Arrays.<Class<?>>asList(AddSspCmd.class, DeleteSspCmd.class);
+     }
+ }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/57bafc8f/plugins/network-elements/stratosphere-ssp/src/org/apache/cloudstack/network/guru/SspGuestNetworkGuru.java
----------------------------------------------------------------------
diff --cc plugins/network-elements/stratosphere-ssp/src/org/apache/cloudstack/network/guru/SspGuestNetworkGuru.java
index 0000000,6d5d871..1057bec
mode 000000,100644..100644
--- a/plugins/network-elements/stratosphere-ssp/src/org/apache/cloudstack/network/guru/SspGuestNetworkGuru.java
+++ b/plugins/network-elements/stratosphere-ssp/src/org/apache/cloudstack/network/guru/SspGuestNetworkGuru.java
@@@ -1,0 -1,173 +1,172 @@@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements.  See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership.  The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License.  You may obtain a copy of the License at
+ //
+ //   http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied.  See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+ package org.apache.cloudstack.network.guru;
+ 
+ import javax.ejb.Local;
+ import javax.inject.Inject;
+ 
+ import org.apache.cloudstack.network.element.SspElement;
+ import org.apache.cloudstack.network.element.SspManager;
+ import org.apache.log4j.Logger;
+ 
+ import com.cloud.dc.DataCenter.NetworkType;
+ import com.cloud.deploy.DeployDestination;
+ import com.cloud.exception.InsufficientAddressCapacityException;
+ import com.cloud.exception.InsufficientVirtualNetworkCapcityException;
+ import com.cloud.network.Network;
+ import com.cloud.network.NetworkMigrationResponder;
+ import com.cloud.network.NetworkProfile;
+ import com.cloud.network.PhysicalNetwork;
+ import com.cloud.network.PhysicalNetwork.IsolationMethod;
+ import com.cloud.network.dao.NetworkDao;
+ import com.cloud.network.guru.GuestNetworkGuru;
+ import com.cloud.network.guru.NetworkGuru;
+ import com.cloud.offering.NetworkOffering;
+ import com.cloud.offerings.dao.NetworkOfferingServiceMapDao;
+ import com.cloud.vm.NicProfile;
+ import com.cloud.vm.ReservationContext;
+ import com.cloud.vm.ReservationContextImpl;
 -import com.cloud.vm.VirtualMachine;
+ import com.cloud.vm.VirtualMachineProfile;
+ 
+ /**
+  * Stratosphere SDN Platform NetworkGuru
+  */
+ @Local(value=NetworkGuru.class)
+ public class SspGuestNetworkGuru extends GuestNetworkGuru implements NetworkMigrationResponder {
+     private static final Logger s_logger = Logger.getLogger(SspGuestNetworkGuru.class);
+ 
+     @Inject
+     SspManager _sspMgr;
+     @Inject
+     NetworkDao _networkDao;
+     @Inject
+     NetworkOfferingServiceMapDao _ntwkOfferingSrvcDao;
+ 
+     public SspGuestNetworkGuru() {
+         super();
+         _isolationMethods = new IsolationMethod[] { IsolationMethod.SSP };
+     }
+ 
+     @Override
+     protected boolean canHandle(NetworkOffering offering,
+             NetworkType networkType, PhysicalNetwork physicalNetwork) {
+         s_logger.trace("canHandle");
+ 
+         String setting = null;
+         if(physicalNetwork != null && physicalNetwork.getIsolationMethods().contains("SSP")){
+             // Be careful, PhysicalNetwork#getIsolationMethods() returns List<String>, not List<IsolationMethod>
+             setting = "physicalnetwork setting";
+         }else if(_ntwkOfferingSrvcDao.isProviderForNetworkOffering(offering.getId(), Network.Provider.getProvider(SspElement.s_SSP_NAME))){
+             setting = "network offering setting";
+         }
+         if(setting != null){
+             if (networkType != NetworkType.Advanced){
+                 s_logger.info("SSP enebled by "+setting+" but not active because networkType was "+networkType);
+             }else if(!isMyTrafficType(offering.getTrafficType())){
+                 s_logger.info("SSP enabled by "+setting+" but not active because traffic type not Guest");
+             }else if(offering.getGuestType() != Network.GuestType.Isolated){
+                 s_logger.info("SSP works for network isolatation.");
+             }else if(!_sspMgr.canHandle(physicalNetwork)){
+                 s_logger.info("SSP manager not ready");
+             }else{
+                 return true;
+             }
+         }else{
+             s_logger.debug("SSP not configured to be active");
+         }
+         return false;
+     }
+ 
+     /* (non-Javadoc)
+      * FYI: What is done in parent class is allocateVnet(vlan).
+      * Effective return object members are: cidr, broadcastUri, gateway, mode, physicalNetworkId
+      * The other members will be silently ignored.
+      * This method is called at DeployVMCmd#execute (running phase) - NetworkManagerImpl#prepare
+      * @see org.apache.cloudstack.network.guru.GuestNetworkGuru#implement(com.cloud.network.Network, com.cloud.offering.NetworkOffering, com.cloud.deploy.DeployDestination, com.cloud.vm.ReservationContext)
+      */
+     @Override
+     public Network implement(Network network, NetworkOffering offering,
+             DeployDestination dest, ReservationContext context)
+                     throws InsufficientVirtualNetworkCapcityException {
+         s_logger.trace("implement "+network.toString());
+         super.implement(network, offering, dest, context);
+         _sspMgr.createNetwork(network, offering, dest, context);
+         return network;
+     }
+ 
+ 
+     @Override
+     public void shutdown(NetworkProfile profile, NetworkOffering offering) {
+         s_logger.trace("shutdown "+profile.toString());
+         _sspMgr.deleteNetwork(profile);
+         super.shutdown(profile, offering);
+     }
+ 
+     @Override
+     public void reserve(NicProfile nic, Network network,
 -            VirtualMachineProfile<? extends VirtualMachine> vm,
++            VirtualMachineProfile vm,
+             DeployDestination dest, ReservationContext context)
+                     throws InsufficientVirtualNetworkCapcityException,
+                     InsufficientAddressCapacityException {
+         super.reserve(nic, network, vm, dest, context);
+         _sspMgr.createNicEnv(network, nic, dest, context);
+     }
+ 
+     @Override
+     public boolean release(NicProfile nic,
 -            VirtualMachineProfile<? extends VirtualMachine> vm,
++            VirtualMachineProfile vm,
+             String reservationId) {
+         Network network = _networkDao.findById(nic.getNetworkId());
+         _sspMgr.deleteNicEnv(network, nic, new ReservationContextImpl(reservationId, null, null));
+         return super.release(nic, vm, reservationId);
+     }
+ 
+     @Override
+     public void updateNicProfile(NicProfile profile, Network network) {
+         super.updateNicProfile(profile, network);
+     }
+ 
+     @Override
+     public boolean prepareMigration(NicProfile nic, Network network,
 -            VirtualMachineProfile<? extends VirtualMachine> vm,
++            VirtualMachineProfile vm,
+             DeployDestination dest, ReservationContext context) {
+         try {
+             reserve(nic, network, vm, dest, context);
+         } catch (InsufficientVirtualNetworkCapcityException e) {
+             s_logger.error("prepareForMigration failed", e);
+             return false;
+         } catch (InsufficientAddressCapacityException e) {
+             s_logger.error("prepareForMigration failed", e);
+             return false;
+         }
+         return true;
+     }
+ 
+     @Override
+     public void rollbackMigration(NicProfile nic, Network network,
 -            VirtualMachineProfile<? extends VirtualMachine> vm,
++            VirtualMachineProfile vm,
+             ReservationContext src, ReservationContext dst) {
+         release(nic, vm, dst.getReservationId());
+     }
+ 
+     @Override
+     public void commitMigration(NicProfile nic, Network network,
 -            VirtualMachineProfile<? extends VirtualMachine> vm,
++            VirtualMachineProfile vm,
+             ReservationContext src, ReservationContext dst) {
+         release(nic, vm, src.getReservationId());
+     }
+ }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/57bafc8f/server/src/com/cloud/api/query/dao/TemplateJoinDaoImpl.java
----------------------------------------------------------------------
diff --cc server/src/com/cloud/api/query/dao/TemplateJoinDaoImpl.java
index f6d7b40,a31a036..ea42550
--- a/server/src/com/cloud/api/query/dao/TemplateJoinDaoImpl.java
+++ b/server/src/com/cloud/api/query/dao/TemplateJoinDaoImpl.java
@@@ -97,6 -96,36 +97,36 @@@ public class TemplateJoinDaoImpl extend
  
  
  
+     private String getTemplateStatus(TemplateJoinVO template){
+         boolean isAdmin = false;
 -        Account caller = UserContext.current().getCaller();
++        Account caller = CallContext.current().getCallingAccount();
+         if ((caller == null) || BaseCmd.isAdmin(caller.getType())) {
+             isAdmin = true;
+         }
+ 
+         // If the user is an Admin, add the template download status
+         String templateStatus = null;
+         if (isAdmin || caller.getId() == template.getAccountId()) {
+             // add download status
+             if (template.getDownloadState() != Status.DOWNLOADED) {
+                 templateStatus = "Processing";
+                 if (template.getDownloadState() == VMTemplateHostVO.Status.DOWNLOAD_IN_PROGRESS) {
+                     if (template.getDownloadPercent() == 100) {
+                         templateStatus = "Installing Template";
+                     } else {
+                         templateStatus = template.getDownloadPercent() + "% Downloaded";
+                     }
+                 } else {
+                     templateStatus = template.getErrorString();
+                 }
+             } else if (template.getDownloadState() == VMTemplateHostVO.Status.DOWNLOADED) {
+                 templateStatus = "Download Complete";
+             } else {
+                 templateStatus = "Successfully Installed";
+             }
+         }
+         return templateStatus;
+     }
  
      @Override
      public TemplateResponse newTemplateResponse(TemplateJoinVO template) {
@@@ -137,33 -166,10 +167,10 @@@
          templateResponse.setDomainName(template.getDomainName());
  
  
- 
-         boolean isAdmin = false;
-         Account caller = CallContext.current().getCallingAccount();
-         if ((caller == null) || BaseCmd.isAdmin(caller.getType())) {
-             isAdmin = true;
-         }
- 
          // If the user is an Admin, add the template download status
-         if (isAdmin || caller.getId() == template.getAccountId()) {
-             // add download status
-             if (template.getDownloadState() != Status.DOWNLOADED) {
-                 String templateStatus = "Processing";
-                 if (template.getDownloadState() == VMTemplateHostVO.Status.DOWNLOAD_IN_PROGRESS) {
-                     if (template.getDownloadPercent() == 100) {
-                         templateStatus = "Installing Template";
-                     } else {
-                         templateStatus = template.getDownloadPercent() + "% Downloaded";
-                     }
-                 } else {
-                     templateStatus = template.getErrorString();
-                 }
+         String templateStatus = getTemplateStatus(template);
+         if ( templateStatus != null ){
 -            templateResponse.setStatus(templateStatus);
 +                templateResponse.setStatus(templateStatus);
-             } else if (template.getDownloadState() == VMTemplateHostVO.Status.DOWNLOADED) {
-                 templateResponse.setStatus("Download Complete");
-             } else {
-                 templateResponse.setStatus("Successfully Installed");
-             }
          }
  
          Long templateSize = template.getSize();

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/57bafc8f/server/src/com/cloud/api/query/dao/VolumeJoinDaoImpl.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/57bafc8f/server/src/com/cloud/configuration/Config.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/57bafc8f/server/src/com/cloud/configuration/ConfigurationManager.java
----------------------------------------------------------------------
diff --cc server/src/com/cloud/configuration/ConfigurationManager.java
index 831b2c8,1b99b63..9bbd537
--- a/server/src/com/cloud/configuration/ConfigurationManager.java
+++ b/server/src/com/cloud/configuration/ConfigurationManager.java
@@@ -107,8 -111,9 +110,9 @@@ public interface ConfigurationManager e
       * @param iopsWriteRate
       * @return newly created disk offering
       */
-     DiskOfferingVO createDiskOffering(Long domainId, String name, String description, Long numGibibytes, String tags, boolean isCustomized, boolean localStorageRequired, boolean isDisplayOfferingEnabled,
+     DiskOfferingVO createDiskOffering(Long domainId, String name, String description, Long numGibibytes, String tags, boolean isCustomized,
+     		boolean localStorageRequired, boolean isDisplayOfferingEnabled, Boolean isCustomizedIops, Long minIops, Long maxIops,
 -    		Long bytesReadRate, Long bytesWriteRate, Long iopsReadRate, Long iopsWriteRate);
 +            Long bytesReadRate, Long bytesWriteRate, Long iopsReadRate, Long iopsWriteRate);
  
      /**
       * Creates a new pod

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/57bafc8f/server/src/com/cloud/configuration/ConfigurationManagerImpl.java
----------------------------------------------------------------------
diff --cc server/src/com/cloud/configuration/ConfigurationManagerImpl.java
index a27c103,2089f82..e1a35ac
--- a/server/src/com/cloud/configuration/ConfigurationManagerImpl.java
+++ b/server/src/com/cloud/configuration/ConfigurationManagerImpl.java
@@@ -2307,8 -2297,9 +2305,9 @@@ public class ConfigurationManagerImpl e
  
      @Override
      @ActionEvent(eventType = EventTypes.EVENT_DISK_OFFERING_CREATE, eventDescription = "creating disk offering")
-     public DiskOfferingVO createDiskOffering(Long domainId, String name, String description, Long numGibibytes, String tags, boolean isCustomized, boolean localStorageRequired, boolean isDisplayOfferingEnabled,
+     public DiskOfferingVO createDiskOffering(Long domainId, String name, String description, Long numGibibytes, String tags, boolean isCustomized,
+     		boolean localStorageRequired, boolean isDisplayOfferingEnabled, Boolean isCustomizedIops, Long minIops, Long maxIops,
 -    		Long bytesReadRate, Long bytesWriteRate, Long iopsReadRate, Long iopsWriteRate) {
 +            Long bytesReadRate, Long bytesWriteRate, Long iopsReadRate, Long iopsWriteRate) {
          long diskSize = 0;// special case for custom disk offerings
          if (numGibibytes != null && (numGibibytes <= 0)) {
              throw new InvalidParameterValueException("Please specify a disk size of at least 1 Gb.");
@@@ -2324,11 -2315,46 +2323,47 @@@
              isCustomized = true;
          }
  
+         if (isCustomizedIops != null) {
+             bytesReadRate = null;
+             bytesWriteRate = null;
+             iopsReadRate = null;
+             iopsWriteRate = null;
+ 
+             if (isCustomizedIops) {
+             	minIops = null;
+             	maxIops = null;
+             }
+             else {
+                 if (minIops == null && maxIops == null) {
+                     minIops = 0L;
+                     maxIops = 0L;
+                 }
+                 else {
+                 	if (minIops == null || minIops <= 0) {
+                 	    throw new InvalidParameterValueException("The min IOPS must be greater than 0.");
+         	        }
+ 
+                 	if (maxIops == null) {
+         	        	maxIops = 0L;
+         	        }
+ 
+                 	if (minIops > maxIops) {
+                 		throw new InvalidParameterValueException("The min IOPS must be less than or equal to the max IOPS.");
+                 	}
+                 }
+             }
+         }
+         else {
+             minIops = null;
+             maxIops = null;
+         }
+ 
          tags = cleanupTags(tags);
-         DiskOfferingVO newDiskOffering = new DiskOfferingVO(domainId, name, description, diskSize, tags, isCustomized);
+         DiskOfferingVO newDiskOffering = new DiskOfferingVO(domainId, name, description, diskSize, tags, isCustomized,
+         		isCustomizedIops, minIops, maxIops);
          newDiskOffering.setUseLocalStorage(localStorageRequired);
          newDiskOffering.setDisplayOffering(isDisplayOfferingEnabled);
 +        CallContext.current().setEventDetails("Disk offering id=" + newDiskOffering.getId());
  
          if (bytesReadRate != null && (bytesReadRate > 0))
              newDiskOffering.setBytesReadRate(bytesReadRate);
@@@ -3233,34 -3269,31 +3276,31 @@@
                              .getVlanType().toString(), ip.getSystem(), ip.getClass().getName(), ip.getUuid());
                  }
              }
+             try {
 -                if (_networkModel.areServicesSupportedInNetwork(vlanRange.getNetworkId(), Service.Dhcp)) {
 -                    Network network = _networkDao.findById(vlanRange.getNetworkId());
 -                    DhcpServiceProvider dhcpServiceProvider = _networkMgr.getDhcpServiceProvider(network);
 -                    if (!dhcpServiceProvider.getProvider().getName().equalsIgnoreCase(Provider.VirtualRouter.getName())) {
 +            if (_networkModel.areServicesSupportedInNetwork(vlanRange.getNetworkId(), Service.Dhcp)) {
 +                Network network = _networkDao.findById(vlanRange.getNetworkId());
 +                DhcpServiceProvider dhcpServiceProvider = _networkMgr.getDhcpServiceProvider(network);
 +                if (!dhcpServiceProvider.getProvider().getName().equalsIgnoreCase(Provider.VirtualRouter.getName())) {
-                     Transaction txn = Transaction.currentTxn();
-                     txn.start();
-                     if (!removeFromDb(vlanDbId)) {
-                         txn.rollback();
-                         txn.close();
-                         return false;
-                     }
- 
-                     else {
-                         txn.commit();
+                         deleteVLANFromDb(vlanDbId);
+                     } else {
+                         return  handleIpAliasDeletion(vlanRange, vlanDbId, dhcpServiceProvider, network);
                      }
                  }
  
                  else {
-                   return  handleIpAliasDeletion(vlanRange, vlanDbId, dhcpServiceProvider, network);
+                     deleteVLANFromDb(vlanDbId);
                  }
              }
+             catch ( SQLException e) {
+                throw  new CloudRuntimeException(e.getMessage());
+             }
+ 
          }
 -        return true;
 -    }
 +                    return  true;
 +                }
  
-     private boolean handleIpAliasDeletion(VlanVO vlanRange, long vlanDbId, DhcpServiceProvider dhcpServiceProvider, Network network) {
-         boolean result_final = false;
+     @DB
+     private boolean handleIpAliasDeletion(VlanVO vlanRange, long vlanDbId, DhcpServiceProvider dhcpServiceProvider, Network network) throws SQLException {
          Transaction txn = Transaction.currentTxn();
          txn.start();
          IPAddressVO ip = null;
@@@ -3268,89 -3301,54 +3308,54 @@@
          try{
              Integer allocIpCount=0;
              //search if the vlan has any allocated ips.
 -            allocIpCount = _publicIpAddressDao.countIPs(vlanRange.getDataCenterId(), vlanDbId, true);
 -            if (allocIpCount > 1) {
 +                allocIpCount = _publicIpAddressDao.countIPs(vlanRange.getDataCenterId(), vlanDbId, true);
 +                if (allocIpCount > 1) {
-                     throw  new InvalidParameterValueException ("cannot delete this range as some of the vlans are in use.");
+                 throw  new InvalidParameterValueException ("Cannot delete this range as some of the vlans are in use.");
 -            }
 +                }
-                 if (allocIpCount == 0){
-                 result_final=true;
+             else if (allocIpCount == 0){
+                 deleteVLANFromDb(vlanDbId);
 -            }
 +                }
              else {
                  ipAlias = _nicIpAliasDao.findByGatewayAndNetworkIdAndState(vlanRange.getVlanGateway(), vlanRange.getNetworkId(),  NicIpAlias.state.active);
-                 ipAlias.setState(NicIpAlias.state.revoked);
-                 _nicIpAliasDao.update(ipAlias.getId(), ipAlias);
+                 if (ipAlias == null) {
+                     throw  new InvalidParameterValueException ("Cannot delete this range as some of the Ips are in use.");
+                 }
+ 
                  //check if this ip belongs to this vlan and is allocated.
                  ip = _publicIpAddressDao.findByIpAndVlanId(ipAlias.getIp4Address(), vlanDbId);
                  if (ip != null && ip.getState() == IpAddress.State.Allocated) {
                      //check if there any other vlan ranges in the same subnet having free ips
                      List<VlanVO> vlanRanges = _vlanDao.listVlansByNetworkIdAndGateway(vlanRange.getNetworkId(), vlanRange.getVlanGateway());
                      //if there is no other vlanrage in this subnet. free the ip and delete the vlan.
-                     if (vlanRanges.size() == 1){
-                         boolean result = dhcpServiceProvider.removeDhcpSupportForSubnet(network);
-                         if (result == false) {
-                             result_final = false;
+                     if (vlanRanges.size() == 1) {
+                         ipAlias.setState(NicIpAlias.state.revoked);
+                         _nicIpAliasDao.update(ipAlias.getId(), ipAlias);
+                         if (!dhcpServiceProvider.removeDhcpSupportForSubnet(network)) {
                              s_logger.debug("Failed to delete the vlan range as we could not free the ip used to provide the dhcp service.");
-                         } else {
-                             _publicIpAddressDao.unassignIpAddress(ip.getId());
-                             result_final = true;
-                         }
-         } else {
-                         // if there are more vlans in the subnet check if there
-                         // are free ips.
-                         List<Long> vlanDbIdList = new ArrayList<Long>();
-                         for (VlanVO vlanrange : vlanRanges) {
-                             if (vlanrange.getId() != vlanDbId) {
-                                 vlanDbIdList.add(vlanrange.getId());
-                             }
-                         }
-                         s_logger.info("vlan Range"
-                                 + vlanRange.getId()
-                                 + " id being deleted, one of the Ips in this range is used to provide the dhcp service, trying to free this ip and allocate a new one.");
-                         for (VlanVO vlanrange : vlanRanges) {
-                             if (vlanrange.getId() != vlanDbId) {
- 
-                                 long freeIpsInsubnet =  _publicIpAddressDao.countFreeIpsInVlan(vlanrange.getId());
-                                 if (freeIpsInsubnet > 0){
-                                     //assign one free ip to the router for creating ip Alias. The ipalias is system managed ip so we are using the system account to allocate the ip not the caller.
-                                     boolean result = false;
-                                     PublicIp routerPublicIP = _networkMgr.assignPublicIpAddressFromVlans(network.getDataCenterId(), null, _accountDao.findById(Account.ACCOUNT_ID_SYSTEM), Vlan.VlanType.DirectAttached, vlanDbIdList, network.getId(), null, false);
-                                         s_logger.info("creating a db entry for the new ip alias.");
-                                         NicIpAliasVO newipAlias = new NicIpAliasVO(ipAlias.getNicId(), routerPublicIP.getAddress().addr(), ipAlias.getVmId(), ipAlias.getAccountId(), network.getDomainId(), network.getId(), ipAlias.getGateway(), ipAlias.getNetmask());
-                                         newipAlias.setAliasCount(routerPublicIP.getIpMacAddress());
-                                         _nicIpAliasDao.persist(newipAlias);
-                                         //we revoke all the rules and apply all the rules as a part of the removedhcp config. so the new ip will get configured when we delete the old ip.
-                                     s_logger.info("removing the old ip alias on router");
-                                     result = dhcpServiceProvider.removeDhcpSupportForSubnet(network);
-                                     if (result == false) {
-                                         s_logger.debug("could't delete the ip alias on the router");
-                                         result_final = false;
+                             //setting the state back to active
+                             ipAlias.setState(NicIpAlias.state.active);
+                             _nicIpAliasDao.update(ipAlias.getId(), ipAlias);
 -                        }
 -                        else {
 -                            _publicIpAddressDao.unassignIpAddress(ip.getId());
 +                                    }
 +                                    else {
 +                                    _publicIpAddressDao.unassignIpAddress(ip.getId());
-                                         result_final=true;
-                                     }
-         }
-                                 }
-                             }
+                             deleteVLANFromDb(vlanDbId);
                          }
+                     } else {
+                         // if there are more vlans in the subnet, free all the ips in the range except the ip alias.
+                         s_logger.info("vlan Range"+vlanRange.getId()+" id being deleted, one of the Ips in this range is used to provide the dhcp service, will free the rest of the IPs in range.");
+                         _publicIpAddressDao.deletePublicIPRangeExceptAliasIP(vlanDbId, ipAlias.getIp4Address());
+                         VlanVO vlan = _vlanDao.findById(vlanDbId);
+                         vlan.setIpRange(ipAlias.getIp4Address()+"-"+ipAlias.getIp4Address());
+                         _vlanDao.update(vlan.getId(), vlan);
 -                    }
 +    }
 +                }
- 
-         } catch (InsufficientAddressCapacityException e) {
-             throw new InvalidParameterValueException("cannot delete  vlan range"+ vlanRange.getId()+"one of the ips in this range is benig used to provide dhcp service. Cannot use some other ip as there are no free ips in this subnet");
                  }
-         finally {
-             if (result_final) {
-                 if (!removeFromDb(vlanDbId)) {
 -            }
+         } catch (CloudRuntimeException e) {
 -            txn.rollback();
 +                    txn.rollback();
+             throw e;
 -        }
 -        txn.commit();
 +                }
-                 else {
 +                    txn.commit();
-                 }
-                 txn.close();
-             }
-         }
-         return result_final;
+         return true;
      }
  
      @Override

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/57bafc8f/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/57bafc8f/server/src/com/cloud/network/NetworkManagerImpl.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/57bafc8f/server/src/com/cloud/network/NetworkUsageManagerImpl.java
----------------------------------------------------------------------
diff --cc server/src/com/cloud/network/NetworkUsageManagerImpl.java
index 2bd55bb,972d3ff..7bd96a4
--- a/server/src/com/cloud/network/NetworkUsageManagerImpl.java
+++ b/server/src/com/cloud/network/NetworkUsageManagerImpl.java
@@@ -32,6 -32,6 +32,10 @@@ import javax.naming.ConfigurationExcept
  import org.apache.log4j.Logger;
  import org.springframework.stereotype.Component;
  
++import org.apache.cloudstack.api.command.admin.usage.AddTrafficMonitorCmd;
++import org.apache.cloudstack.api.command.admin.usage.DeleteTrafficMonitorCmd;
++import org.apache.cloudstack.api.command.admin.usage.ListTrafficMonitorsCmd;
++
  import com.cloud.agent.AgentManager;
  import com.cloud.agent.Listener;
  import com.cloud.agent.api.AgentControlAnswer;
@@@ -68,17 -68,16 +72,9 @@@ import com.cloud.resource.ResourceManag
  import com.cloud.resource.ResourceStateAdapter;
  import com.cloud.resource.ServerResource;
  import com.cloud.resource.UnableDeleteHostException;
  import com.cloud.usage.UsageIPAddressVO;
  import com.cloud.user.AccountManager;
  import com.cloud.user.AccountVO;
--import com.cloud.user.User;
 -import com.cloud.user.UserContext;
  import com.cloud.user.UserStatisticsVO;
  import com.cloud.user.dao.UserStatisticsDao;
  import com.cloud.utils.NumbersUtil;
@@@ -195,16 -193,11 +190,11 @@@ public class NetworkUsageManagerImpl ex
              throw new InvalidParameterValueException("Could not find an traffic monitor with ID: " + hostId);
          }
  
- 		try {
- 			if (_resourceMgr.maintain(hostId) && _resourceMgr.deleteHost(hostId, false, false)) {
+         if (_resourceMgr.deleteHost(hostId, false, false)) {
 -            return true;
 -        } else {
 -            return false;
 -        }
 +				return true;
 +            } else {
 +                return false;
 +            }
-         } catch (AgentUnavailableException e) {
-             s_logger.debug(e);
-             return false;
-         }
      }
  
      @Override
@@@ -254,7 -247,7 +244,7 @@@
  
          private int _interval;
  
--        private long mgmtSrvrId = MacAddress.getMacAddress().toLong();
++        private final long mgmtSrvrId = MacAddress.getMacAddress().toLong();
  
          protected DirectNetworkStatsListener(int interval) {
              _interval = interval;
@@@ -529,13 -522,20 +519,20 @@@
          return host;
      }
  
 -    @Override
 +	@Override
      public DeleteHostAnswer deleteHost(HostVO host, boolean isForced, boolean isForceDeleteStorage) throws UnableDeleteHostException {
 -        if(host.getType() != Host.Type.TrafficMonitor){
 -            return null;
 -        }
 +		if(host.getType() != Host.Type.TrafficMonitor){
 +	    return null;
 +    }
  
- 		return new DeleteHostAnswer(true);
+         long hostId = host.getId();
+         _agentMgr.disconnectWithoutInvestigation(hostId, Status.Event.Remove);
+         _detailsDao.deleteDetails(hostId);
+         host.setGuid(null);
+         _hostDao.update(hostId, host);
+         _hostDao.remove(hostId);
+         return new DeleteHostAnswer(false);
+ 
      }
  
  }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/57bafc8f/server/src/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java
----------------------------------------------------------------------
diff --cc server/src/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java
index 113e5e2,470d9b8..bc50d95
--- a/server/src/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java
+++ b/server/src/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java
@@@ -68,10 -69,6 +68,10 @@@ import com.cloud.exception.InvalidParam
  import com.cloud.exception.NetworkRuleConflictException;
  import com.cloud.exception.PermissionDeniedException;
  import com.cloud.exception.ResourceUnavailableException;
- import com.cloud.network.ExternalLoadBalancerUsageManager;
++import com.cloud.network.ExternalDeviceUsageManager;
 +import com.cloud.network.IpAddress;
 +import com.cloud.network.LBHealthCheckPolicyVO;
 +import com.cloud.network.Network;
  import com.cloud.network.Network.Capability;
  import com.cloud.network.Network.Provider;
  import com.cloud.network.Network.Service;

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/57bafc8f/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java
----------------------------------------------------------------------
diff --cc server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java
index 554e691,ddfa998..c5df621
--- a/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java
+++ b/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java
@@@ -3687,13 -3690,16 +3686,16 @@@ public class VirtualNetworkApplianceMan
              rulesTO = new ArrayList<FirewallRuleTO>();
              for (FirewallRule rule : rules) {
                  FirewallRule.TrafficType traffictype = rule.getTrafficType();
 -                if(traffictype == FirewallRule.TrafficType.Ingress){
 +                if (traffictype == FirewallRule.TrafficType.Ingress) {
                      IpAddress sourceIp = _networkModel.getIp(rule.getSourceIpAddressId());
 -                    FirewallRuleTO ruleTO = new FirewallRuleTO(rule, null, sourceIp.getAddress().addr(),Purpose.Firewall,traffictype);
 +                    FirewallRuleTO ruleTO = new FirewallRuleTO(rule, null, sourceIp.getAddress().addr(), Purpose.Firewall, traffictype);
                      rulesTO.add(ruleTO);
                  } else if (rule.getTrafficType() == FirewallRule.TrafficType.Egress){
+                     NetworkVO network = _networkDao.findById(guestNetworkId);
+                     NetworkOfferingVO offering = _networkOfferingDao.findById(network.getNetworkOfferingId());
+                     defaultEgressPolicy = offering.getEgressDefaultPolicy();
                      assert (rule.getSourceIpAddressId()==null) : "ipAddressId should be null for egress firewall rule. ";
-                     FirewallRuleTO ruleTO = new FirewallRuleTO(rule, null,"",Purpose.Firewall, traffictype);
+                     FirewallRuleTO ruleTO = new FirewallRuleTO(rule, null,"",Purpose.Firewall, traffictype, defaultEgressPolicy);
                      rulesTO.add(ruleTO);
                  }
              }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/57bafc8f/server/src/com/cloud/resource/ResourceManagerImpl.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/57bafc8f/server/src/com/cloud/storage/StorageManagerImpl.java
----------------------------------------------------------------------
diff --cc server/src/com/cloud/storage/StorageManagerImpl.java
index 55b214f,b64b202..ac04886
--- a/server/src/com/cloud/storage/StorageManagerImpl.java
+++ b/server/src/com/cloud/storage/StorageManagerImpl.java
@@@ -585,10 -578,9 +580,9 @@@ public class StorageManagerImpl extend
          if (_storageCleanupEnabled) {
              _executor.shutdown();
          }
- 
          return true;
      }
 -
 +    
      @DB
      @Override
      public DataStore createLocalStorage(Host host, StoragePoolInfo pInfo) throws ConnectionException {
@@@ -1097,95 -1095,51 +1097,51 @@@
      @Override
      @DB
      public void cleanupSecondaryStorage(boolean recurring) {
+         // NOTE that object_store refactor will immediately delete the object from secondary storage when deleteTemplate etc api is issued.
+         // so here we don't need to issue DeleteCommand to resource anymore, only need to remove db entry.
          try {
-             // Cleanup templates in secondary storage hosts
+             // Cleanup templates in template_store_ref
 -            List<DataStore> imageStores = this.dataStoreMgr.getImageStoresByScope(new ZoneScope(null));
 +            List<DataStore> imageStores = dataStoreMgr.getImageStoresByScope(new ZoneScope(null));
              for (DataStore store : imageStores) {
                  try {
                      long storeId = store.getId();
 -                    List<TemplateDataStoreVO> destroyedTemplateStoreVOs = this._templateStoreDao.listDestroyed(storeId);
 +                    List<TemplateDataStoreVO> destroyedTemplateStoreVOs = _templateStoreDao.listDestroyed(storeId);
                      s_logger.debug("Secondary storage garbage collector found " + destroyedTemplateStoreVOs.size()
-                             + " templates to cleanup on secondary storage host: " + store.getName());
+                             + " templates to cleanup on template_store_ref for store: " + store.getName());
                      for (TemplateDataStoreVO destroyedTemplateStoreVO : destroyedTemplateStoreVOs) {
-                         if (!_tmpltMgr.templateIsDeleteable(destroyedTemplateStoreVO.getTemplateId())) {
-                             if (s_logger.isDebugEnabled()) {
-                                 s_logger.debug("Not deleting template at: " + destroyedTemplateStoreVO);
-                             }
-                             continue;
-                         }
- 
                          if (s_logger.isDebugEnabled()) {
-                             s_logger.debug("Deleting template store: " + destroyedTemplateStoreVO);
-                         }
- 
-                         VMTemplateVO destroyedTemplate = _vmTemplateDao.findById(destroyedTemplateStoreVO.getTemplateId());
-                         if (destroyedTemplate == null) {
-                             s_logger.error("Cannot find template : " + destroyedTemplateStoreVO.getTemplateId() + " from template table");
-                             throw new CloudRuntimeException("Template " + destroyedTemplateStoreVO.getTemplateId()
-                                     + " is found in secondary storage, but not found in template table");
+                             s_logger.debug("Deleting template store DB entry: " + destroyedTemplateStoreVO);
                          }
-                         String installPath = destroyedTemplateStoreVO.getInstallPath();
- 
-                         TemplateInfo tmpl = tmplFactory.getTemplate(destroyedTemplateStoreVO.getTemplateId(), store);
-                         if (installPath != null) {
-                             EndPoint ep = _epSelector.select(store);
-                             Command cmd = new DeleteCommand(tmpl.getTO());
-                             Answer answer = ep.sendMessage(cmd);
- 
-                             if (answer == null || !answer.getResult()) {
-                                 s_logger.debug("Failed to delete " + destroyedTemplateStoreVO + " due to "
-                                         + ((answer == null) ? "answer is null" : answer.getDetails()));
-                             } else {
 -                        _templateStoreDao.remove(destroyedTemplateStoreVO.getId());
 +                                _templateStoreDao.remove(destroyedTemplateStoreVO.getId());
-                                 s_logger.debug("Deleted template at: " + destroyedTemplateStoreVO.getInstallPath());
-                             }
-                         } else {
-                             _templateStoreDao.remove(destroyedTemplateStoreVO.getId());
-                         }
                      }
                  } catch (Exception e) {
-                     s_logger.warn("problem cleaning up templates in secondary storage store " + store.getName(), e);
+                     s_logger.warn("problem cleaning up templates in template_store_ref for store: " + store.getName(), e);
                  }
              }
  
-             // CleanUp snapshots on Secondary Storage.
+             // CleanUp snapshots on snapshot_store_ref
              for (DataStore store : imageStores) {
 -                try {
 +                        try {
                      List<SnapshotDataStoreVO> destroyedSnapshotStoreVOs = _snapshotStoreDao.listDestroyed(store.getId());
                      s_logger.debug("Secondary storage garbage collector found " + destroyedSnapshotStoreVOs.size()
-                             + " snapshots to cleanup on secondary storage host: " + store.getName());
+                             + " snapshots to cleanup on snapshot_store_ref for store: " + store.getName());
                      for (SnapshotDataStoreVO destroyedSnapshotStoreVO : destroyedSnapshotStoreVOs) {
                          // check if this snapshot has child
                          SnapshotInfo snap = snapshotFactory.getSnapshot(destroyedSnapshotStoreVO.getSnapshotId(), store);
                          if ( snap.getChild() != null ){
                              s_logger.debug("Skip snapshot on store: " + destroyedSnapshotStoreVO + " , because it has child");
 -                            continue;
 -                        }
 +                                    continue;
 +                                }
  
                          if (s_logger.isDebugEnabled()) {
-                             s_logger.debug("Deleting snapshot on store: " + destroyedSnapshotStoreVO);
+                             s_logger.debug("Deleting snapshot store DB entry: " + destroyedSnapshotStoreVO);
 -                        }
 +                            }
  
-                         String installPath = destroyedSnapshotStoreVO.getInstallPath();
- 
-                         if (installPath != null) {
-                             EndPoint ep = _epSelector.select(store);
-                             DeleteCommand cmd = new DeleteCommand(snap.getTO());
-                             Answer answer = ep.sendMessage(cmd);
-                             if (answer == null || !answer.getResult()) {
-                                 s_logger.debug("Failed to delete " + destroyedSnapshotStoreVO + " due to "
-                                         + ((answer == null) ? "answer is null" : answer.getDetails()));
-                             } else {
-                                 _volumeStoreDao.remove(destroyedSnapshotStoreVO.getId());
-                                 s_logger.debug("Deleted snapshot at: " + destroyedSnapshotStoreVO.getInstallPath());
-                             }
-                         } else {
 -                        _snapshotStoreDao.remove(destroyedSnapshotStoreVO.getId());
 -                    }
 +                            _snapshotStoreDao.remove(destroyedSnapshotStoreVO.getId());
 +                        }
-                     }
  
                  } catch (Exception e2) {
-                     s_logger.warn("problem cleaning up snapshots in secondary storage store " + store.getName(), e2);
+                     s_logger.warn("problem cleaning up snapshots in snapshot_store_ref for store: " + store.getName(), e2);
                  }
  
              }
@@@ -1195,30 -1149,12 +1151,12 @@@
                  try {
                      List<VolumeDataStoreVO> destroyedStoreVOs = _volumeStoreDao.listDestroyed(store.getId());
                      s_logger.debug("Secondary storage garbage collector found " + destroyedStoreVOs.size()
-                             + " volumes to cleanup on secondary storage host: " + store.getName());
+                             + " volumes to cleanup on volume_store_ref for store: " + store.getName());
                      for (VolumeDataStoreVO destroyedStoreVO : destroyedStoreVOs) {
                          if (s_logger.isDebugEnabled()) {
-                             s_logger.debug("Deleting volume on store: " + destroyedStoreVO);
+                             s_logger.debug("Deleting volume store DB entry: " + destroyedStoreVO);
                          }
- 
-                         String installPath = destroyedStoreVO.getInstallPath();
- 
-                         VolumeInfo vol = volFactory.getVolume(destroyedStoreVO.getVolumeId(), store);
- 
-                         if (installPath != null) {
-                             EndPoint ep = _epSelector.select(store);
-                             DeleteCommand cmd = new DeleteCommand(vol.getTO());
-                             Answer answer = ep.sendMessage(cmd);
-                             if (answer == null || !answer.getResult()) {
-                                 s_logger.debug("Failed to delete " + destroyedStoreVO + " due to "
-                                         + ((answer == null) ? "answer is null" : answer.getDetails()));
-                             } else {
 -                        _volumeStoreDao.remove(destroyedStoreVO.getId());
 +                                _volumeStoreDao.remove(destroyedStoreVO.getId());
-                                 s_logger.debug("Deleted volume at: " + destroyedStoreVO.getInstallPath());
-                             }
-                         } else {
-                             _volumeStoreDao.remove(destroyedStoreVO.getId());
-                         }
                      }
  
                  } catch (Exception e2) {
@@@ -1338,10 -1274,10 +1276,10 @@@
      }
  
      @Override
 -    public void onManagementNodeLeft(List<ManagementServerHostVO> nodeList, long selfNodeId) {
 -        for (ManagementServerHostVO vo : nodeList) {
 +    public void onManagementNodeLeft(List<? extends ManagementServerHost> nodeList, long selfNodeId) {
 +        for (ManagementServerHost vo : nodeList) {
              if (vo.getMsid() == _serverId) {
-                 s_logger.info("Cleaning up storage maintenance jobs associated with Management server" + vo.getMsid());
+                 s_logger.info("Cleaning up storage maintenance jobs associated with Management server: " + vo.getMsid());
                  List<Long> poolIds = _storagePoolWorkDao.searchForPoolIdsForPendingWorkJobs(vo.getMsid());
                  if (poolIds.size() > 0) {
                      for (Long poolId : poolIds) {
@@@ -1577,9 -1550,10 +1552,10 @@@
                      allocatedSizeWithtemplate = _capacityMgr.getAllocatedPoolCapacity(poolVO, tmpl);
                  }
              }
-             if (volume.getState() != Volume.State.Ready)
+             if (volume.getState() != Volume.State.Ready) {
                  totalAskingSize = totalAskingSize + volume.getSize();
 -            }
 +        }
+         }
  
          long totalOverProvCapacity;
          if (pool.getPoolType() == StoragePoolType.NetworkFilesystem) {


[26/50] [abbrv] git commit: updated refs/heads/vmsync to f737019

Posted by ah...@apache.org.
ACS4.1_Quick_install_guide


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/676d8744
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/676d8744
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/676d8744

Branch: refs/heads/vmsync
Commit: 676d8744ef6801cf3fcf6425d37ba27a149e5615
Parents: bd784fb
Author: Meng Han <me...@ufl.edu>
Authored: Sat Jun 29 11:32:36 2013 -0400
Committer: Sebastien Goasguen <ru...@gmail.com>
Committed: Sat Jun 29 15:40:12 2013 -0400

----------------------------------------------------------------------
 docs/qig/en-US/Author_Group.xml     |  32 ++++
 docs/qig/en-US/Book_Info.xml        |  52 +++++++
 docs/qig/en-US/Chapter.xml          |  53 +++++++
 docs/qig/en-US/Environment.xml      | 258 +++++++++++++++++++++++++++++++
 docs/qig/en-US/Management.xml       |  99 ++++++++++++
 docs/qig/en-US/Overview.xml         |  93 +++++++++++
 docs/qig/en-US/Preface.xml          |  33 ++++
 docs/qig/en-US/Revision_History.xml |  42 +++++
 docs/qig/en-US/config.xml           | 177 +++++++++++++++++++++
 docs/qig/en-US/kvm.xml              | 142 +++++++++++++++++
 docs/qig/en-US/qig.ent              |  22 +++
 docs/qig/en-US/qig.xml              |  36 +++++
 12 files changed, 1039 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/676d8744/docs/qig/en-US/Author_Group.xml
----------------------------------------------------------------------
diff --git a/docs/qig/en-US/Author_Group.xml b/docs/qig/en-US/Author_Group.xml
new file mode 100644
index 0000000..432ef6f
--- /dev/null
+++ b/docs/qig/en-US/Author_Group.xml
@@ -0,0 +1,32 @@
+<?xml version='1.0' encoding='utf-8' ?>
+<!DOCTYPE authorgroup PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd" [
+<!ENTITY % BOOK_ENTITIES SYSTEM "qig.ent">
+%BOOK_ENTITIES;
+]>
+
+<!-- Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+ 
+   http://www.apache.org/licenses/LICENSE-2.0
+ 
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied.  See the License for the
+ specific language governing permissions and limitations
+ under the License.
+-->
+
+
+<authorgroup>
+    <author>
+        <firstname>Apache</firstname>
+        <surname>CloudStack</surname>
+    </author>
+</authorgroup>
+

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/676d8744/docs/qig/en-US/Book_Info.xml
----------------------------------------------------------------------
diff --git a/docs/qig/en-US/Book_Info.xml b/docs/qig/en-US/Book_Info.xml
new file mode 100644
index 0000000..e356de4
--- /dev/null
+++ b/docs/qig/en-US/Book_Info.xml
@@ -0,0 +1,52 @@
+<?xml version='1.0' encoding='utf-8' ?>
+<!DOCTYPE bookinfo PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd" [
+<!ENTITY % BOOK_ENTITIES SYSTEM "Runbook.ent">
+%BOOK_ENTITIES;
+]>
+
+<!-- Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+ 
+   http://www.apache.org/licenses/LICENSE-2.0
+ 
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied.  See the License for the
+ specific language governing permissions and limitations
+ under the License.
+-->
+
+
+<bookinfo id="book-qig-qig">
+    <title>Quick Install Guide</title>
+    <subtitle>Prescriptive instructions for deploying Apache CloudStack</subtitle>
+    <productname>Apache CloudStack</productname>
+    <productnumber>4.0.2</productnumber>
+    <edition>0</edition>
+    <pubsnumber>0</pubsnumber>
+    <abstract>
+        <para>
+            This guide is designed to provide a strict environment to guarantee 
+                        a higher degree of success in initial deployments of Apache CloudStack. 
+                        All of the elements of the environment will be provided to you. 
+                        Apache CloudStack is capable of much more complex configurations, 
+                        but they are beyond the scope of this document.
+        </para>
+    </abstract>
+    <corpauthor>
+        <inlinemediaobject>
+            <imageobject>
+                <imagedata fileref="Common_Content/images/title_logo.svg" format="SVG" />
+            </imageobject>
+        </inlinemediaobject>
+    </corpauthor>
+    <xi:include href="Common_Content/Legal_Notice.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
+    <xi:include href="Author_Group.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
+</bookinfo>
+

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/676d8744/docs/qig/en-US/Chapter.xml
----------------------------------------------------------------------
diff --git a/docs/qig/en-US/Chapter.xml b/docs/qig/en-US/Chapter.xml
new file mode 100644
index 0000000..4adf63c
--- /dev/null
+++ b/docs/qig/en-US/Chapter.xml
@@ -0,0 +1,53 @@
+<?xml version='1.0' encoding='utf-8' ?>
+<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd" [
+<!ENTITY % BOOK_ENTITIES SYSTEM "Runbook.ent">
+%BOOK_ENTITIES;
+]>
+
+<!-- Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+ 
+   http://www.apache.org/licenses/LICENSE-2.0
+ 
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied.  See the License for the
+ specific language governing permissions and limitations
+ under the License.
+-->
+
+
+<chapter id="chap-Runbook-Test_Chapter">
+  <title>Test Chapter</title>
+  <para>
+    This is a test paragraph
+  </para>
+  <section id="sect-Runbook-Test_Chapter-Test_Section_1">
+    <title>Test Section 1</title>
+      <para>
+        This is a test paragraph in a section
+      </para>
+    </section>
+    
+    <section id="sect-Runbook-Test_Chapter-Test_Section_2">
+      <title>Test Section 2</title>
+    <para>
+      This is a test paragraph in Section 2
+      <orderedlist>
+        <listitem>
+          <para>
+            listitem text
+          </para>
+        </listitem>
+      </orderedlist>
+    </para>
+  </section>
+
+</chapter>
+

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/676d8744/docs/qig/en-US/Environment.xml
----------------------------------------------------------------------
diff --git a/docs/qig/en-US/Environment.xml b/docs/qig/en-US/Environment.xml
new file mode 100644
index 0000000..e48b405
--- /dev/null
+++ b/docs/qig/en-US/Environment.xml
@@ -0,0 +1,258 @@
+<?xml version='1.0' encoding='utf-8' ?>
+<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd" [
+<!ENTITY % BOOK_ENTITIES SYSTEM "qig.ent">
+%BOOK_ENTITIES;
+]>
+
+<!-- Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+ 
+   http://www.apache.org/licenses/LICENSE-2.0
+ 
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied.  See the License for the
+ specific language governing permissions and limitations
+ under the License.
+-->
+
+
+<chapter id="chap-qig-Environment">
+  <title>Environment</title>
+  <para>
+      Before you begin, you need to prepare the environment before you install CloudStack. 
+      We will go over the steps to prepare now.
+  </para>
+  <section id="sect-qig-Environment-operatingsys">
+    <title>Operating System</title>
+    <para>
+      Using the CentOS 6.4 x86_64 minimal install ISO, you'll need to install CentOS
+      on your hardware. The defaults will generally be acceptable for this installation.
+    </para>
+    <para>
+      Once this installation is complete, you'll want to connect to your freshly
+      installed machine via SSH as the root user. Note that you should not allow root 
+      logins in a production environment, so be sure to turn off remote logins once you 
+      have finished the installation and configuration.
+    </para>
+    <section id="sect-qig-Environment-operatingsys-network">
+      <title>Configuring the network</title>
+      <para>
+        By default the network will not come up on your hardware and you
+        will need to configure it to work in your environment. Since we 
+        specified that there will be no DHCP server in this environment
+        we will be manually configuring your network interface. We will
+        assume, for the purposes of this exercise, that eth0 is the only network
+        interface that will be connected and used. 
+      </para>
+      <para>
+        Connecting via the console you should login as root. Check the 
+        file <filename>/etc/sysconfig/network-scripts/ifcfg-eth0</filename>,
+        it will look like this by default:
+        <screen>
+DEVICE="eth0"
+HWADDR="52:54:00:B9:A6:C0"
+NM_CONTROLLED="yes"
+ONBOOT="no"
+        </screen>
+      </para>
+      <para>
+        Unfortunately, this configuration will not permit you to connect to the network,
+        and is also unsuitable for our purposes with CloudStack. We want to 
+    configure that file so that it specifies the IP address, netmask, etc., as shown
+    in the following example:
+      </para>
+      <important>
+        <title>Hardware Addresses</title>
+    <para>You should not use the hardware address (aka MAC address) from our example
+        for your configuration. It is network interface specific, so you should keep the 
+        address already provided in the HWADDR directive.
+        </para>
+      </important>
+      <screen>
+DEVICE=eth0
+HWADDR=52:54:00:B9:A6:C0
+NM_CONTROLLED=no
+ONBOOT=yes
+BOOTPROTO=none
+IPADDR=172.16.10.2
+NETMASK=255.255.255.0
+GATEWAY=172.16.10.1
+DNS1=8.8.8.8
+DNS2=8.8.4.4
+      </screen>
+      <note>
+        <title>IP Addressing</title>
+        <para>Throughout this document we are assuming that you will
+        have a /24 network for your CloudStack implementation. This can be any
+        RFC 1918 network. However, we are assuming that you will match the 
+        machine address that we are using. Thus we may use 
+        <userinput><replaceable>172.16.10</replaceable>.2</userinput>, and because
+        you might be using the 192.168.55.0/24 network, you would use 
+        <userinput><replaceable>192.168.55</replaceable>.2</userinput>
+        </para>
+      </note>
+      <para> Now that we have the configuration files properly set up, we need to run a
+      few commands to start up the network</para>
+      <screen><prompt># </prompt><userinput><command>chkconfig</command> network on</userinput></screen>
+      <screen><prompt># </prompt><userinput><command>service</command> network start</userinput></screen>
+    </section>
+    <section id="sect-qig-Environment-operatingsys-hostname">
+      <title>Hostname</title>
+      <para>
+        CloudStack requires that the hostname be properly set. If you used the default
+        options in the installation, then your hostname is currently set to 
+        localhost.localdomain. To test this we will run: </para>
+        <screen><prompt># </prompt><userinput>hostname --fqdn</userinput></screen>
+        <para>At this point it will likely return:</para>
+        <screen>localhost</screen>
+        <para>To rectify this situation - we'll set the hostname by editing the
+        <filename>/etc/hosts</filename> file so that it follows a similar format to this example:<screen>
+127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
+::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
+172.16.10.2 srvr1.cloud.priv
+</screen>
+        </para>
+        <para>After you've modified that file, go ahead and restart the network using:</para>
+<screen><prompt># </prompt><userinput><command>service</command> network restart</userinput></screen>
+        <para>Now recheck with the <command>hostname --fqdn</command> command and ensure that it returns 
+        an FQDN response</para>
+    </section>
+    <section id="sect-qig-Environment-operatingsys-selinux">
+      <title>SELinux</title>
+      <para>At the moment, for CloudStack to work properly SELinux must be 
+      set to permissive. We want to both configure this for future boots and modify it
+      in the current running system.
+      </para>
+      <para>To configure SELinux to be permissive in the running system we need to run 
+      the following command:</para>
+      <screen><prompt># </prompt><userinput><command>setenforce</command> 0</userinput></screen>
+      <para> 
+        To ensure that it remains in that state we need to configure the file 
+        <filename>/etc/selinux/config</filename> to reflect the permissive state, 
+    as shown in this example: </para>
+      <screen>
+
+# This file controls the state of SELinux on the system.
+# SELINUX= can take one of these three values:
+#     enforcing - SELinux security policy is enforced.
+#     permissive - SELinux prints warnings instead of enforcing.
+#     disabled - No SELinux policy is loaded.
+SELINUX=permissive
+# SELINUXTYPE= can take one of these two values:
+#     targeted - Targeted processes are protected,
+#     mls - Multi Level Security protection.
+SELINUXTYPE=targeted
+      </screen>
+
+    </section>
+    <section id="sect-qig-Environment-operatingsys-ntp">
+      <title>NTP</title>
+      <para>NTP configuration is a necessity for keeping all of the clocks in your cloud
+      servers in sync. However, NTP is not installed by default. So we'll install
+      and configure NTP at this stage. Installation is accomplished as follows:
+      </para>
+      <screen><prompt># </prompt><userinput><command>yum</command> -y install ntp</userinput></screen>
+      <para>The actual default configuration is fine for our purposes, so we merely need to
+      enable it and set it to start on boot as follows:</para>
+      <screen><prompt># </prompt><userinput><command>chkconfig</command> ntpd on</userinput></screen>
+      <screen><prompt># </prompt><userinput><command>service</command> ntpd start</userinput></screen>
+    </section>
+    <section id="sect-qig-Environment-operatingsys-repoconfig">
+      <title>Configuring the CloudStack Package Repository</title>
+      <para>
+        We need to configure the machine to use a CloudStack package repository.
+        <important>
+          <title>The below repository is not an official Apache CloudStack project repository</title>
+          <para>
+            The Apache CloudStack official releases are source code. As such there are no
+            'official' binaries available. The full installation guide describes how to take
+            the source release and generate RPMs and a yum repository. This guide attempts
+            to keep things as simple as possible, and thus we are using one of the
+            community-provided yum repositories.
+          </para>
+        </important>
+      </para>
+      <para>To add the CloudStack repository, create <filename>/etc/yum.repos.d/cloudstack.repo</filename> and insert the following information.</para>
+      <programlisting>
+[cloudstack]
+name=cloudstack
+baseurl=http://cloudstack.apt-get.eu/rhel/4.1/
+enabled=1
+gpgcheck=0
+      </programlisting>
+    </section>
+  </section>
+  <section id="sect-qig-Environment-nfs">
+    <title>NFS</title>
+    <para>
+      Our configuration is going to use NFS for both primary and secondary
+      storage. We are going to go ahead and setup two NFS shares for those 
+      purposes. We'll start out by installing
+      <application>nfs-utils</application>.
+    </para>
+    <screen><prompt># </prompt><userinput><command>yum</command> install nfs-utils</userinput></screen>
+    <para>
+      We now need to configure NFS to serve up two different shares. This is handled comparatively easily
+      in the <filename>/etc/exports</filename> file. You should ensure that it has the following content:
+    </para>
+    <screen>
+/secondary *(rw,async,no_root_squash)
+/primary   *(rw,async,no_root_squash)
+    </screen>
+    <para>
+      You will note that we specified two directories that don't exist (yet) on the system. 
+      We'll go ahead and create those directories and set permissions appropriately on them with the following commands:
+    </para>
+    <screen>
+<prompt># </prompt><userinput><command>mkdir</command> /primary</userinput>
+<prompt># </prompt><userinput><command>mkdir</command> /secondary</userinput>
+    </screen>
+    <para>CentOS 6.x releases use NFSv4 by default. NFSv4 requires that the domain setting matches on all clients. 
+    In our case, the domain is cloud.priv, so ensure that the domain setting in <filename>/etc/idmapd.conf</filename>
+    is uncommented and set as follows:</para>
+    <screen>Domain = cloud.priv</screen>
+    <para>Now you'll need to uncomment the configuration values in the file <filename>/etc/sysconfig/nfs</filename></para>
+    <screen>
+LOCKD_TCPPORT=32803
+LOCKD_UDPPORT=32769
+MOUNTD_PORT=892
+RQUOTAD_PORT=875
+STATD_PORT=662
+STATD_OUTGOING_PORT=2020
+    </screen>
+    <para> Now we need to configure the firewall to permit incoming NFS connections. 
+    Edit the file <filename>/etc/sysconfig/iptables</filename>
+    </para>
+    <screen>
+-A INPUT -s 172.16.10.0/24 -m state --state NEW -p udp --dport 111 -j ACCEPT
+-A INPUT -s 172.16.10.0/24 -m state --state NEW -p tcp --dport 111 -j ACCEPT
+-A INPUT -s 172.16.10.0/24 -m state --state NEW -p tcp --dport 2049 -j ACCEPT
+-A INPUT -s 172.16.10.0/24 -m state --state NEW -p tcp --dport 32803 -j ACCEPT
+-A INPUT -s 172.16.10.0/24 -m state --state NEW -p udp --dport 32769 -j ACCEPT
+-A INPUT -s 172.16.10.0/24 -m state --state NEW -p tcp --dport 892 -j ACCEPT
+-A INPUT -s 172.16.10.0/24 -m state --state NEW -p udp --dport 892 -j ACCEPT
+-A INPUT -s 172.16.10.0/24 -m state --state NEW -p tcp --dport 875 -j ACCEPT
+-A INPUT -s 172.16.10.0/24 -m state --state NEW -p udp --dport 875 -j ACCEPT
+-A INPUT -s 172.16.10.0/24 -m state --state NEW -p tcp --dport 662 -j ACCEPT
+-A INPUT -s 172.16.10.0/24 -m state --state NEW -p udp --dport 662 -j ACCEPT
+    </screen>
+    <para>Now you can restart the iptables service with the following command:
+    </para>
+    <screen><prompt># </prompt><userinput><command>service</command> iptables restart</userinput></screen>
+    <para>We now need to configure the nfs service to start on boot and actually start it on the host by
+    executing the following commands:</para>
+    <screen>
+<prompt># </prompt><userinput><command>service</command> rpcbind start</userinput>
+<prompt># </prompt><userinput><command>service</command> nfs start</userinput>
+<prompt># </prompt><userinput><command>chkconfig</command> rpcbind on</userinput>
+<prompt># </prompt><userinput><command>chkconfig</command> nfs on</userinput>
+    </screen>
+  </section>
+</chapter>

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/676d8744/docs/qig/en-US/Management.xml
----------------------------------------------------------------------
diff --git a/docs/qig/en-US/Management.xml b/docs/qig/en-US/Management.xml
new file mode 100644
index 0000000..8c6040f
--- /dev/null
+++ b/docs/qig/en-US/Management.xml
@@ -0,0 +1,99 @@
+<?xml version='1.0' encoding='utf-8' ?>
+<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd" [
+<!ENTITY % BOOK_ENTITIES SYSTEM "qig.ent">
+%BOOK_ENTITIES;
+]>
+
+<!-- Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+ 
+   http://www.apache.org/licenses/LICENSE-2.0
+ 
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied.  See the License for the
+ specific language governing permissions and limitations
+ under the License.
+-->
+
+
+<chapter id="chap-qig-Management">
+  <title>Installation of the management server</title>
+  <para>
+    Now it is time to start installing CloudStack's management server
+    and some of the related components. 
+  </para>
+  <section id="sect-qig-Management-database">
+    <title>Database Installation and Configuration</title>
+      <para>
+        We'll start out by installing <application>MySQL</application> and configuring 
+        some options to ensure CloudStack runs well.
+      </para>
+      <para>
+        To install <application>MySQL</application> run the following command:
+        <screen><prompt># </prompt><userinput><command>yum</command> -y install mysql-server</userinput></screen>
+      </para>
+      <para>With <application>MySQL</application> installed we need to make
+      a few configuration changes to <filename>/etc/my.cnf</filename>. 
+      Specifically we need to add the following options to the [mysqld] section:
+        <screen>
+innodb_rollback_on_timeout=1
+innodb_lock_wait_timeout=600
+max_connections=350
+log-bin=mysql-bin
+binlog-format = 'ROW' 
+        </screen>
+      </para>
+      <para>
+        Now that <application>MySQL</application> is properly configured we can
+        start it and configure it to start on boot as follows:
+        <screen>
+<prompt># </prompt><userinput><command>service</command> mysqld start</userinput>
+<prompt># </prompt><userinput><command>chkconfig</command> mysqld on</userinput>
+        </screen>
+      </para>
+
+  </section>
+    
+  <section id="sect-qig-Management-installation">
+    <title>Installation</title>
+    <para>We are now going to install the management server. We do that by executing the following command:
+      <screen><prompt># </prompt><userinput><command>yum</command> -y install cloud-client</userinput></screen>
+    </para>
+    <para>With the application itself installed we can now setup the database, we'll do that with the following command
+      and options: 
+    </para>
+    <screen><prompt># </prompt><userinput><command>cloudstack-setup-databases</command> cloud:<replaceable>password</replaceable>@localhost --deploy-as=root</userinput></screen>
+    <para>When this process is finished, you should see a message like "CloudStack has successfully initialized the database."
+    </para>
+    <para>Now that the database has been created, we can take the final step in setting up the management server by issuing the following command:</para>
+    <screen><prompt># </prompt><userinput><command>cloudstack-setup-management</command></userinput></screen>
+  </section>
+  <section id="sect-qig-Management-templatesetup">
+    <title>System Template Setup</title>
+    <para>CloudStack uses a number of system VMs to provide functionality for 
+      accessing the console of virtual machines, providing various networking 
+      services, and managing various aspects of storage. This step will 
+      acquire those system images ready for deployment when we bootstrap
+      your cloud.
+    </para>
+    <para>
+      Now we need to download the system VM template and deploy that to the
+      share we just mounted. The management server includes a script to properly
+      manipulate the system VMs images. 
+    </para>
+    <screen><prompt># </prompt><userinput><command>/usr/share/cloudstack-common/scripts/storage/secondary/cloud-install-sys-tmplt</command> -m  /secondary -u http://download.cloud.com/templates/acton/acton-systemvm-02062012.qcow2.bz2 -h kvm -F</userinput>
+    </screen>
+    <para>
+      That concludes our setup of the management server. We still need to 
+      configure CloudStack, but we will do that after we get our hypervisor
+      set up. 
+    </para>
+  </section>
+</chapter>

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/676d8744/docs/qig/en-US/Overview.xml
----------------------------------------------------------------------
diff --git a/docs/qig/en-US/Overview.xml b/docs/qig/en-US/Overview.xml
new file mode 100644
index 0000000..31915f5
--- /dev/null
+++ b/docs/qig/en-US/Overview.xml
@@ -0,0 +1,93 @@
+<?xml version='1.0' encoding='utf-8' ?>
+<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd" [
+<!ENTITY % BOOK_ENTITIES SYSTEM "qig.ent">
+%BOOK_ENTITIES;
+]>
+
+<!-- Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+ 
+   http://www.apache.org/licenses/LICENSE-2.0
+ 
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied.  See the License for the
+ specific language governing permissions and limitations
+ under the License.
+-->
+
+
+<chapter id="chap-qig-Overview">
+  <title>Overview</title>
+    <para>
+    Infrastructure-as-a-Service (IaaS) clouds can be a complex thing to build, 
+    and by definition they have a plethora of options, which often lead to confusion
+    for even experienced admins who are newcomers to building cloud platforms. 
+    The goal for this runbook is to provide a straightforward set of instructions 
+    to get you up and running with CloudStack with a minimum amount of trouble. 
+    </para>
+    <section id="sect-qig-Overview-What_building">
+      <title>What exactly are we building?</title>
+      <para>
+      This runbook will focus on building a CloudStack cloud using KVM with 
+      CentOS 6.4 with NFS storage on a flat layer-2 network utilizing 
+          layer-3 network isolation (aka Security Groups), and doing it all
+          on a single piece of hardware.   
+      </para>
+      <para> 
+        KVM, or Kernel-based Virtual Machine is a virtualization technology
+        for the Linux kernel. KVM supports native virtualization atop 
+        processors with hardware virtualization extensions. 
+      </para>
+      <para>
+        Security Groups act as distributed firewalls that control access
+        to a group of virtual machines.
+      </para>
+    </section>
+    <section id="sect-qig-Overview-hilevel">
+      <title>High level overview of the process</title>
+      <para> 
+        Before we actually get to installing CloudStack, we'll start with 
+        installing our base operating system, and then configuring that to act
+        as an NFS server for several types of storage. We'll install the 
+        management server, download the systemVMs, and finally install the agent
+        software. Finally we'll spend a good deal of time configuring the entire
+        cloud in the CloudStack web interface. 
+      </para>
+    </section>    
+    <section id="sect-qig-Overview-prereqs">
+      <title>Prerequisites</title>
+        <para>
+          To complete this runbook you'll need the following items:
+          <orderedlist>
+            <listitem>
+              <para>
+                At least one computer which supports hardware virtualization. 
+              </para>
+            </listitem>
+            <listitem>
+              <para>
+                The 
+                <ulink url="http://mirror.stanford.edu/yum/pub/centos/6.4/isos/x86_64/CentOS-6.4-x86_64-minimal.iso">
+                CentOS 6.4 x86_64 minimal install CD
+                </ulink>
+              </para>
+            </listitem>
+            <listitem>
+              <para>
+          A /24 network with the gateway being at xxx.xxx.xxx.1, no DHCP should be on this network and 
+          none of the computers running CloudStack will have a dynamic address. Again this is done for 
+          the sake of simplicity. 
+              </para>
+            </listitem>
+          </orderedlist>
+        </para>
+      </section>
+
+</chapter>

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/676d8744/docs/qig/en-US/Preface.xml
----------------------------------------------------------------------
diff --git a/docs/qig/en-US/Preface.xml b/docs/qig/en-US/Preface.xml
new file mode 100644
index 0000000..d6ba80e
--- /dev/null
+++ b/docs/qig/en-US/Preface.xml
@@ -0,0 +1,33 @@
+<?xml version='1.0' encoding='utf-8' ?>
+<!DOCTYPE preface PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd" [
+<!ENTITY % BOOK_ENTITIES SYSTEM "qig.ent">
+%BOOK_ENTITIES;
+]>
+
+<!-- Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+ 
+   http://www.apache.org/licenses/LICENSE-2.0
+ 
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied.  See the License for the
+ specific language governing permissions and limitations
+ under the License.
+-->
+
+
+<preface id="pref-qig-Preface">
+    <title>Preface</title>
+    <xi:include href="Common_Content/Conventions.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
+    <xi:include href="Feedback.xml" xmlns:xi="http://www.w3.org/2001/XInclude"><xi:fallback xmlns:xi="http://www.w3.org/2001/XInclude"><xi:include href="Common_Content/Feedback.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
+    </xi:fallback>
+    </xi:include>
+</preface>
+

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/676d8744/docs/qig/en-US/Revision_History.xml
----------------------------------------------------------------------
diff --git a/docs/qig/en-US/Revision_History.xml b/docs/qig/en-US/Revision_History.xml
new file mode 100644
index 0000000..1ff4d77
--- /dev/null
+++ b/docs/qig/en-US/Revision_History.xml
@@ -0,0 +1,42 @@
+<?xml version='1.0' encoding='utf-8' ?>
+<!DOCTYPE appendix PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd" [
+<!ENTITY % BOOK_ENTITIES SYSTEM "qig.ent">
+%BOOK_ENTITIES;
+]>
+
+<!-- Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+ 
+   http://www.apache.org/licenses/LICENSE-2.0
+ 
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied.  See the License for the
+ specific language governing permissions and limitations
+ under the License.
+-->
+
+
+<appendix id="appe-qig-Revision_History">
+    <title>Revision History</title>
+    <simpara>
+        <revhistory>
+            <revision>
+                <revnumber>0-0</revnumber>
+                <date>Fri Jun 28 2013</date>
+                <revdescription>
+                    <simplelist>
+                        <member>Initial creation of book by publican</member>
+                    </simplelist>
+                </revdescription>
+            </revision>
+        </revhistory>
+    </simpara>
+</appendix>
+

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/676d8744/docs/qig/en-US/config.xml
----------------------------------------------------------------------
diff --git a/docs/qig/en-US/config.xml b/docs/qig/en-US/config.xml
new file mode 100644
index 0000000..7ff7a72
--- /dev/null
+++ b/docs/qig/en-US/config.xml
@@ -0,0 +1,177 @@
+<?xml version='1.0' encoding='utf-8' ?>
+<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd" [
+<!ENTITY % BOOK_ENTITIES SYSTEM "qig.ent">
+%BOOK_ENTITIES;
+]>
+
+<!-- Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+ 
+   http://www.apache.org/licenses/LICENSE-2.0
+ 
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied.  See the License for the
+ specific language governing permissions and limitations
+ under the License.
+-->
+
+
+<chapter id="chap-qig-Config">
+  <title>Configuration</title>
+  <para>
+    As we noted before we will be using security groups to provide isolation
+    and by default that implies that we'll be using a flat layer-2 network. 
+    It also means that the simplicity of our setup means that we can use the 
+    quick installer. 
+  </para>
+  <section id="sect-qig-Config-UI_Access">
+    <title>UI Access</title>
+      <para>
+        To get access to CloudStack's web interface, merely point your 
+        browser to http://<replaceable>172.16.10.</replaceable>2:8080/client
+        The default username is 'admin', and the default password is 'password'.
+        You should see a splash screen that allows you to choose several options
+        for setting up CloudStack. You should choose the 
+        <option>Continue with Basic Setup</option> option. 
+      </para>
+      <para>
+        You should now see a prompt requiring you to change the password for
+        the admin user. Please do so. 
+      </para>
+  </section>
+  <section id="sect-qig-Config-Zone">
+    <title>Setting up a Zone</title>
+    <para>
+      A zone is the largest organization entity in CloudStack - and we'll be creating one, this 
+      should be the screen that you see in front of you now. And for us there are 5 pieces of
+      information that we need.
+      <orderedlist>
+        <listitem>
+          <para>Name - we will set this to the ever-descriptive 'Zone1' for our cloud.</para>
+        </listitem>
+        <listitem>
+          <para>Public DNS 1 - we will set this to '8.8.8.8' for our cloud.</para>
+        </listitem>
+        <listitem>
+          <para>Public DNS 2 - we will set this to '8.8.4.4' for our cloud.</para>
+        </listitem>
+        <listitem>
+          <para>Internal DNS1 - we will also set this to '8.8.8.8' for our cloud.</para>
+        </listitem>
+        <listitem>
+          <para>Internal DNS2 - we will also set this to '8.8.4.4' for our cloud.</para>
+        </listitem>
+     </orderedlist>  
+    </para>
+    <note>
+      <title>Notes about DNS settings</title>
+      <para>
+        CloudStack distinguishes between internal and public DNS. Internal 
+        DNS is assumed to be capable of resolving internal-only 
+        hostnames, such as your NFS server’s DNS name. Public DNS is 
+        provided to the guest VMs to resolve public IP addresses. You can 
+        enter the same DNS server for both types, but if you do so, you 
+        must make sure that both internal and public IP addresses can 
+        route to the DNS server. In our specific case we will not use any
+        names for resources internally, and we have set both to look
+        to the same external resource so as to not add a nameserver setup
+        to our list of requirements. 
+      </para>
+    </note>
+    </section>
+    <section id="sect-qig-Config-Pod">
+      <title>Pod Configuration</title>
+      <para>Now that we've added a Zone, the next step that comes up is a prompt
+        for information regarding a pod, which requires the following items. 
+        <orderedlist>
+          <listitem>
+             <para>Name - We'll use Pod1 for our cloud.</para>
+          </listitem>
+          <listitem>
+             <para>Gateway - We'll use <replaceable>172.16.10</replaceable>.1 as our gateway</para>
+          </listitem>
+          <listitem>
+             <para>Netmask - We'll use 255.255.255.0</para>
+          </listitem>
+          <listitem>
+             <para>Start/end reserved system IPs - we will use 172.16.10.10-172.16.10.20 </para>
+          </listitem>
+          <listitem>
+             <para>Guest gateway - We'll use <replaceable>172.16.10</replaceable>.1</para>
+          </listitem>
+          <listitem>
+             <para>Guest netmask - We'll use 255.255.255.0</para>
+          </listitem>
+          <listitem>
+             <para>Guest start/end IP - We'll use <replaceable>172.16.10.</replaceable>30-<replaceable>172.16.10.</replaceable>200</para>
+          </listitem>
+        </orderedlist>
+      </para>
+    </section>
+    <section id="Runbook-config-cluster">
+      <title>Cluster</title>
+      <para> Now that we've added a Zone, we need only add a few more items for configuring the cluster. 
+        <orderedlist>
+          <listitem>
+            <para>Name - We'll use Cluster1</para>
+          </listitem>
+          <listitem>
+            <para>Hypervisor - Choose KVM</para>
+          </listitem>
+        </orderedlist>
+      </para>
+      <para> You should be prompted to add the first host to your cluster at this point. Only a few bits of information are needed. 
+        <orderedlist>
+          <listitem>
+             <para>Hostname - we'll use the IP address <replaceable>172.16.10.</replaceable>2 since we didn't set up a DNS server.</para>
+          </listitem>
+          <listitem>
+             <para>Username - we'll use 'root' </para>
+          </listitem>
+          <listitem>
+             <para>Password - enter the operating system password for the root user</para>
+          </listitem>
+        </orderedlist>
+      </para>
+      <section id="Runbook-config-cluster-pristorage">
+        <title>Primary Storage</title> 
+      <para>With your cluster now setup - you should be prompted for primary storage information. Choose NFS as the storage type and then enter the following values in the fields:
+        <orderedlist>
+          <listitem>
+             <para>Name - We'll use 'Primary1'</para>
+          </listitem>
+          <listitem>
+             <para>Server - We'll be using the IP address <replaceable>172.16.10.</replaceable>2</para>
+          </listitem>
+          <listitem>
+            <para>Path - We'll define /primary as the path we are using </para>
+          </listitem>
+        </orderedlist>
+      </para>
+      </section>
+      <section id="Runbook-config-cluster-secstorage">
+        <title>Secondary Storage</title>
+      <para> If this is a new zone, you'll be prompted for secondary storage information - populate it as follows:
+        <orderedlist>
+          <listitem>
+            <para>NFS server - We'll use the IP address <replaceable>172.16.10.</replaceable>2</para>
+          </listitem>
+          <listitem>
+            <para>Path - We'll use /secondary</para>
+          </listitem>
+        </orderedlist>
+      </para>
+      <para> Now, click Launch and your cloud should begin setup - it may take several minutes depending on your internet connection speed for setup to finalize.</para>
+    </section>
+    </section>
+  
+
+</chapter>
+

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/676d8744/docs/qig/en-US/kvm.xml
----------------------------------------------------------------------
diff --git a/docs/qig/en-US/kvm.xml b/docs/qig/en-US/kvm.xml
new file mode 100644
index 0000000..91ed9d5
--- /dev/null
+++ b/docs/qig/en-US/kvm.xml
@@ -0,0 +1,142 @@
+<?xml version='1.0' encoding='utf-8' ?>
+<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd" [
+<!ENTITY % BOOK_ENTITIES SYSTEM "qig.ent">
+%BOOK_ENTITIES;
+]>
+
+<!-- Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+ 
+   http://www.apache.org/licenses/LICENSE-2.0
+ 
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied.  See the License for the
+ specific language governing permissions and limitations
+ under the License.
+-->
+
+
+<chapter id="chap-qig-KVM">
+  <title>KVM Setup and installation</title>
+  <para>
+    KVM is the hypervisor we'll be using - we will review the initial setup
+    which has already been done on the hypervisor host and cover installation
+    of the agent software, you can use the same steps to add additional KVM 
+    nodes to your CloudStack environment. 
+  </para>
+  <section id="sect-qig-KVM-Prerequisites">
+    <title>Prerequisites</title>
+    <para>
+      We explicitly are using the management server as a compute node as well, 
+      which means that we have already performed many of the prerequisite steps
+      when setting up the management server, but we will list them here for
+      clarity. Those steps are:
+      <orderedlist>
+        <listitem>
+          <para>
+            <xref linkend="sect-qig-Environment-operatingsys-network" />
+          </para>
+        </listitem>
+        <listitem>
+          <para>
+            <xref linkend="sect-qig-Environment-operatingsys-hostname" />
+          </para>
+        </listitem>
+        <listitem>
+          <para>
+            <xref linkend="sect-qig-Environment-operatingsys-selinux" />
+          </para>
+        </listitem>
+        <listitem>
+          <para>
+            <xref linkend="sect-qig-Environment-operatingsys-ntp" />
+          </para>
+        </listitem>
+        <listitem>
+          <para>
+            <xref linkend="sect-qig-Environment-operatingsys-repoconfig" />
+          </para>
+        </listitem>
+      </orderedlist>
+    </para>
+    <para>
+      You shouldn't need to do that for the management server, of course, but
+      any additional hosts will need for you to complete the above steps. 
+    </para>
+  </section>
+   
+  <section id="sect-qig-KVM-Installation">
+    <title>Installation</title>
+    <para>Installation of the KVM agent is trivial with just a single command, but afterwards we'll need to configure a few things.</para>
+    <screen><prompt># </prompt><userinput><command>yum</command> -y install cloud-agent</userinput></screen>
+    <section id="sect-qig-KVM-Configuration">
+      <title>KVM Configuration</title>
+      <para>We have two different parts of KVM to configure, libvirt, and QEMU.</para>
+      <section id="sect-qig-KVM-Configuration-qemu">
+        <title>QEMU Configuration</title>
+        <para>
+          KVM configuration is relatively simple at only a single item. We need to 
+          edit the QEMU VNC configuration. This is done by editing 
+          <filename>/etc/libvirt/qemu.conf</filename> and ensuring the following 
+          line is present and uncommented.
+          <screen>vnc_listen=0.0.0.0</screen>
+        </para>
+      </section>
+      <section id="sect-qig-KVM-Configuration-libvirt">
+        <title>Libvirt Configuration</title>
+        <para>
+          CloudStack uses libvirt for managing virtual machines. Therefore it
+          is vital that libvirt is configured correctly. Libvirt is a dependency
+          of cloud-agent and should already be installed.
+        </para>
+        <orderedlist>
+          <listitem>
+            <para>In order to have live migration working libvirt has to listen
+            for unsecured TCP connections. We also need to turn off libvirt's 
+            attempt to use Multicast DNS advertising. Both of these settings 
+            are in <filename>/etc/libvirt/libvirtd.conf</filename>
+            </para>
+            <para>Set the following parameters:</para>
+            <programlisting>listen_tls = 0</programlisting>
+            <programlisting>listen_tcp = 1</programlisting>
+            <programlisting>tcp_port = "16059"</programlisting>
+            <programlisting>auth_tcp = "none"</programlisting>
+            <programlisting>mdns_adv = 0</programlisting>
+          </listitem>
+          <listitem>
+            <para>Turning on "listen_tcp" in libvirtd.conf is not enough, we have to change the parameters as well:</para>
+            <para>On RHEL or CentOS modify <filename>/etc/sysconfig/libvirtd</filename>:</para>
+            <para>Uncomment the following line:</para>
+            <programlisting>#LIBVIRTD_ARGS="--listen"</programlisting>
+            <para>On Ubuntu: modify <filename>/etc/init/libvirt-bin.conf</filename></para>
+            <para>Change the following line (at the end of the file):</para>
+            <programlisting>exec /usr/sbin/libvirtd -d</programlisting>
+            <para>to (just add -l)</para>
+            <programlisting>exec /usr/sbin/libvirtd -d -l</programlisting>
+          </listitem>
+          <listitem>
+            <para>Restart libvirt</para>
+            <para>In RHEL or CentOS:</para>
+            <programlisting><command>$ service libvirtd restart</command></programlisting>
+            <para>In Ubuntu:</para>
+            <programlisting><command>$ service libvirt-bin restart</command></programlisting>
+          </listitem>
+        </orderedlist>
+      </section>
+      <section id="sect-qig-KVM-Configuration-finish">
+        <title>KVM configuration complete</title>
+        <para>
+        That concludes our installation and configuration of KVM, and we'll now move to using the CloudStack UI
+        for the actual configuration of our cloud. 
+        </para>
+      </section>
+    </section>
+  </section>
+</chapter>

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/676d8744/docs/qig/en-US/qig.ent
----------------------------------------------------------------------
diff --git a/docs/qig/en-US/qig.ent b/docs/qig/en-US/qig.ent
new file mode 100644
index 0000000..3b1649a
--- /dev/null
+++ b/docs/qig/en-US/qig.ent
@@ -0,0 +1,22 @@
+<!-- Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+ 
+   http://www.apache.org/licenses/LICENSE-2.0
+ 
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied.  See the License for the
+ specific language governing permissions and limitations
+ under the License.
+-->
+
+<!ENTITY PRODUCT "Apache CloudStack">
+<!ENTITY BOOKID "Quick Install Guide">
+<!ENTITY YEAR "2013">
+<!ENTITY HOLDER "Apache Software Foundation">

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/676d8744/docs/qig/en-US/qig.xml
----------------------------------------------------------------------
diff --git a/docs/qig/en-US/qig.xml b/docs/qig/en-US/qig.xml
new file mode 100644
index 0000000..00dd2e4
--- /dev/null
+++ b/docs/qig/en-US/qig.xml
@@ -0,0 +1,36 @@
+<?xml version='1.0' encoding='utf-8' ?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd" [
+<!ENTITY % BOOK_ENTITIES SYSTEM "Runbook.ent">
+%BOOK_ENTITIES;
+]>
+
+<!-- Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+ 
+   http://www.apache.org/licenses/LICENSE-2.0
+ 
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied.  See the License for the
+ specific language governing permissions and limitations
+ under the License.
+-->
+
+
+<book>
+    <xi:include href="Book_Info.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
+    <xi:include href="Preface.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
+    <xi:include href="Overview.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
+        <xi:include href="Environment.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
+        <xi:include href="Management.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
+        <xi:include href="kvm.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
+        <xi:include href="config.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
+    <xi:include href="Revision_History.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
+<!--    <index />  -->
+</book>


[39/50] [abbrv] git commit: updated refs/heads/vmsync to f737019

Posted by ah...@apache.org.
build fix


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/03040348
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/03040348
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/03040348

Branch: refs/heads/vmsync
Commit: 030403482212801a1a6afeb294678d32bae19416
Parents: 9f12a25
Author: radhikap <ra...@citrix.com>
Authored: Mon Jul 1 14:06:53 2013 +0530
Committer: radhikap <ra...@citrix.com>
Committed: Mon Jul 1 14:07:21 2013 +0530

----------------------------------------------------------------------
 docs/en-US/networks.xml    | 1 +
 docs/en-US/portable-ip.xml | 9 ++++-----
 2 files changed, 5 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/03040348/docs/en-US/networks.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/networks.xml b/docs/en-US/networks.xml
index d1fc541..e00beac 100644
--- a/docs/en-US/networks.xml
+++ b/docs/en-US/networks.xml
@@ -48,6 +48,7 @@
   <xi:include href="dns-dhcp.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
   <xi:include href="vpn.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
   <xi:include href="elastic-ip.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
+  <xi:include href="portable-ip.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
   <xi:include href="pvlan.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
   <xi:include href="inter-vlan-routing.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
   <xi:include href="configure-vpc.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/03040348/docs/en-US/portable-ip.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/portable-ip.xml b/docs/en-US/portable-ip.xml
index 3c87e54..83d5b43 100644
--- a/docs/en-US/portable-ip.xml
+++ b/docs/en-US/portable-ip.xml
@@ -33,17 +33,16 @@
           <para>IP is statically allocated </para>
         </listitem>
         <listitem>
-          <para>Not required to be associated with a network </para>
+          <para>IP need not be associated with a network </para>
         </listitem>
         <listitem>
-          <para>Can transfer association across the networks </para>
+          <para>Network association is transferable across networks </para>
         </listitem>
         <listitem>
-          <para>Transfer IP across basic/advanced zones </para>
+          <para>IP is transferable across both Basic and Advanced zones </para>
         </listitem>
         <listitem>
-          <para>Transfer across VPC, non-VPC isolated/shared networks
-          </para>
+          <para>IP is transferable across VPC, non-VPC Isolated and Shared networks </para>
         </listitem>
         <listitem>
           <para/>


[22/50] [abbrv] git commit: updated refs/heads/vmsync to f737019

Posted by ah...@apache.org.
the plugin better not touch database


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/49ded7e4
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/49ded7e4
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/49ded7e4

Branch: refs/heads/vmsync
Commit: 49ded7e4a36789d13f4ab260bc45e8cd58967342
Parents: 57641d8
Author: Edison Su <su...@gmail.com>
Authored: Thu Jun 27 17:23:16 2013 -0700
Committer: Prasanna Santhanam <ts...@apache.org>
Committed: Sat Jun 29 15:09:43 2013 +0530

----------------------------------------------------------------------
 .../storage/image/BaseImageStoreDriverImpl.java |   6 +-
 .../driver/SimulatorImageStoreDriverImpl.java   | 100 ++++++-------------
 .../SamplePrimaryDataStoreDriverImpl.java       |   4 -
 .../com/cloud/storage/StorageManagerImpl.java   |  43 ++++----
 4 files changed, 52 insertions(+), 101 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/49ded7e4/engine/storage/src/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java
----------------------------------------------------------------------
diff --git a/engine/storage/src/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java b/engine/storage/src/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java
index 9db205b..8646247 100644
--- a/engine/storage/src/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java
+++ b/engine/storage/src/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java
@@ -68,7 +68,7 @@ public abstract class BaseImageStoreDriverImpl implements ImageStoreDriver {
         return null;
     }
 
-    class CreateContext<T> extends AsyncRpcContext<T> {
+    protected class CreateContext<T> extends AsyncRpcContext<T> {
         final DataObject data;
 
         public CreateContext(AsyncCompletionCallback<T> callback, DataObject data) {
@@ -92,7 +92,7 @@ public abstract class BaseImageStoreDriverImpl implements ImageStoreDriver {
         }
     }
 
-    protected Void createTemplateAsyncCallback(AsyncCallbackDispatcher<BaseImageStoreDriverImpl, DownloadAnswer> callback,
+    protected Void createTemplateAsyncCallback(AsyncCallbackDispatcher<? extends BaseImageStoreDriverImpl, DownloadAnswer> callback,
             CreateContext<CreateCmdResult> context) {
         DownloadAnswer answer = callback.getResult();
         DataObject obj = context.data;
@@ -139,7 +139,7 @@ public abstract class BaseImageStoreDriverImpl implements ImageStoreDriver {
         return null;
     }
 
-    protected Void createVolumeAsyncCallback(AsyncCallbackDispatcher<BaseImageStoreDriverImpl, DownloadAnswer> callback,
+    protected Void createVolumeAsyncCallback(AsyncCallbackDispatcher<? extends BaseImageStoreDriverImpl, DownloadAnswer> callback,
             CreateContext<CreateCmdResult> context) {
         DownloadAnswer answer = callback.getResult();
         DataObject obj = context.data;

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/49ded7e4/plugins/storage/image/simulator/src/org/apache/cloudstack/storage/datastore/driver/SimulatorImageStoreDriverImpl.java
----------------------------------------------------------------------
diff --git a/plugins/storage/image/simulator/src/org/apache/cloudstack/storage/datastore/driver/SimulatorImageStoreDriverImpl.java b/plugins/storage/image/simulator/src/org/apache/cloudstack/storage/datastore/driver/SimulatorImageStoreDriverImpl.java
index 8816fc2..d5fe8a1 100644
--- a/plugins/storage/image/simulator/src/org/apache/cloudstack/storage/datastore/driver/SimulatorImageStoreDriverImpl.java
+++ b/plugins/storage/image/simulator/src/org/apache/cloudstack/storage/datastore/driver/SimulatorImageStoreDriverImpl.java
@@ -26,28 +26,20 @@ import com.cloud.agent.api.to.DataStoreTO;
 import com.cloud.agent.api.to.NfsTO;
 import com.cloud.storage.Storage;
 import com.cloud.storage.VMTemplateStorageResourceAssoc;
-import com.cloud.storage.VMTemplateVO;
-import com.cloud.storage.VolumeVO;
 import com.cloud.storage.dao.VMTemplateDao;
 import com.cloud.storage.dao.VolumeDao;
 import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
-import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine;
 import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher;
 import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
-import org.apache.cloudstack.framework.async.AsyncRpcContext;
 import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao;
-import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO;
 import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao;
-import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO;
 import org.apache.cloudstack.storage.image.BaseImageStoreDriverImpl;
 import org.apache.cloudstack.storage.image.store.ImageStoreImpl;
-import org.apache.cloudstack.storage.to.TemplateObjectTO;
-import org.apache.cloudstack.storage.to.VolumeObjectTO;
 
 import javax.inject.Inject;
-import java.util.Date;
+import java.util.UUID;
 
 public class SimulatorImageStoreDriverImpl extends BaseImageStoreDriverImpl {
 
@@ -69,82 +61,46 @@ public class SimulatorImageStoreDriverImpl extends BaseImageStoreDriverImpl {
         return nfsTO;
     }
 
-    class CreateContext<T> extends AsyncRpcContext<T> {
-        final DataObject data;
 
-        public CreateContext(AsyncCompletionCallback<T> callback, DataObject data) {
-            super(callback);
-            this.data = data;
-        }
-    }
 
     public String createEntityExtractUrl(DataStore store, String installPath, Storage.ImageFormat format) {
         return null;
     }
 
     @Override
-    public void createAsync(DataObject data, AsyncCompletionCallback<CreateCmdResult> callback) {
-        CreateContext<CreateCmdResult> context = new CreateContext<CreateCmdResult>(callback, data);
-        AsyncCallbackDispatcher<SimulatorImageStoreDriverImpl, DownloadAnswer> caller = AsyncCallbackDispatcher
-                .create(this);
-        caller.setContext(context);
+    public void createAsync(DataStore store, DataObject data, AsyncCompletionCallback<CreateCmdResult> callback) {
         if (data.getType() == DataObjectType.TEMPLATE) {
-            this.createTemplateAsyncCallback(caller, context);
+            this.createTemplate(data, callback);
         } else if (data.getType() == DataObjectType.VOLUME) {
-            this.createVolumeAsyncCallback(caller, context);
+            this.createVolume(data, callback);
         }
     }
 
-    protected Void createTemplateAsyncCallback(AsyncCallbackDispatcher<SimulatorImageStoreDriverImpl, DownloadAnswer> callback,
-                                               CreateContext<CreateCmdResult> context) {
-        DataObject obj = context.data;
-        DataStore store = obj.getDataStore();
-        TemplateObjectTO templateTO = (TemplateObjectTO)context.data.getTO();
-
-        TemplateDataStoreVO tmpltStoreVO = _templateStoreDao.findByStoreTemplate(store.getId(), obj.getId());
-        if (tmpltStoreVO != null) {
-            TemplateDataStoreVO updateBuilder = _templateStoreDao.createForUpdate();
-            updateBuilder.setDownloadPercent(100);
-            updateBuilder.setDownloadState(VMTemplateStorageResourceAssoc.Status.DOWNLOADED);
-            updateBuilder.setLastUpdated(new Date());
-            updateBuilder.setSize(new Long(5 * 1024L * 1024L));
-            updateBuilder.setPhysicalSize(new Long(5 * 1024L * 1024L));
-            updateBuilder.setDownloadUrl(templateTO.getOrigUrl());
-            updateBuilder.setInstallPath(templateTO.getPath());
-            updateBuilder.setTemplateId(templateTO.getId());
-            updateBuilder.setState(ObjectInDataStoreStateMachine.State.Ready);
-            _templateStoreDao.update(tmpltStoreVO.getId(), updateBuilder);
-            // update size in vm_template table
-            VMTemplateVO tmlptUpdater = _templateDao.createForUpdate();
-            tmlptUpdater.setSize(new Long(5 * 1024l * 1024l));
-            _templateDao.update(obj.getId(), tmlptUpdater);
-        }
-        return null;
+    protected void createTemplate(DataObject data, AsyncCompletionCallback<CreateCmdResult> callback) {
+        CreateContext<CreateCmdResult> context = new CreateContext<CreateCmdResult>(callback, data);
+        AsyncCallbackDispatcher<SimulatorImageStoreDriverImpl, DownloadAnswer> caller = AsyncCallbackDispatcher
+                .create(this);
+        caller.setContext(context);
+        caller.setCallback(caller.getTarget().createTemplateAsyncCallback(null, null));
+        String path =  UUID.randomUUID().toString();
+        Long size = new Long(5 * 1024L * 1024L);
+        DownloadAnswer answer = new DownloadAnswer(null, 100, null, VMTemplateStorageResourceAssoc.Status.DOWNLOADED,
+                path, path, size, size, null);
+        caller.complete(answer);
+        return;
     }
 
-    protected Void createVolumeAsyncCallback(AsyncCallbackDispatcher<SimulatorImageStoreDriverImpl, DownloadAnswer> callback,
-                                             CreateContext<CreateCmdResult> context) {
-        DataObject obj = context.data;
-        DataStore store = obj.getDataStore();
-        VolumeObjectTO volumeTO = (VolumeObjectTO) context.data.getTO();
-
-        VolumeDataStoreVO volStoreVO = _volumeStoreDao.findByStoreVolume(store.getId(), obj.getId());
-        if (volStoreVO != null) {
-            VolumeDataStoreVO updateBuilder = _volumeStoreDao.createForUpdate();
-            updateBuilder.setDownloadPercent(100);
-            updateBuilder.setDownloadState(VMTemplateStorageResourceAssoc.Status.DOWNLOADED);
-            updateBuilder.setLastUpdated(new Date());
-            updateBuilder.setInstallPath(volumeTO.getPath());
-            updateBuilder.setVolumeId(volumeTO.getVolumeId());
-            updateBuilder.setSize(volumeTO.getSize());
-            updateBuilder.setPhysicalSize(volumeTO.getSize());
-            updateBuilder.setState(ObjectInDataStoreStateMachine.State.Ready);
-            _volumeStoreDao.update(volStoreVO.getId(), updateBuilder);
-            // update size in volume table
-            VolumeVO volUpdater = _volumeDao.createForUpdate();
-            volUpdater.setSize(volumeTO.getSize());
-            _volumeDao.update(obj.getId(), volUpdater);
-        }
-        return null;
+    protected void createVolume(DataObject data, AsyncCompletionCallback<CreateCmdResult> callback) {
+        CreateContext<CreateCmdResult> context = new CreateContext<CreateCmdResult>(callback, data);
+        AsyncCallbackDispatcher<SimulatorImageStoreDriverImpl, DownloadAnswer> caller = AsyncCallbackDispatcher
+                .create(this);
+        caller.setContext(context);
+        caller.setCallback(caller.getTarget().createVolumeAsyncCallback(null, null));
+        String path =  UUID.randomUUID().toString();
+        Long size = new Long(5 * 1024L * 1024L);
+        DownloadAnswer answer = new DownloadAnswer(null, 100, null, VMTemplateStorageResourceAssoc.Status.DOWNLOADED,
+                path, path, size, size, null);
+        caller.complete(answer);
+        return;
     }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/49ded7e4/plugins/storage/volume/sample/src/org/apache/cloudstack/storage/datastore/driver/SamplePrimaryDataStoreDriverImpl.java
----------------------------------------------------------------------
diff --git a/plugins/storage/volume/sample/src/org/apache/cloudstack/storage/datastore/driver/SamplePrimaryDataStoreDriverImpl.java b/plugins/storage/volume/sample/src/org/apache/cloudstack/storage/datastore/driver/SamplePrimaryDataStoreDriverImpl.java
index 1d00c97..ece7b26 100644
--- a/plugins/storage/volume/sample/src/org/apache/cloudstack/storage/datastore/driver/SamplePrimaryDataStoreDriverImpl.java
+++ b/plugins/storage/volume/sample/src/org/apache/cloudstack/storage/datastore/driver/SamplePrimaryDataStoreDriverImpl.java
@@ -54,16 +54,12 @@ public class SamplePrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
         return null;
     }
 
-<<<<<<< HEAD
     @Override
     public ChapInfo getChapInfo(VolumeInfo volumeInfo) {
         return null;
     }
 
-    private class CreateVolumeContext<T> extends AsyncRpcConext<T> {
-=======
     private class CreateVolumeContext<T> extends AsyncRpcContext<T> {
->>>>>>> Fix typo in class name
         private final DataObject volume;
         public CreateVolumeContext(AsyncCompletionCallback<T> callback, DataObject volume) {
             super(callback);

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/49ded7e4/server/src/com/cloud/storage/StorageManagerImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/storage/StorageManagerImpl.java b/server/src/com/cloud/storage/StorageManagerImpl.java
index ff323cb..138c6d7 100755
--- a/server/src/com/cloud/storage/StorageManagerImpl.java
+++ b/server/src/com/cloud/storage/StorageManagerImpl.java
@@ -570,7 +570,6 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
         } else {
             s_logger.debug("Storage cleanup is not enabled, so the storage cleanup thread is not being scheduled.");
         }
-
         return true;
     }
 
@@ -579,7 +578,6 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
         if (_storageCleanupEnabled) {
             _executor.shutdown();
         }
-
         return true;
     }
 
@@ -591,7 +589,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
         if (dc == null || !dc.isLocalStorageEnabled()) {
             return null;
         }
-        DataStore store = null;
+        DataStore store;
         try {
             StoragePoolVO pool = _storagePoolDao.findPoolByHostPath(host.getDataCenterId(), host.getPodId(), pInfo.getHost(), pInfo.getHostPath(),
                     pInfo.getUuid());
@@ -693,21 +691,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
             }
         }
 
-        Map ds = cmd.getDetails();
-        Map<String, String> details = new HashMap<String, String>();
-        if (ds != null) {
-            Collection detailsCollection = ds.values();
-            Iterator it = detailsCollection.iterator();
-            while (it.hasNext()) {
-                HashMap d = (HashMap) it.next();
-                Iterator it2 = d.entrySet().iterator();
-                while (it2.hasNext()) {
-                    Map.Entry entry = (Map.Entry) it2.next();
-                    details.put((String) entry.getKey(), (String) entry.getValue());
-                }
-            }
-        }
-
+        Map<String, String> details = extractApiParamAsMap(cmd.getDetails());
         DataCenterVO zone = _dcDao.findById(cmd.getZoneId());
         if (zone == null) {
             throw new InvalidParameterValueException("unable to find zone by id " + zoneId);
@@ -732,10 +716,9 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
         params.put("capacityIops", cmd.getCapacityIops());
 
         DataStoreLifeCycle lifeCycle = storeProvider.getDataStoreLifeCycle();
-        DataStore store = null;
+        DataStore store;
         try {
             store = lifeCycle.initialize(params);
-
             if (scopeType == ScopeType.CLUSTER) {
                 ClusterScope clusterScope = new ClusterScope(clusterId, podId, zoneId);
                 lifeCycle.attachCluster(store, clusterScope);
@@ -751,6 +734,23 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
         return (PrimaryDataStoreInfo) dataStoreMgr.getDataStore(store.getId(), DataStoreRole.Primary);
     }
 
+    private Map<String, String> extractApiParamAsMap(Map ds) {
+        Map<String, String> details = new HashMap<String, String>();
+        if (ds != null) {
+            Collection detailsCollection = ds.values();
+            Iterator it = detailsCollection.iterator();
+            while (it.hasNext()) {
+                HashMap d = (HashMap) it.next();
+                Iterator it2 = d.entrySet().iterator();
+                while (it2.hasNext()) {
+                    Map.Entry entry = (Map.Entry) it2.next();
+                    details.put((String) entry.getKey(), (String) entry.getValue());
+                }
+            }
+        }
+        return details;
+    }
+
     @Override
     public PrimaryDataStoreInfo updateStoragePool(UpdateStoragePoolCmd cmd) throws IllegalArgumentException {
         // Input validation
@@ -1637,7 +1637,6 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
         }
 
         Long dcId = cmd.getZoneId();
-        String url = cmd.getUrl();
         Map details = cmd.getDetails();
         ScopeType scopeType = ScopeType.ZONE;
         if (dcId == null) {
@@ -1686,7 +1685,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
         params.put("role", DataStoreRole.Image);
 
         DataStoreLifeCycle lifeCycle = storeProvider.getDataStoreLifeCycle();
-        DataStore store = null;
+        DataStore store;
         try {
             store = lifeCycle.initialize(params);
         } catch (Exception e) {


[06/50] [abbrv] SolidFire plug-in and related changes

Posted by ah...@apache.org.
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/util/SolidFireUtil.java
----------------------------------------------------------------------
diff --git a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/util/SolidFireUtil.java b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/util/SolidFireUtil.java
new file mode 100644
index 0000000..839c5a5
--- /dev/null
+++ b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/util/SolidFireUtil.java
@@ -0,0 +1,901 @@
+package org.apache.cloudstack.storage.datastore.util;
+
+import java.io.BufferedReader;
+import java.io.InputStreamReader;
+import java.net.URI;
+import java.security.KeyManagementException;
+import java.security.NoSuchAlgorithmException;
+import java.security.SecureRandom;
+import java.security.cert.CertificateException;
+import java.security.cert.X509Certificate;
+import java.util.List;
+import java.util.ArrayList;
+
+import javax.net.ssl.SSLContext;
+import javax.net.ssl.TrustManager;
+import javax.net.ssl.X509TrustManager;
+
+import org.apache.http.HttpResponse;
+import org.apache.http.auth.AuthScope;
+import org.apache.http.auth.UsernamePasswordCredentials;
+import org.apache.http.client.methods.HttpPost;
+import org.apache.http.conn.scheme.Scheme;
+import org.apache.http.conn.scheme.SchemeRegistry;
+import org.apache.http.conn.ssl.SSLSocketFactory;
+import org.apache.http.entity.StringEntity;
+import org.apache.http.impl.client.DefaultHttpClient;
+import org.apache.http.impl.conn.BasicClientConnectionManager;
+
+import com.google.gson.Gson;
+import com.google.gson.GsonBuilder;
+
+public class SolidFireUtil
+{
+	public static final String PROVIDER_NAME = "SolidFire";
+
+    public static final String MANAGEMENT_VIP = "mVip";
+    public static final String STORAGE_VIP = "sVip";
+
+    public static final String MANAGEMENT_PORT = "mPort";
+    public static final String STORAGE_PORT = "sPort";
+
+    public static final String CLUSTER_ADMIN_USERNAME = "clusterAdminUsername";
+    public static final String CLUSTER_ADMIN_PASSWORD = "clusterAdminPassword";
+
+    public static final String ACCOUNT_ID = "accountId";
+
+    public static final String CHAP_INITIATOR_USERNAME = "chapInitiatorUsername";
+    public static final String CHAP_INITIATOR_SECRET = "chapInitiatorSecret";
+
+    public static final String CHAP_TARGET_USERNAME = "chapTargetUsername";
+    public static final String CHAP_TARGET_SECRET = "chapTargetSecret";
+
+	public static long createSolidFireVolume(String strSfMvip, int iSfPort, String strSfAdmin, String strSfPassword,
+			String strSfVolumeName, long lSfAccountId, long lTotalSize, boolean bEnable512e,
+			long lMinIops, long lMaxIops, long lBurstIops) throws Exception
+	{
+		final Gson gson = new GsonBuilder().create();
+		
+		VolumeToCreate volumeToCreate = new VolumeToCreate(strSfVolumeName, lSfAccountId, lTotalSize, bEnable512e,
+			lMinIops, lMaxIops, lBurstIops);
+		
+		String strVolumeToCreateJson = gson.toJson(volumeToCreate);
+		
+		String strVolumeCreateResultJson = executeJsonRpc(strVolumeToCreateJson, strSfMvip, iSfPort, strSfAdmin, strSfPassword);
+		
+		VolumeCreateResult volumeCreateResult = gson.fromJson(strVolumeCreateResultJson, VolumeCreateResult.class);
+		
+		verifyResult(volumeCreateResult.result, strVolumeCreateResultJson, gson);
+		
+		return volumeCreateResult.result.volumeID;
+	}
+	
+	public static void deleteSolidFireVolume(String strSfMvip, int iSfPort, String strSfAdmin, String strSfPassword, long lVolumeId) throws Exception
+	{
+		final Gson gson = new GsonBuilder().create();
+		
+		VolumeToDelete volumeToDelete = new VolumeToDelete(lVolumeId);
+		
+		String strVolumeToDeleteJson = gson.toJson(volumeToDelete);
+		
+		executeJsonRpc(strVolumeToDeleteJson, strSfMvip, iSfPort, strSfAdmin, strSfPassword);
+	}
+
+   public static void purgeSolidFireVolume(String strSfMvip, int iSfPort, String strSfAdmin, String strSfPassword, long lVolumeId) throws Exception
+    {
+        final Gson gson = new GsonBuilder().create();
+
+        VolumeToPurge volumeToPurge = new VolumeToPurge(lVolumeId);
+
+        String strVolumeToPurgeJson = gson.toJson(volumeToPurge);
+
+        executeJsonRpc(strVolumeToPurgeJson, strSfMvip, iSfPort, strSfAdmin, strSfPassword);
+    }
+
+	public static SolidFireVolume getSolidFireVolume(String strSfMvip, int iSfPort, String strSfAdmin, String strSfPassword, long lVolumeId) throws Exception
+	{
+		final Gson gson = new GsonBuilder().create();
+		
+		VolumeToGet volumeToGet = new VolumeToGet(lVolumeId);
+		
+		String strVolumeToGetJson = gson.toJson(volumeToGet);
+		
+		String strVolumeGetResultJson = executeJsonRpc(strVolumeToGetJson, strSfMvip, iSfPort, strSfAdmin, strSfPassword);
+		
+		VolumeGetResult volumeGetResult = gson.fromJson(strVolumeGetResultJson, VolumeGetResult.class);
+		
+		verifyResult(volumeGetResult.result, strVolumeGetResultJson, gson);
+		
+		String strVolumeName = getVolumeName(volumeGetResult, lVolumeId);
+		String strVolumeIqn = getVolumeIqn(volumeGetResult, lVolumeId);
+		long lAccountId = getVolumeAccountId(volumeGetResult, lVolumeId);
+		String strVolumeStatus = getVolumeStatus(volumeGetResult, lVolumeId);
+		
+		return new SolidFireVolume(lVolumeId, strVolumeName, strVolumeIqn, lAccountId, strVolumeStatus);
+	}
+
+	public static List<SolidFireVolume> getSolidFireVolumesForAccountId(String strSfMvip, int iSfPort,
+	        String strSfAdmin, String strSfPassword, long lAccountId) throws Exception
+	{
+        final Gson gson = new GsonBuilder().create();
+
+        VolumesToGetForAccount volumesToGetForAccount = new VolumesToGetForAccount(lAccountId);
+
+        String strVolumesToGetForAccountJson = gson.toJson(volumesToGetForAccount);
+
+        String strVolumesGetForAccountResultJson = executeJsonRpc(strVolumesToGetForAccountJson, strSfMvip, iSfPort,
+                strSfAdmin, strSfPassword);
+
+        VolumeGetResult volumeGetResult = gson.fromJson(strVolumesGetForAccountResultJson, VolumeGetResult.class);
+
+        verifyResult(volumeGetResult.result, strVolumesGetForAccountResultJson, gson);
+
+        List<SolidFireVolume> sfVolumes = new ArrayList<SolidFireVolume>();
+
+        for (VolumeGetResult.Result.Volume volume : volumeGetResult.result.volumes) {
+            sfVolumes.add(new SolidFireVolume(volume.volumeID, volume.name, volume.iqn, volume.accountID, volume.status));
+        }
+
+        return sfVolumes;
+	}
+
+	private static final String ACTIVE = "active";
+
+	public static class SolidFireVolume
+	{
+	    private final long _id;
+		private final String _name;
+		private final String _iqn;
+		private final long _accountId;
+		private final String _status;
+		
+		public SolidFireVolume(long id, String name, String iqn,
+		        long accountId, String status)
+		{
+			_id = id;
+			_name = name;
+			_iqn = "/" + iqn + "/0";
+			_accountId = accountId;
+			_status = status;
+		}
+		
+		public long getId()
+		{
+			return _id;
+		}
+		
+		public String getName()
+		{
+			return _name;
+		}
+		
+		public String getIqn()
+		{
+			return _iqn;
+		}
+		
+		public long getAccountId()
+		{
+		    return _accountId;
+		}
+		
+		public boolean isActive()
+		{
+		    return ACTIVE.equalsIgnoreCase(_status);
+		}
+		
+		@Override
+        public int hashCode() {
+            return (int)_id;
+        }
+        
+        @Override
+        public String toString() {
+            return _name;
+        }
+        
+        @Override
+        public boolean equals(Object obj) {
+            if (!(obj instanceof SolidFireVolume)) {
+                return false;
+            }
+            
+            SolidFireVolume sfv = (SolidFireVolume)obj;
+            
+            if (_id == sfv._id && _name.equals(sfv._name) &&
+                _iqn.equals(sfv._iqn) && isActive() == sfv.isActive()) {
+                return true;
+            }
+            
+            return false;
+        }
+	}
+	
+	public static long createSolidFireAccount(String strSfMvip, int iSfPort, String strSfAdmin, String strSfPassword,
+	        String strAccountName) throws Exception
+	{
+		final Gson gson = new GsonBuilder().create();
+		
+		AccountToAdd accountToAdd = new AccountToAdd(strAccountName);
+		
+		String strAccountAddJson = gson.toJson(accountToAdd);
+		
+		String strAccountAddResultJson = executeJsonRpc(strAccountAddJson, strSfMvip, iSfPort, strSfAdmin, strSfPassword);
+		
+		AccountAddResult accountAddResult = gson.fromJson(strAccountAddResultJson, AccountAddResult.class);
+		
+		verifyResult(accountAddResult.result, strAccountAddResultJson, gson);
+		
+		return accountAddResult.result.accountID;
+	}
+	
+	public static void deleteSolidFireAccount(String strSfMvip, int iSfPort, String strSfAdmin, String strSfPassword,
+	        long lAccountId) throws Exception
+	{
+		final Gson gson = new GsonBuilder().create();
+		
+		AccountToRemove accountToRemove = new AccountToRemove(lAccountId);
+		
+		String strAccountToRemoveJson = gson.toJson(accountToRemove);
+		
+		executeJsonRpc(strAccountToRemoveJson, strSfMvip, iSfPort, strSfAdmin, strSfPassword);
+	}
+	
+	public static SolidFireAccount getSolidFireAccountById(String strSfMvip, int iSfPort, String strSfAdmin, String strSfPassword,
+	        long lSfAccountId) throws Exception
+	{
+		final Gson gson = new GsonBuilder().create();
+		
+		AccountToGetById accountToGetById = new AccountToGetById(lSfAccountId);
+		
+		String strAccountToGetByIdJson = gson.toJson(accountToGetById);
+		
+		String strAccountGetByIdResultJson = executeJsonRpc(strAccountToGetByIdJson, strSfMvip, iSfPort, strSfAdmin, strSfPassword);
+		
+		AccountGetResult accountGetByIdResult = gson.fromJson(strAccountGetByIdResultJson, AccountGetResult.class);
+		
+		verifyResult(accountGetByIdResult.result, strAccountGetByIdResultJson, gson);
+		
+		String strSfAccountName = accountGetByIdResult.result.account.username;
+		String strSfAccountInitiatorSecret = accountGetByIdResult.result.account.initiatorSecret;
+		String strSfAccountTargetSecret = accountGetByIdResult.result.account.targetSecret;
+		
+		return new SolidFireAccount(lSfAccountId, strSfAccountName, strSfAccountInitiatorSecret, strSfAccountTargetSecret);
+	}
+
+	public static SolidFireAccount getSolidFireAccountByName(String strSfMvip, int iSfPort, String strSfAdmin, String strSfPassword,
+	        String strSfAccountName) throws Exception
+    {
+        final Gson gson = new GsonBuilder().create();
+
+        AccountToGetByName accountToGetByName = new AccountToGetByName(strSfAccountName);
+
+        String strAccountToGetByNameJson = gson.toJson(accountToGetByName);
+
+        String strAccountGetByNameResultJson = executeJsonRpc(strAccountToGetByNameJson, strSfMvip, iSfPort, strSfAdmin, strSfPassword);
+
+        AccountGetResult accountGetByNameResult = gson.fromJson(strAccountGetByNameResultJson, AccountGetResult.class);
+
+        verifyResult(accountGetByNameResult.result, strAccountGetByNameResultJson, gson);
+
+        long lSfAccountId = accountGetByNameResult.result.account.accountID;
+        String strSfAccountInitiatorSecret = accountGetByNameResult.result.account.initiatorSecret;
+        String strSfAccountTargetSecret = accountGetByNameResult.result.account.targetSecret;
+
+        return new SolidFireAccount(lSfAccountId, strSfAccountName, strSfAccountInitiatorSecret, strSfAccountTargetSecret);
+    }
+	
+	public static class SolidFireAccount
+	{
+		private final long _id;
+		private final String _name;
+		private final String _initiatorSecret;
+		private final String _targetSecret;
+		
+		public SolidFireAccount(long id, String name, String initiatorSecret, String targetSecret)
+		{
+			_id = id;
+			_name = name;
+			_initiatorSecret = initiatorSecret;
+			_targetSecret = targetSecret;
+		}
+		
+		public long getId()
+		{
+			return _id;
+		}
+		
+		public String getName()
+		{
+			return _name;
+		}
+		
+		public String getInitiatorSecret()
+		{
+			return _initiatorSecret;
+		}
+		
+		public String getTargetSecret()
+		{
+			return _targetSecret;
+		}
+		
+		@Override
+		public int hashCode() {
+		    return (int)_id;
+		}
+		
+		@Override
+		public String toString() {
+		    return _name;
+		}
+		
+		@Override
+		public boolean equals(Object obj) {
+		    if (!(obj instanceof SolidFireAccount)) {
+		        return false;
+		    }
+		    
+		    SolidFireAccount sfa = (SolidFireAccount)obj;
+		    
+		    if (_id == sfa._id && _name.equals(sfa._name) &&
+		        _initiatorSecret.equals(sfa._initiatorSecret) &&
+		        _targetSecret.equals(sfa._targetSecret)) {
+		        return true;
+		    }
+		    
+		    return false;
+		}
+	}
+
+    public static List<SolidFireVolume> getDeletedVolumes(String strSfMvip, int iSfPort, String strSfAdmin, String strSfPassword) throws Exception
+    {
+        final Gson gson = new GsonBuilder().create();
+
+        ListDeletedVolumes listDeletedVolumes = new ListDeletedVolumes();
+
+        String strListDeletedVolumesJson = gson.toJson(listDeletedVolumes);
+
+        String strListDeletedVolumesResultJson = executeJsonRpc(strListDeletedVolumesJson, strSfMvip, iSfPort,
+                strSfAdmin, strSfPassword);
+
+        VolumeGetResult volumeGetResult = gson.fromJson(strListDeletedVolumesResultJson, VolumeGetResult.class);
+
+        verifyResult(volumeGetResult.result, strListDeletedVolumesResultJson, gson);
+
+        List<SolidFireVolume> deletedVolumes = new ArrayList<SolidFireVolume> ();
+
+        for (VolumeGetResult.Result.Volume volume : volumeGetResult.result.volumes) {
+            deletedVolumes.add(new SolidFireVolume(volume.volumeID, volume.name, volume.iqn, volume.accountID, volume.status));
+        }
+
+        return deletedVolumes;
+    }
+	
+	public static long createSolidFireVag(String strSfMvip, int iSfPort, String strSfAdmin, String strSfPassword, String strVagName) throws Exception
+	{
+		final Gson gson = new GsonBuilder().create();
+		
+		VagToCreate vagToCreate = new VagToCreate(strVagName);
+		
+		String strVagCreateJson = gson.toJson(vagToCreate);
+		
+		String strVagCreateResultJson = executeJsonRpc(strVagCreateJson, strSfMvip, iSfPort, strSfAdmin, strSfPassword);
+		
+		VagCreateResult vagCreateResult = gson.fromJson(strVagCreateResultJson, VagCreateResult.class);
+		
+		verifyResult(vagCreateResult.result, strVagCreateResultJson, gson);
+		
+		return vagCreateResult.result.volumeAccessGroupID;
+	}
+	
+	public static void deleteSolidFireVag(String strSfMvip, int iSfPort, String strSfAdmin, String strSfPassword, long lVagId) throws Exception
+	{
+		final Gson gson = new GsonBuilder().create();
+		
+		VagToDelete vagToDelete = new VagToDelete(lVagId);
+		
+		String strVagToDeleteJson = gson.toJson(vagToDelete);
+		
+		executeJsonRpc(strVagToDeleteJson, strSfMvip, iSfPort, strSfAdmin, strSfPassword);
+	}
+	
+	@SuppressWarnings("unused")
+	private static final class VolumeToCreate
+	{
+		private final String method = "CreateVolume";
+		private final VolumeToCreateParams params;
+		
+		private VolumeToCreate(final String strVolumeName, final long lAccountId, final long lTotalSize,
+				final boolean bEnable512e, final long lMinIOPS, final long lMaxIOPS, final long lBurstIOPS)
+		{
+			params = new VolumeToCreateParams(strVolumeName, lAccountId, lTotalSize, bEnable512e,
+					lMinIOPS, lMaxIOPS, lBurstIOPS);
+		}
+		
+		private static final class VolumeToCreateParams
+		{
+			private final String name;
+			private final long accountID;
+			private final long totalSize;
+			private final boolean enable512e;
+			private final VolumeToCreateParamsQoS qos;
+			
+			private VolumeToCreateParams(final String strVolumeName, final long lAccountId, final long lTotalSize,
+					final boolean bEnable512e, final long lMinIOPS, final long lMaxIOPS, final long lBurstIOPS)
+			{
+				name = strVolumeName;
+				accountID = lAccountId;
+				totalSize = lTotalSize;
+				enable512e = bEnable512e;
+				
+				qos = new VolumeToCreateParamsQoS(lMinIOPS, lMaxIOPS, lBurstIOPS);
+			}
+			
+			private static final class VolumeToCreateParamsQoS
+			{
+				private final long minIOPS;
+				private final long maxIOPS;
+				private final long burstIOPS;
+				
+				private VolumeToCreateParamsQoS(final long lMinIOPS, final long lMaxIOPS, final long lBurstIOPS)
+				{
+					minIOPS = lMinIOPS;
+					maxIOPS = lMaxIOPS;
+					burstIOPS = lBurstIOPS;
+				}
+			}
+		}
+	}
+	
+	@SuppressWarnings("unused")
+	private static final class VolumeToDelete
+	{
+		private final String method = "DeleteVolume";
+		private final VolumeToDeleteParams params;
+		
+		private VolumeToDelete(final long lVolumeId)
+		{
+			params = new VolumeToDeleteParams(lVolumeId);
+		}
+		
+		private static final class VolumeToDeleteParams
+		{
+			private long volumeID;
+			
+			private VolumeToDeleteParams(final long lVolumeId)
+			{
+				volumeID = lVolumeId;
+			}
+		}
+	}
+
+    @SuppressWarnings("unused")
+    private static final class ListDeletedVolumes
+    {
+        private final String method = "ListDeletedVolumes";
+    }
+
+    @SuppressWarnings("unused")
+    private static final class VolumeToPurge
+    {
+        private final String method = "PurgeDeletedVolume";
+        private final VolumeToPurgeParams params;
+
+        private VolumeToPurge(final long lVolumeId)
+        {
+            params = new VolumeToPurgeParams(lVolumeId);
+        }
+
+        private static final class VolumeToPurgeParams
+        {
+            private long volumeID;
+
+            private VolumeToPurgeParams(final long lVolumeId)
+            {
+                volumeID = lVolumeId;
+            }
+        }
+    }
+
+	@SuppressWarnings("unused")
+	private static final class VolumeToGet
+	{
+		private final String method = "ListActiveVolumes";
+		private final VolumeToGetParams params;
+		
+		private VolumeToGet(final long lVolumeId)
+		{
+			params = new VolumeToGetParams(lVolumeId);
+		}
+		
+		private static final class VolumeToGetParams
+		{
+			private final long startVolumeID;
+			private final long limit = 1;
+			
+			private VolumeToGetParams(final long lVolumeId)
+			{
+				startVolumeID = lVolumeId;
+			}
+		}
+	}
+	
+    @SuppressWarnings("unused")
+    private static final class VolumesToGetForAccount
+    {
+        private final String method = "ListVolumesForAccount";
+        private final VolumesToGetForAccountParams params;
+
+        private VolumesToGetForAccount(final long lAccountId)
+        {
+            params = new VolumesToGetForAccountParams(lAccountId);
+        }
+
+        private static final class VolumesToGetForAccountParams
+        {
+            private final long accountID;
+
+            private VolumesToGetForAccountParams(final long lAccountId)
+            {
+                accountID = lAccountId;
+            }
+        }
+    }
+
+	@SuppressWarnings("unused")
+	private static final class AccountToAdd
+	{
+		private final String method = "AddAccount";
+		private final AccountToAddParams params;
+		
+		private AccountToAdd(final String strAccountName)
+		{
+			params = new AccountToAddParams(strAccountName);
+		}
+		
+		private static final class AccountToAddParams
+		{
+			private final String username;
+			
+			private AccountToAddParams(final String strAccountName)
+			{
+				username = strAccountName;
+			}
+		}
+	}
+	
+	@SuppressWarnings("unused")
+	private static final class AccountToRemove
+	{
+		private final String method = "RemoveAccount";
+		private final AccountToRemoveParams params;
+		
+		private AccountToRemove(final long lAccountId)
+		{
+			params = new AccountToRemoveParams(lAccountId);
+		}
+		
+		private static final class AccountToRemoveParams
+		{
+			private long accountID;
+			
+			private AccountToRemoveParams(final long lAccountId)
+			{
+				accountID = lAccountId;
+			}
+		}
+	}
+	
+	@SuppressWarnings("unused")
+	private static final class AccountToGetById
+	{
+		private final String method = "GetAccountByID";
+		private final AccountToGetByIdParams params;
+		
+		private AccountToGetById(final long lAccountId)
+		{
+			params = new AccountToGetByIdParams(lAccountId);
+		}
+		
+		private static final class AccountToGetByIdParams
+		{
+			private final long accountID;
+			
+			private AccountToGetByIdParams(final long lAccountId)
+			{
+				accountID = lAccountId;
+			}
+		}
+	}
+	
+    @SuppressWarnings("unused")
+    private static final class AccountToGetByName
+    {
+        private final String method = "GetAccountName";
+        private final AccountToGetByNameParams params;
+
+        private AccountToGetByName(final String strUsername)
+        {
+            params = new AccountToGetByNameParams(strUsername);
+        }
+
+        private static final class AccountToGetByNameParams
+        {
+            private final String username;
+
+            private AccountToGetByNameParams(final String strUsername)
+            {
+                username = strUsername;
+            }
+        }
+    }
+
+	@SuppressWarnings("unused")
+	private static final class VagToCreate
+	{
+		private final String method = "CreateVolumeAccessGroup";
+		private final VagToCreateParams params;
+		
+		private VagToCreate(final String strVagName)
+		{
+			params = new VagToCreateParams(strVagName);
+		}
+		
+		private static final class VagToCreateParams
+		{
+			private final String name;
+			
+			private VagToCreateParams(final String strVagName)
+			{
+				name = strVagName;
+			}
+		}
+	}
+	
+	@SuppressWarnings("unused")
+	private static final class VagToDelete
+	{
+		private final String method = "DeleteVolumeAccessGroup";
+		private final VagToDeleteParams params;
+		
+		private VagToDelete(final long lVagId)
+		{
+			params = new VagToDeleteParams(lVagId);
+		}
+		
+		private static final class VagToDeleteParams
+		{
+			private long volumeAccessGroupID;
+			
+			private VagToDeleteParams(final long lVagId)
+			{
+				volumeAccessGroupID = lVagId;
+			}
+		}
+	}
+	
+	private static final class VolumeCreateResult
+	{
+		private Result result;
+		
+		private static final class Result
+		{
+			private long volumeID;
+		}
+	}
+	
+	private static final class VolumeGetResult
+	{
+		private Result result;
+		
+		private static final class Result
+		{
+			private Volume[] volumes;
+			
+			private static final class Volume
+			{
+				private long volumeID;
+				private String name;
+				private String iqn;
+				private long accountID;
+				private String status;
+			}
+		}
+	}
+	
+	private static final class AccountAddResult
+	{
+		private Result result;
+		
+		private static final class Result
+		{
+			private long accountID;
+		}
+	}
+	
+	private static final class AccountGetResult
+	{
+		private Result result;
+		
+		private static final class Result
+		{
+			private Account account;
+			
+			private static final class Account
+			{
+			    private long accountID;
+				private String username;
+				private String initiatorSecret;
+				private String targetSecret;
+			}
+		}
+	}
+	
+	private static final class VagCreateResult
+	{
+		private Result result;
+		
+		private static final class Result
+		{
+			private long volumeAccessGroupID;
+		}
+	}
+	
+	private static final class JsonError
+	{
+		private Error error;
+		
+		private static final class Error
+		{
+			private String message;
+		}
+	}
+	
+	private static DefaultHttpClient getHttpClient(int iPort) throws NoSuchAlgorithmException, KeyManagementException {
+        SSLContext sslContext = SSLContext.getInstance("SSL");
+        X509TrustManager tm = new X509TrustManager() {
+            public void checkClientTrusted(X509Certificate[] xcs, String string) throws CertificateException {
+            }
+
+            public void checkServerTrusted(X509Certificate[] xcs, String string) throws CertificateException {
+            }
+
+            public X509Certificate[] getAcceptedIssuers() {
+                return null;
+            }
+        };
+        
+        sslContext.init(null, new TrustManager[] { tm }, new SecureRandom());
+        
+        SSLSocketFactory socketFactory = new SSLSocketFactory(sslContext, SSLSocketFactory.ALLOW_ALL_HOSTNAME_VERIFIER);
+        SchemeRegistry registry = new SchemeRegistry();
+        
+        registry.register(new Scheme("https", iPort, socketFactory));
+        
+        BasicClientConnectionManager mgr = new BasicClientConnectionManager(registry);
+        DefaultHttpClient client = new DefaultHttpClient();
+        
+        return new DefaultHttpClient(mgr, client.getParams());
+	}
+	
+	private static String executeJsonRpc(String strJsonToExecute, String strMvip, int iPort,
+			String strAdmin, String strPassword) throws Exception
+	{
+	    DefaultHttpClient httpClient = null;
+	    StringBuilder sb = new StringBuilder();
+	    
+	    try
+	    {
+    		StringEntity input = new StringEntity(strJsonToExecute);
+    		
+    		input.setContentType("application/json");
+    		
+    		httpClient = getHttpClient(iPort);
+    		
+    		URI uri = new URI("https://" + strMvip + ":" + iPort + "/json-rpc/1.0");
+    		AuthScope authScope = new AuthScope(uri.getHost(), uri.getPort(), AuthScope.ANY_SCHEME);
+    		UsernamePasswordCredentials credentials = new UsernamePasswordCredentials(strAdmin, strPassword);
+    		
+    		httpClient.getCredentialsProvider().setCredentials(authScope, credentials);
+    		
+    		HttpPost postRequest = new HttpPost(uri);
+    		
+    		postRequest.setEntity(input);
+    		
+    		HttpResponse response = httpClient.execute(postRequest);
+    		
+    		if (!isSuccess(response.getStatusLine().getStatusCode()))
+    		{
+    			throw new RuntimeException("Failed on JSON-RPC API call. HTTP error code = " + response.getStatusLine().getStatusCode());
+    		}
+    		
+    		BufferedReader br = new BufferedReader(new InputStreamReader(response.getEntity().getContent()));
+    		
+    		String strOutput;
+    		
+    		
+    		while ((strOutput = br.readLine()) != null)
+    		{
+    			sb.append(strOutput);
+    		}
+		} finally {
+	        if (httpClient != null) {
+	            try {
+	                httpClient.getConnectionManager().shutdown();
+	            } catch (Throwable t) {}
+	        }
+	    }
+		
+		return sb.toString();
+	}
+	
+	private static boolean isSuccess(int iCode) {
+	    return iCode >= 200 && iCode < 300;
+	}
+	
+	private static void verifyResult(Object obj, String strJson, Gson gson) throws IllegalStateException
+	{
+		if (obj != null)
+		{
+			return;
+		}
+		
+		JsonError jsonError = gson.fromJson(strJson, JsonError.class);
+		
+		if (jsonError != null)
+		{
+			throw new IllegalStateException(jsonError.error.message);
+		}
+		
+		throw new IllegalStateException("Problem with the following JSON: " + strJson);
+	}
+	
+	private static String getVolumeName(VolumeGetResult volumeGetResult, long lVolumeId) throws Exception
+	{
+		if (volumeGetResult.result.volumes != null && volumeGetResult.result.volumes.length == 1 &&
+			volumeGetResult.result.volumes[0].volumeID == lVolumeId)
+		{
+			return volumeGetResult.result.volumes[0].name;
+		}
+		
+		throw new Exception("Could not determine the name of the volume, " +
+		        "but the volume was created with an ID of " + lVolumeId + ".");
+	}
+	
+	private static String getVolumeIqn(VolumeGetResult volumeGetResult, long lVolumeId) throws Exception
+	{
+		if (volumeGetResult.result.volumes != null && volumeGetResult.result.volumes.length == 1 &&
+			volumeGetResult.result.volumes[0].volumeID == lVolumeId)
+		{
+			return volumeGetResult.result.volumes[0].iqn;
+		}
+		
+		throw new Exception("Could not determine the IQN of the volume, " +
+				"but the volume was created with an ID of " + lVolumeId + ".");
+	}
+
+    private static long getVolumeAccountId(VolumeGetResult volumeGetResult, long lVolumeId) throws Exception
+    {
+        if (volumeGetResult.result.volumes != null && volumeGetResult.result.volumes.length == 1 &&
+            volumeGetResult.result.volumes[0].volumeID == lVolumeId)
+        {
+            return volumeGetResult.result.volumes[0].accountID;
+        }
+
+        throw new Exception("Could not determine the volume's account ID, " +
+                "but the volume was created with an ID of " + lVolumeId + ".");
+    }
+
+    private static String getVolumeStatus(VolumeGetResult volumeGetResult, long lVolumeId) throws Exception
+    {
+        if (volumeGetResult.result.volumes != null && volumeGetResult.result.volumes.length == 1 &&
+            volumeGetResult.result.volumes[0].volumeID == lVolumeId)
+        {
+            return volumeGetResult.result.volumes[0].status;
+        }
+
+        throw new Exception("Could not determine the status of the volume, " +
+                "but the volume was created with an ID of " + lVolumeId + ".");
+    }
+}

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/server/src/com/cloud/api/query/dao/DiskOfferingJoinDaoImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/api/query/dao/DiskOfferingJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/DiskOfferingJoinDaoImpl.java
index 7022ee6..385ca36 100644
--- a/server/src/com/cloud/api/query/dao/DiskOfferingJoinDaoImpl.java
+++ b/server/src/com/cloud/api/query/dao/DiskOfferingJoinDaoImpl.java
@@ -67,6 +67,8 @@ public class DiskOfferingJoinDaoImpl extends GenericDaoBase<DiskOfferingJoinVO,
         diskOfferingResponse.setDisplayText(offering.getDisplayText());
         diskOfferingResponse.setCreated(offering.getCreated());
         diskOfferingResponse.setDiskSize(offering.getDiskSize() / (1024 * 1024 * 1024));
+        diskOfferingResponse.setMinIops(offering.getMinIops());
+        diskOfferingResponse.setMaxIops(offering.getMaxIops());
 
         diskOfferingResponse.setDomain(offering.getDomainName());
         diskOfferingResponse.setDomainId(offering.getDomainUuid());
@@ -74,6 +76,7 @@ public class DiskOfferingJoinDaoImpl extends GenericDaoBase<DiskOfferingJoinVO,
 
         diskOfferingResponse.setTags(offering.getTags());
         diskOfferingResponse.setCustomized(offering.isCustomized());
+        diskOfferingResponse.setCustomizedIops(offering.isCustomizedIops());
         diskOfferingResponse.setStorageType(offering.isUseLocalStorage() ? ServiceOffering.StorageType.local.toString() : ServiceOffering.StorageType.shared.toString());
         diskOfferingResponse.setBytesReadRate(offering.getBytesReadRate());
         diskOfferingResponse.setBytesWriteRate(offering.getBytesWriteRate());

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/server/src/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java
index 68d9113..503a563 100644
--- a/server/src/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java
+++ b/server/src/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java
@@ -83,6 +83,7 @@ public class StoragePoolJoinDaoImpl extends GenericDaoBase<StoragePoolJoinVO, Lo
         long allocatedSize = pool.getUsedCapacity() + pool.getReservedCapacity();
         poolResponse.setDiskSizeTotal(pool.getCapacityBytes());
         poolResponse.setDiskSizeAllocated(allocatedSize);
+        poolResponse.setCapacityIops(pool.getCapacityIops());
 
         // TODO: StatsCollector does not persist data
         StorageStats stats = ApiDBUtils.getStoragePoolStatistics(pool.getId());
@@ -144,6 +145,7 @@ public class StoragePoolJoinDaoImpl extends GenericDaoBase<StoragePoolJoinVO, Lo
         long allocatedSize = ApiDBUtils.getStorageCapacitybyPool(pool.getId(), capacityType);
         poolResponse.setDiskSizeTotal(pool.getCapacityBytes());
         poolResponse.setDiskSizeAllocated(allocatedSize);
+        poolResponse.setCapacityIops(pool.getCapacityIops());
 
         // TODO: StatsCollector does not persist data
         StorageStats stats = ApiDBUtils.getStoragePoolStatistics(pool.getId());

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/server/src/com/cloud/api/query/dao/VolumeJoinDaoImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/api/query/dao/VolumeJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/VolumeJoinDaoImpl.java
index ed2732e..1c18c96 100644
--- a/server/src/com/cloud/api/query/dao/VolumeJoinDaoImpl.java
+++ b/server/src/com/cloud/api/query/dao/VolumeJoinDaoImpl.java
@@ -101,6 +101,9 @@ public class VolumeJoinDaoImpl extends GenericDaoBase<VolumeJoinVO, Long> implem
         // Show the virtual size of the volume
         volResponse.setSize(volume.getSize());
 
+        volResponse.setMinIops(volume.getMinIops());
+        volResponse.setMaxIops(volume.getMaxIops());
+
         volResponse.setCreated(volume.getCreated());
         volResponse.setState(volume.getState().toString());
         if (volume.getState() == Volume.State.UploadOp) {

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/server/src/com/cloud/api/query/vo/DiskOfferingJoinVO.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/api/query/vo/DiskOfferingJoinVO.java b/server/src/com/cloud/api/query/vo/DiskOfferingJoinVO.java
index 2336a48..58e8370 100644
--- a/server/src/com/cloud/api/query/vo/DiskOfferingJoinVO.java
+++ b/server/src/com/cloud/api/query/vo/DiskOfferingJoinVO.java
@@ -61,6 +61,15 @@ public class DiskOfferingJoinVO extends BaseViewVO implements InternalIdentity,
     @Column(name="customized")
     private boolean customized;
 
+    @Column(name="customized_iops")
+    private Boolean customizedIops;
+
+    @Column(name="min_iops")
+    private Long minIops;
+
+    @Column(name="max_iops")
+    private Long maxIops;
+
     @Column(name="sort_key")
     int sortKey;
 
@@ -179,6 +188,30 @@ public class DiskOfferingJoinVO extends BaseViewVO implements InternalIdentity,
         this.customized = customized;
     }
 
+    public Boolean isCustomizedIops() {
+        return customizedIops;
+    }
+
+    public void setCustomizedIops(Boolean customizedIops) {
+        this.customizedIops = customizedIops;
+    }
+
+    public Long getMinIops() {
+        return minIops;
+    }
+
+    public void setMinIops(Long minIops) {
+        this.minIops = minIops;
+    }
+
+    public Long getMaxIops() {
+        return maxIops;
+    }
+
+    public void setMaxIops(Long maxIops) {
+        this.maxIops = maxIops;
+    }
+
     public boolean isDisplayOffering() {
         return displayOffering;
     }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/server/src/com/cloud/api/query/vo/StoragePoolJoinVO.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/api/query/vo/StoragePoolJoinVO.java b/server/src/com/cloud/api/query/vo/StoragePoolJoinVO.java
index c0d5ee9..69f2204 100644
--- a/server/src/com/cloud/api/query/vo/StoragePoolJoinVO.java
+++ b/server/src/com/cloud/api/query/vo/StoragePoolJoinVO.java
@@ -60,7 +60,6 @@ public class StoragePoolJoinVO extends BaseViewVO implements InternalIdentity, I
     @Column(name="host_address")
     private String hostAddress;
 
-
     @Column(name="status")
     @Enumerated(value=EnumType.STRING)
     private StoragePoolStatus status;
@@ -109,7 +108,6 @@ public class StoragePoolJoinVO extends BaseViewVO implements InternalIdentity, I
     @Column(name="pod_name")
     private String podName;
 
-
     @Column(name="tag")
     private String tag;
 
@@ -119,7 +117,6 @@ public class StoragePoolJoinVO extends BaseViewVO implements InternalIdentity, I
     @Column(name="disk_reserved_capacity")
     private long reservedCapacity;
 
-
     @Column(name="job_id")
     private Long jobId;
 
@@ -133,6 +130,8 @@ public class StoragePoolJoinVO extends BaseViewVO implements InternalIdentity, I
     @Enumerated(value = EnumType.STRING)
     private ScopeType scope;
 
+    @Column(name="capacity_iops")
+    private Long capacityIops;
 
     @Column(name = "hypervisor")
     @Enumerated(value = EnumType.STRING)
@@ -243,6 +242,14 @@ public class StoragePoolJoinVO extends BaseViewVO implements InternalIdentity, I
         this.capacityBytes = capacityBytes;
     }
 
+    public Long getCapacityIops() {
+        return capacityIops;
+    }
+
+    public void setCapacityIops(Long capacityIops) {
+        this.capacityIops = capacityIops;
+    }
+
     public long getClusterId() {
         return clusterId;
     }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/server/src/com/cloud/api/query/vo/VolumeJoinVO.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/api/query/vo/VolumeJoinVO.java b/server/src/com/cloud/api/query/vo/VolumeJoinVO.java
index 1f07f52..701e195 100644
--- a/server/src/com/cloud/api/query/vo/VolumeJoinVO.java
+++ b/server/src/com/cloud/api/query/vo/VolumeJoinVO.java
@@ -58,6 +58,12 @@ public class VolumeJoinVO extends BaseViewVO implements ControlledViewEntity {
     @Column(name = "size")
     long size;
 
+    @Column(name = "min_iops")
+    Long minIops;
+
+    @Column(name = "max_iops")
+    Long maxIops;
+
     @Column(name = "state")
     @Enumerated(value = EnumType.STRING)
     private Volume.State state;
@@ -337,14 +343,27 @@ public class VolumeJoinVO extends BaseViewVO implements ControlledViewEntity {
         this.size = size;
     }
 
+    public Long getMinIops() {
+        return minIops;
+    }
 
+    public void setMinIops(Long minIops) {
+        this.minIops = minIops;
+    }
+
+    public Long getMaxIops() {
+        return maxIops;
+    }
+
+    public void setMaxIops(Long maxIops) {
+        this.maxIops = maxIops;
+    }
 
     public Volume.State getState() {
         return state;
     }
 
 
-
     public void setState(Volume.State state) {
         this.state = state;
     }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/server/src/com/cloud/configuration/ConfigurationManager.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/configuration/ConfigurationManager.java b/server/src/com/cloud/configuration/ConfigurationManager.java
index 98eae37..1b99b63 100755
--- a/server/src/com/cloud/configuration/ConfigurationManager.java
+++ b/server/src/com/cloud/configuration/ConfigurationManager.java
@@ -102,14 +102,18 @@ public interface ConfigurationManager extends ConfigurationService, Manager {
      * @param isCustomized
      * @param localStorageRequired
      * @param isDisplayOfferingEnabled
+     * @param isCustomizedIops (is admin allowing users to set custom iops?)
+     * @param minIops
+     * @param maxIops
      * @param bytesReadRate
      * @param bytesWriteRate
      * @param iopsReadRate
      * @param iopsWriteRate
      * @return newly created disk offering
      */
-    DiskOfferingVO createDiskOffering(Long domainId, String name, String description, Long numGibibytes, String tags, boolean isCustomized, boolean localStorageRequired, boolean isDisplayOfferingEnabled,
-            Long bytesReadRate, Long bytesWriteRate, Long iopsReadRate, Long iopsWriteRate);
+    DiskOfferingVO createDiskOffering(Long domainId, String name, String description, Long numGibibytes, String tags, boolean isCustomized,
+    		boolean localStorageRequired, boolean isDisplayOfferingEnabled, Boolean isCustomizedIops, Long minIops, Long maxIops,
+    		Long bytesReadRate, Long bytesWriteRate, Long iopsReadRate, Long iopsWriteRate);
 
     /**
      * Creates a new pod

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/server/src/com/cloud/configuration/ConfigurationManagerImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/configuration/ConfigurationManagerImpl.java b/server/src/com/cloud/configuration/ConfigurationManagerImpl.java
index 3840c12..2089f82 100755
--- a/server/src/com/cloud/configuration/ConfigurationManagerImpl.java
+++ b/server/src/com/cloud/configuration/ConfigurationManagerImpl.java
@@ -2297,8 +2297,9 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
 
     @Override
     @ActionEvent(eventType = EventTypes.EVENT_DISK_OFFERING_CREATE, eventDescription = "creating disk offering")
-    public DiskOfferingVO createDiskOffering(Long domainId, String name, String description, Long numGibibytes, String tags, boolean isCustomized, boolean localStorageRequired, boolean isDisplayOfferingEnabled,
-            Long bytesReadRate, Long bytesWriteRate, Long iopsReadRate, Long iopsWriteRate) {
+    public DiskOfferingVO createDiskOffering(Long domainId, String name, String description, Long numGibibytes, String tags, boolean isCustomized,
+    		boolean localStorageRequired, boolean isDisplayOfferingEnabled, Boolean isCustomizedIops, Long minIops, Long maxIops,
+    		Long bytesReadRate, Long bytesWriteRate, Long iopsReadRate, Long iopsWriteRate) {
         long diskSize = 0;// special case for custom disk offerings
         if (numGibibytes != null && (numGibibytes <= 0)) {
             throw new InvalidParameterValueException("Please specify a disk size of at least 1 Gb.");
@@ -2314,8 +2315,44 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
             isCustomized = true;
         }
 
+        if (isCustomizedIops != null) {
+            bytesReadRate = null;
+            bytesWriteRate = null;
+            iopsReadRate = null;
+            iopsWriteRate = null;
+
+            if (isCustomizedIops) {
+            	minIops = null;
+            	maxIops = null;
+            }
+            else {
+                if (minIops == null && maxIops == null) {
+                    minIops = 0L;
+                    maxIops = 0L;
+                }
+                else {
+                	if (minIops == null || minIops <= 0) {
+                	    throw new InvalidParameterValueException("The min IOPS must be greater than 0.");
+        	        }
+
+                	if (maxIops == null) {
+        	        	maxIops = 0L;
+        	        }
+
+                	if (minIops > maxIops) {
+                		throw new InvalidParameterValueException("The min IOPS must be less than or equal to the max IOPS.");
+                	}
+                }
+            }
+        }
+        else {
+            minIops = null;
+            maxIops = null;
+        }
+
         tags = cleanupTags(tags);
-        DiskOfferingVO newDiskOffering = new DiskOfferingVO(domainId, name, description, diskSize, tags, isCustomized);
+        DiskOfferingVO newDiskOffering = new DiskOfferingVO(domainId, name, description, diskSize, tags, isCustomized,
+        		isCustomizedIops, minIops, maxIops);
         newDiskOffering.setUseLocalStorage(localStorageRequired);
         newDiskOffering.setDisplayOffering(isDisplayOfferingEnabled);
 
@@ -2355,7 +2392,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
         Long domainId = cmd.getDomainId();
 
         if (!isCustomized && numGibibytes == null) {
-            throw new InvalidParameterValueException("Disksize is required for non-customized disk offering");
+            throw new InvalidParameterValueException("Disksize is required for a non-customized disk offering");
         }
 
         boolean localStorageRequired = false;
@@ -2369,11 +2406,17 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
             }
         }
 
+        Boolean isCustomizedIops = cmd.isCustomizedIops();
+        Long minIops = cmd.getMinIops();
+        Long maxIops = cmd.getMaxIops();
         Long bytesReadRate = cmd.getBytesReadRate();
         Long bytesWriteRate = cmd.getBytesWriteRate();
         Long iopsReadRate = cmd.getIopsReadRate();
         Long iopsWriteRate = cmd.getIopsWriteRate();
-        return createDiskOffering(domainId, name, description, numGibibytes, tags, isCustomized, localStorageRequired, isDisplayOfferingEnabled, bytesReadRate, bytesWriteRate, iopsReadRate, iopsWriteRate);
+
+        return createDiskOffering(domainId, name, description, numGibibytes, tags, isCustomized,
+        		localStorageRequired, isDisplayOfferingEnabled, isCustomizedIops, minIops, maxIops,
+        		bytesReadRate, bytesWriteRate, iopsReadRate, iopsWriteRate);
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/server/src/com/cloud/server/ConfigurationServerImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/server/ConfigurationServerImpl.java b/server/src/com/cloud/server/ConfigurationServerImpl.java
index 1ddfcfa..9e79b76 100755
--- a/server/src/com/cloud/server/ConfigurationServerImpl.java
+++ b/server/src/com/cloud/server/ConfigurationServerImpl.java
@@ -932,7 +932,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio
         diskSize = diskSize * 1024 * 1024 * 1024;
         tags = cleanupTags(tags);
 
-        DiskOfferingVO newDiskOffering = new DiskOfferingVO(domainId, name, description, diskSize, tags, isCustomized);
+        DiskOfferingVO newDiskOffering = new DiskOfferingVO(domainId, name, description, diskSize, tags, isCustomized, null, null, null);
         newDiskOffering.setUniqueName("Cloud.Com-" + name);
         newDiskOffering.setSystemUse(isSystemUse);
         newDiskOffering = _diskOfferingDao.persistDeafultDiskOffering(newDiskOffering);

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/server/src/com/cloud/storage/StorageManager.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/storage/StorageManager.java b/server/src/com/cloud/storage/StorageManager.java
index 29c7ebc..df69092 100755
--- a/server/src/com/cloud/storage/StorageManager.java
+++ b/server/src/com/cloud/storage/StorageManager.java
@@ -99,28 +99,23 @@ public interface StorageManager extends StorageService {
 
     void cleanupSecondaryStorage(boolean recurring);
 
-
 	HypervisorType getHypervisorTypeFromFormat(ImageFormat format);
 
+    boolean storagePoolHasEnoughIops(List<Volume> volume, StoragePool pool);
+
     boolean storagePoolHasEnoughSpace(List<Volume> volume, StoragePool pool);
 
-    
     boolean registerHostListener(String providerUuid, HypervisorHostListener listener);
 
     StoragePool findStoragePool(DiskProfile dskCh, DataCenterVO dc,
             HostPodVO pod, Long clusterId, Long hostId, VMInstanceVO vm,
             Set<StoragePool> avoid);
 
-
     void connectHostToSharedPool(long hostId, long poolId)
             throws StorageUnavailableException;
 
     void createCapacityEntry(long poolId);
 
-
-
-
-
     DataStore createLocalStorage(Host host, StoragePoolInfo poolInfo) throws ConnectionException;
 
     BigDecimal getStorageOverProvisioningFactor(Long dcId);

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/server/src/com/cloud/storage/StorageManagerImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/storage/StorageManagerImpl.java b/server/src/com/cloud/storage/StorageManagerImpl.java
index b3e8b96..241f6e6 100755
--- a/server/src/com/cloud/storage/StorageManagerImpl.java
+++ b/server/src/com/cloud/storage/StorageManagerImpl.java
@@ -694,9 +694,10 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
                 throw new InvalidParameterValueException(
                         "Missing parameter hypervisor. Hypervisor type is required to create zone wide primary storage.");
             }
-            if (hypervisorType != HypervisorType.KVM && hypervisorType != HypervisorType.VMware) {
+            if (hypervisorType != HypervisorType.KVM && hypervisorType != HypervisorType.VMware &&
+                hypervisorType != HypervisorType.Any) {
                 throw new InvalidParameterValueException(
-                        "zone wide storage pool is not suported for hypervisor type " + hypervisor);
+                        "zone wide storage pool is not supported for hypervisor type " + hypervisor);
             }
         }
 
@@ -734,6 +735,9 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
         params.put("name", cmd.getStoragePoolName());
         params.put("details", details);
         params.put("providerName", storeProvider.getName());
+        params.put("managed", cmd.isManaged());
+        params.put("capacityBytes", cmd.getCapacityBytes());
+        params.put("capacityIops", cmd.getCapacityIops());
 
         DataStoreLifeCycle lifeCycle = storeProvider.getDataStoreLifeCycle();
         DataStore store = null;
@@ -1561,7 +1565,41 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
     }
 
     @Override
-    public boolean storagePoolHasEnoughSpace(List<Volume> volumes, StoragePool pool) {
+    public boolean storagePoolHasEnoughIops(List<Volume> requestedVolumes,
+            StoragePool pool) {
+        if (requestedVolumes == null || requestedVolumes.isEmpty() || pool == null)
+            return false;
+
+        long currentIops = 0;
+
+        List<VolumeVO> volumesInPool = _volumeDao.findByPoolId(pool.getId(), null);
+
+        for (VolumeVO volumeInPool : volumesInPool) {
+            Long minIops = volumeInPool.getMinIops();
+
+            if (minIops != null && minIops > 0) {
+                currentIops += minIops;
+            }
+        }
+
+        long requestedIops = 0;
+
+        for (Volume requestedVolume : requestedVolumes) {
+            Long minIops = requestedVolume.getMinIops();
+
+            if (minIops != null && minIops > 0) {
+                requestedIops += minIops;
+            }
+        }
+
+        long futureIops = currentIops + requestedIops;
+
+        return futureIops <= pool.getCapacityIops();
+    }
+
+    @Override
+    public boolean storagePoolHasEnoughSpace(List<Volume> volumes,
+            StoragePool pool) {
         if (volumes == null || volumes.isEmpty())
             return false;
 

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/server/src/com/cloud/storage/VolumeManager.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/storage/VolumeManager.java b/server/src/com/cloud/storage/VolumeManager.java
index c84bb67..2e44a3c 100644
--- a/server/src/com/cloud/storage/VolumeManager.java
+++ b/server/src/com/cloud/storage/VolumeManager.java
@@ -45,7 +45,6 @@ import com.cloud.vm.VirtualMachine;
 import com.cloud.vm.VirtualMachineProfile;
 
 public interface VolumeManager extends VolumeApiService {
-
     VolumeInfo moveVolume(VolumeInfo volume, long destPoolDcId, Long destPoolPodId,
             Long destPoolClusterId, HypervisorType dataDiskHyperType)
             throws ConcurrentOperationException;

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/server/src/com/cloud/storage/VolumeManagerImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/storage/VolumeManagerImpl.java b/server/src/com/cloud/storage/VolumeManagerImpl.java
index 4e7b335..a293da5 100644
--- a/server/src/com/cloud/storage/VolumeManagerImpl.java
+++ b/server/src/com/cloud/storage/VolumeManagerImpl.java
@@ -55,6 +55,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
 import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
 import org.apache.cloudstack.engine.subsystem.api.storage.TemplateDataFactory;
 import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
+import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo;
 import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
 import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
 import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService;
@@ -65,6 +66,7 @@ import org.apache.cloudstack.storage.command.AttachCommand;
 import org.apache.cloudstack.storage.command.CommandResult;
 import org.apache.cloudstack.storage.command.DettachCommand;
 import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
 import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
 import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao;
 import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO;
@@ -227,6 +229,8 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager {
     @Inject
     protected StoragePoolHostDao _storagePoolHostDao;
     @Inject
+    StoragePoolDetailsDao storagePoolDetailsDao;
+    @Inject
     protected AlertManager _alertMgr;
     @Inject
     protected TemplateDataStoreDao _vmTemplateStoreDao = null;
@@ -507,7 +511,8 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager {
         VolumeVO newVol = new VolumeVO(oldVol.getVolumeType(),
                 oldVol.getName(), oldVol.getDataCenterId(),
                 oldVol.getDomainId(), oldVol.getAccountId(),
-                oldVol.getDiskOfferingId(), oldVol.getSize());
+                oldVol.getDiskOfferingId(), oldVol.getSize(),
+                oldVol.getMinIops(), oldVol.getMaxIops(), oldVol.get_iScsiName());
         if (templateId != null) {
             newVol.setTemplateId(templateId);
         } else {
@@ -680,9 +685,9 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager {
         pool = storageMgr.findStoragePool(dskCh, dc, pod, clusterId, vm.getHostId(),
                 vm, avoidPools);
         if (pool == null) {
-            s_logger.warn("Unable to find storage poll when create volume "
+            s_logger.warn("Unable to find storage pool when create volume "
                     + volume.getName());
-            throw new CloudRuntimeException("Unable to find storage poll when create volume" + volume.getName());
+            throw new CloudRuntimeException("Unable to find storage pool when create volume" + volume.getName());
         }
 
         if (s_logger.isDebugEnabled()) {
@@ -731,8 +736,8 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager {
         Transaction txn = Transaction.currentTxn();
         txn.start();
 
-        VolumeVO volume = new VolumeVO(volumeName, zoneId, -1L, -1L, -1,
-                new Long(-1), null, null, 0, Volume.Type.DATADISK);
+        VolumeVO volume = new VolumeVO(volumeName, zoneId, -1, -1, -1,
+                new Long(-1), null, null, 0, null, null, null, Volume.Type.DATADISK);
         volume.setPoolId(null);
         volume.setDataCenterId(zoneId);
         volume.setPodId(null);
@@ -835,6 +840,8 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager {
         Long diskOfferingId = null;
         DiskOfferingVO diskOffering = null;
         Long size = null;
+        Long minIops = null;
+        Long maxIops = null;
         // Volume VO used for extracting the source template id
         VolumeVO parentVolume = null;
 
@@ -896,6 +903,37 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager {
                 size = diskOffering.getDiskSize();
             }
 
+            Boolean isCustomizedIops = diskOffering.isCustomizedIops();
+
+            if (isCustomizedIops != null) {
+                if (isCustomizedIops) {
+                	minIops = cmd.getMinIops();
+                	maxIops = cmd.getMaxIops();
+
+                	if (minIops == null && maxIops == null) {
+                	    minIops = 0L;
+                	    maxIops = 0L;
+                	}
+                	else {
+                        if (minIops == null || minIops <= 0) {
+                            throw new InvalidParameterValueException("The min IOPS must be greater than 0.");
+                        }
+
+                    	if (maxIops == null) {
+            	        	maxIops = 0L;
+            	        }
+
+                    	if (minIops > maxIops) {
+                    		throw new InvalidParameterValueException("The min IOPS must be less than or equal to the max IOPS.");
+                    	}
+                	}
+                }
+                else {
+                    minIops = diskOffering.getMinIops();
+                    maxIops = diskOffering.getMaxIops();
+                }
+            }
+
             if (!validateVolumeSizeRange(size)) {// convert size from mb to gb
                                                  // for validation
                 throw new InvalidParameterValueException(
@@ -970,8 +1008,8 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager {
         Transaction txn = Transaction.currentTxn();
         txn.start();
 
-        VolumeVO volume = new VolumeVO(userSpecifiedName, -1L, -1L, -1, -1,
-                new Long(-1), null, null, 0, Volume.Type.DATADISK);
+        VolumeVO volume = new VolumeVO(userSpecifiedName, -1, -1, -1, -1,
+                new Long(-1), null, null, 0, null, null, null, Volume.Type.DATADISK);
         volume.setPoolId(null);
         volume.setDataCenterId(zoneId);
         volume.setPodId(null);
@@ -980,6 +1018,8 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager {
                 .getDomainId()));
         volume.setDiskOfferingId(diskOfferingId);
         volume.setSize(size);
+        volume.setMinIops(minIops);
+        volume.setMaxIops(maxIops);
         volume.setInstanceId(null);
         volume.setUpdated(new Date());
         volume.setDomainId((caller == null) ? Domain.ROOT_DOMAIN : caller
@@ -1171,7 +1211,6 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager {
 
         UserVmVO userVm = _userVmDao.findById(volume.getInstanceId());
 
-        PrimaryDataStoreInfo pool = (PrimaryDataStoreInfo)dataStoreMgr.getDataStore(volume.getPoolId(), DataStoreRole.Primary);
         long currentSize = volume.getSize();
 
         /*
@@ -1358,7 +1397,8 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager {
             size = (size * 1024 * 1024 * 1024);
         }
         VolumeVO vol = new VolumeVO(type, name, vm.getDataCenterId(),
-                owner.getDomainId(), owner.getId(), offering.getId(), size);
+                owner.getDomainId(), owner.getId(), offering.getId(), size,
+                offering.getMinIops(), offering.getMaxIops(), null);
         if (vm != null) {
             vol.setInstanceId(vm.getId());
         }
@@ -1398,7 +1438,8 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager {
         Long size = _tmpltMgr.getTemplateSize(template.getId(), vm.getDataCenterId());
 
         VolumeVO vol = new VolumeVO(type, name, vm.getDataCenterId(),
-                owner.getDomainId(), owner.getId(), offering.getId(), size);
+                owner.getDomainId(), owner.getId(), offering.getId(), size,
+                offering.getMinIops(), offering.getMaxIops(), null);
         vol.setFormat(this.getSupportedImageFormatForCluster(template.getHypervisorType()));
         if (vm != null) {
             vol.setInstanceId(vm.getId());
@@ -1542,8 +1583,8 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager {
         return !storeForRootStoreScope.isSameScope(storeForDataStoreScope);
     }
 
-    private VolumeVO sendAttachVolumeCommand(UserVmVO vm, VolumeVO volume, Long deviceId) {
-        String errorMsg = "Failed to attach volume: " + volume.getName()
+    private VolumeVO sendAttachVolumeCommand(UserVmVO vm, VolumeVO volumeToAttach, Long deviceId) {
+        String errorMsg = "Failed to attach volume: " + volumeToAttach.getName()
                 + " to VM: " + vm.getHostName();
         boolean sendCommand = (vm.getState() == State.Running);
         AttachAnswer answer = null;
@@ -1557,12 +1598,37 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager {
             }
         }
 
+        StoragePoolVO volumeToAttachStoragePool = null;
+
         if (sendCommand) {
-            DataTO volTO = volFactory.getVolume(volume.getId()).getTO();
-            DiskTO disk = new DiskTO(volTO, deviceId, volume.getVolumeType());
+            volumeToAttachStoragePool = _storagePoolDao.findById(volumeToAttach.getPoolId());
+            long storagePoolId = volumeToAttachStoragePool.getId();
+
+            DataTO volTO = volFactory.getVolume(volumeToAttach.getId()).getTO();
+            DiskTO disk = new DiskTO(volTO, deviceId, null, volumeToAttach.getVolumeType());
+
             AttachCommand cmd = new AttachCommand(disk, vm.getInstanceName());
+
+            cmd.setManaged(volumeToAttachStoragePool.isManaged());
+
+            cmd.setStorageHost(volumeToAttachStoragePool.getHostAddress());
+            cmd.setStoragePort(volumeToAttachStoragePool.getPort());
+
+            cmd.set_iScsiName(volumeToAttach.get_iScsiName());
+
+            VolumeInfo volumeInfo = volFactory.getVolume(volumeToAttach.getId());
+            DataStore dataStore = dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary);
+            ChapInfo chapInfo = volService.getChapInfo(volumeInfo, dataStore);
+
+            if (chapInfo != null) {
+                cmd.setChapInitiatorUsername(chapInfo.getInitiatorUsername());
+                cmd.setChapInitiatorPassword(chapInfo.getInitiatorSecret());
+                cmd.setChapTargetUsername(chapInfo.getTargetUsername());
+                cmd.setChapTargetPassword(chapInfo.getTargetSecret());
+            }
+
             try {
-                answer = (AttachAnswer) _agentMgr.send(hostId, cmd);
+                answer = (AttachAnswer)_agentMgr.send(hostId, cmd);
             } catch (Exception e) {
                 throw new CloudRuntimeException(errorMsg + " due to: "
                         + e.getMessage());
@@ -1573,19 +1639,29 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager {
             // Mark the volume as attached
             if (sendCommand) {
                 DiskTO disk = answer.getDisk();
-                _volsDao.attachVolume(volume.getId(), vm.getId(),
+                _volsDao.attachVolume(volumeToAttach.getId(), vm.getId(),
                         disk.getDiskSeq());
+
+                volumeToAttach = _volsDao.findById(volumeToAttach.getId());
+
+                if (volumeToAttachStoragePool.isManaged() &&
+                	volumeToAttach.getPath() == null) {
+                	volumeToAttach.setPath(answer.getDisk().getVdiUuid());
+
+                	_volsDao.update(volumeToAttach.getId(), volumeToAttach);
+                }
             } else {
-                _volsDao.attachVolume(volume.getId(), vm.getId(), deviceId);
+                _volsDao.attachVolume(volumeToAttach.getId(), vm.getId(), deviceId);
             }
+
             // insert record for disk I/O statistics
-            VmDiskStatisticsVO diskstats = _vmDiskStatsDao.findBy(vm.getAccountId(), vm.getDataCenterId(),vm.getId(), volume.getId());
+            VmDiskStatisticsVO diskstats = _vmDiskStatsDao.findBy(vm.getAccountId(), vm.getDataCenterId(),vm.getId(), volumeToAttach.getId());
             if (diskstats == null) {
-               diskstats = new VmDiskStatisticsVO(vm.getAccountId(), vm.getDataCenterId(),vm.getId(), volume.getId());
+               diskstats = new VmDiskStatisticsVO(vm.getAccountId(), vm.getDataCenterId(),vm.getId(), volumeToAttach.getId());
                _vmDiskStatsDao.persist(diskstats);
             }
 
-            return _volsDao.findById(volume.getId());
+            return _volsDao.findById(volumeToAttach.getId());
         } else {
             if (answer != null) {
                 String details = answer.getDetails();
@@ -1912,9 +1988,17 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager {
         Answer answer = null;
 
         if (sendCommand) {
+            StoragePoolVO volumePool = _storagePoolDao.findById(volume.getPoolId());
+
             DataTO volTO = volFactory.getVolume(volume.getId()).getTO();
-            DiskTO disk = new DiskTO(volTO, volume.getDeviceId(), volume.getVolumeType());
+            DiskTO disk = new DiskTO(volTO, volume.getDeviceId(), null, volume.getVolumeType());
+
             DettachCommand cmd = new DettachCommand(disk, vm.getInstanceName());
+
+            cmd.setManaged(volumePool.isManaged());
+
+            cmd.set_iScsiName(volume.get_iScsiName());
+
             try {
                 answer = _agentMgr.send(vm.getHostId(), cmd);
             } catch (Exception e) {
@@ -1926,6 +2010,7 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager {
         if (!sendCommand || (answer != null && answer.getResult())) {
             // Mark the volume as detached
             _volsDao.detachVolume(volume.getId());
+
             return _volsDao.findById(volumeId);
         } else {
 
@@ -1940,11 +2025,6 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager {
         }
     }
 
-
-
-
-
-
     @DB
     protected VolumeVO switchVolume(VolumeVO existingVolume,
             VirtualMachineProfile<? extends VirtualMachine> vm)
@@ -2232,7 +2312,7 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager {
 
         for (VolumeVO vol : vols) {
             DataTO volTO = volFactory.getVolume(vol.getId()).getTO();
-            DiskTO disk = new DiskTO(volTO, vol.getDeviceId(), vol.getVolumeType());
+            DiskTO disk = new DiskTO(volTO, vol.getDeviceId(), null, vol.getVolumeType());
             vm.addDisk(disk);
         }
 
@@ -2240,7 +2320,7 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager {
             UserVmVO userVM = (UserVmVO) vm.getVirtualMachine();
             if (userVM.getIsoId() != null) {
                 DataTO dataTO = tmplFactory.getTemplate(userVM.getIsoId(), DataStoreRole.Image, userVM.getDataCenterId()).getTO();
-                DiskTO iso = new DiskTO(dataTO, 3L, Volume.Type.ISO);
+                DiskTO iso = new DiskTO(dataTO, 3L, null, Volume.Type.ISO);
                 vm.addDisk(iso);
             }
         }
@@ -2458,7 +2538,7 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager {
                 vol = result.first();
             }
             DataTO volumeTO = volFactory.getVolume(vol.getId()).getTO();
-            DiskTO disk = new DiskTO(volumeTO, vol.getDeviceId(), vol.getVolumeType());
+            DiskTO disk = new DiskTO(volumeTO, vol.getDeviceId(), null, vol.getVolumeType());
             vm.addDisk(disk);
         }
     }
@@ -2745,7 +2825,6 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager {
 
     @Override
     public String getVmNameFromVolumeId(long volumeId) {
-        Long instanceId;
         VolumeVO volume = _volsDao.findById(volumeId);
         return getVmNameOnVolume(volume);
     }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/server/src/com/cloud/template/TemplateManagerImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/template/TemplateManagerImpl.java b/server/src/com/cloud/template/TemplateManagerImpl.java
index f70d44d..ca644af 100755
--- a/server/src/com/cloud/template/TemplateManagerImpl.java
+++ b/server/src/com/cloud/template/TemplateManagerImpl.java
@@ -1023,7 +1023,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager,
         }
 
         DataTO isoTO = tmplt.getTO();
-        DiskTO disk = new DiskTO(isoTO, null, Volume.Type.ISO);
+        DiskTO disk = new DiskTO(isoTO, null, null, Volume.Type.ISO);
         Command cmd = null;
         if (attach) {
             cmd = new AttachCommand(disk, vmName);

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/server/src/com/cloud/test/DatabaseConfig.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/test/DatabaseConfig.java b/server/src/com/cloud/test/DatabaseConfig.java
index ef0259d..63f77b6 100755
--- a/server/src/com/cloud/test/DatabaseConfig.java
+++ b/server/src/com/cloud/test/DatabaseConfig.java
@@ -979,7 +979,7 @@ public class DatabaseConfig {
             newTags.delete(newTags.length() - 1, newTags.length());
             tags = newTags.toString();
         }
-        DiskOfferingVO diskOffering = new DiskOfferingVO(domainId, name, displayText, diskSpace , tags, false);
+        DiskOfferingVO diskOffering = new DiskOfferingVO(domainId, name, displayText, diskSpace, tags, false, null, null, null);
         diskOffering.setUseLocalStorage(local);
 
         Long bytesReadRate = Long.parseLong(_currentObjectParams.get("bytesReadRate"));

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/server/src/com/cloud/vm/UserVmManagerImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/vm/UserVmManagerImpl.java b/server/src/com/cloud/vm/UserVmManagerImpl.java
index 3cef182..a59fa5b 100755
--- a/server/src/com/cloud/vm/UserVmManagerImpl.java
+++ b/server/src/com/cloud/vm/UserVmManagerImpl.java
@@ -2910,12 +2910,12 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use
 
             TemplateObjectTO iso = (TemplateObjectTO)template.getTO();
             iso.setGuestOsType(displayName);
-            DiskTO disk = new DiskTO(iso, 3L, Volume.Type.ISO);
+            DiskTO disk = new DiskTO(iso, 3L, null, Volume.Type.ISO);
             profile.addDisk(disk);
         } else {
             TemplateObjectTO iso = new TemplateObjectTO();
             iso.setFormat(ImageFormat.ISO);
-            DiskTO disk = new DiskTO(iso, 3L, Volume.Type.ISO);
+            DiskTO disk = new DiskTO(iso, 3L, null, Volume.Type.ISO);
             profile.addDisk(disk);
         }
 

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/server/test/com/cloud/vpc/MockConfigurationManagerImpl.java
----------------------------------------------------------------------
diff --git a/server/test/com/cloud/vpc/MockConfigurationManagerImpl.java b/server/test/com/cloud/vpc/MockConfigurationManagerImpl.java
index 95230a5..7a61978 100755
--- a/server/test/com/cloud/vpc/MockConfigurationManagerImpl.java
+++ b/server/test/com/cloud/vpc/MockConfigurationManagerImpl.java
@@ -655,8 +655,9 @@ public class MockConfigurationManagerImpl extends ManagerBase implements Configu
      * @see com.cloud.configuration.ConfigurationManager#createDiskOffering(java.lang.Long, java.lang.String, java.lang.String, java.lang.Long, java.lang.String, boolean, boolean, boolean)
      */
     @Override
-    public DiskOfferingVO createDiskOffering(Long domainId, String name, String description, Long numGibibytes, String tags, boolean isCustomized, boolean localStorageRequired, boolean isDisplayOfferingEnabled,
-            Long bytesReadRate, Long bytesWriteRate, Long iopsReadRate, Long iopsWriteRate) {
+    public DiskOfferingVO createDiskOffering(Long domainId, String name, String description, Long numGibibytes, String tags, boolean isCustomized,
+    		boolean localStorageRequired, boolean isDisplayOfferingEnabled, Boolean isCustomizedIops, Long minIops, Long maxIops,
+    		Long bytesReadRate, Long bytesWriteRate, Long iopsReadRate, Long iopsWriteRate) {
         // TODO Auto-generated method stub
         return null;
     }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/setup/db/db/schema-410to420.sql
----------------------------------------------------------------------
diff --git a/setup/db/db/schema-410to420.sql b/setup/db/db/schema-410to420.sql
index 2d0f8de..0c1d753 100644
--- a/setup/db/db/schema-410to420.sql
+++ b/setup/db/db/schema-410to420.sql
@@ -429,6 +429,20 @@ ALTER TABLE `cloud`.`nics` ADD COLUMN `display_nic` tinyint(1) NOT NULL DEFAULT
 
 ALTER TABLE `cloud`.`disk_offering` ADD COLUMN `display_offering` tinyint(1) NOT NULL DEFAULT 1 COMMENT 'Should disk offering be displayed to the end user';
 
+ALTER TABLE `cloud`.`disk_offering` ADD COLUMN `customized_iops` tinyint(1) unsigned COMMENT 'Should customized IOPS be displayed to the end user';
+
+ALTER TABLE `cloud`.`disk_offering` ADD COLUMN `min_iops` bigint(20) unsigned COMMENT 'Minimum IOPS';
+
+ALTER TABLE `cloud`.`disk_offering` ADD COLUMN `max_iops` bigint(20) unsigned COMMENT 'Maximum IOPS';
+
+ALTER TABLE `cloud`.`volumes` ADD COLUMN `min_iops` bigint(20) unsigned COMMENT 'Minimum IOPS';
+
+ALTER TABLE `cloud`.`volumes` ADD COLUMN `max_iops` bigint(20) unsigned COMMENT 'Maximum IOPS';
+
+ALTER TABLE `cloud`.`storage_pool` ADD COLUMN `managed` tinyint(1) unsigned NOT NULL DEFAULT 0 COMMENT 'Should CloudStack manage this storage';
+
+ALTER TABLE `cloud`.`storage_pool` ADD COLUMN `capacity_iops` bigint(20) unsigned DEFAULT NULL COMMENT 'IOPS CloudStack can provision from this storage pool';
+
 ALTER TABLE `cloud`.`disk_offering` ADD COLUMN `bytes_read_rate` bigint(20);
 
 ALTER TABLE `cloud`.`disk_offering` ADD COLUMN `bytes_write_rate` bigint(20);
@@ -871,6 +885,8 @@ CREATE VIEW `cloud`.`volume_view` AS
         volumes.device_id,
         volumes.volume_type,
         volumes.size,
+        volumes.min_iops,
+        volumes.max_iops,
         volumes.created,
         volumes.state,
         volumes.attached,
@@ -981,6 +997,7 @@ CREATE VIEW `cloud`.`storage_pool_view` AS
         storage_pool.created,
         storage_pool.removed,
         storage_pool.capacity_bytes,
+        storage_pool.capacity_iops,
         storage_pool.scope,
         storage_pool.hypervisor,
         cluster.id cluster_id,
@@ -1521,9 +1538,12 @@ CREATE VIEW `cloud`.`disk_offering_view` AS
         disk_offering.name,
         disk_offering.display_text,
         disk_offering.disk_size,
+        disk_offering.min_iops,
+        disk_offering.max_iops,
         disk_offering.created,
         disk_offering.tags,
         disk_offering.customized,
+        disk_offering.customized_iops,
         disk_offering.removed,
         disk_offering.use_local_storage,
         disk_offering.system_use,
@@ -1736,6 +1756,8 @@ CREATE VIEW `cloud`.`volume_view` AS
         volumes.device_id,
         volumes.volume_type,
         volumes.size,
+        volumes.min_iops,
+        volumes.max_iops,
         volumes.created,
         volumes.state,
         volumes.attached,

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/tools/marvin/marvin/cloudstackConnection.py
----------------------------------------------------------------------
diff --git a/tools/marvin/marvin/cloudstackConnection.py b/tools/marvin/marvin/cloudstackConnection.py
index 9d60ff9..8129396 100644
--- a/tools/marvin/marvin/cloudstackConnection.py
+++ b/tools/marvin/marvin/cloudstackConnection.py
@@ -203,7 +203,7 @@ class cloudConnection(object):
                             i = i + 1
         return cmdname, isAsync, requests
 
-    def marvin_request(self, cmd, response_type=None, method='GET'):
+    def marvin_request(self, cmd, response_type=None, method='GET', data=''):
         """
         Requester for marvin command objects
         @param cmd: marvin's command from cloudstackAPI

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/ui/dictionary.jsp
----------------------------------------------------------------------
diff --git a/ui/dictionary.jsp b/ui/dictionary.jsp
index 7809cdb..af64228 100644
--- a/ui/dictionary.jsp
+++ b/ui/dictionary.jsp
@@ -25,6 +25,9 @@ under the License.
 <% long now = System.currentTimeMillis(); %>
 <script language="javascript">
 dictionary = {
+'label.custom.disk.iops': '<fmt:message key="label.custom.disk.iops" />',
+'label.disk.iops.min': '<fmt:message key="label.disk.iops.min" />',
+'label.disk.iops.max': '<fmt:message key="label.disk.iops.max" />',
 'label.acquire.new.secondary.ip': '<fmt:message key="label.acquire.new.secondary.ip" />',
 'label.view.secondary.ips': '<fmt:message key="label.view.secondary.ips" />',
 'message.acquire.ip.nic': '<fmt:message key="message.acquire.ip.nic" />',
@@ -472,6 +475,7 @@ dictionary = {
 'label.disable.vpn': '<fmt:message key="label.disable.vpn" />',
 'label.disabling.vpn.access': '<fmt:message key="label.disabling.vpn.access" />',
 'label.disk.allocated': '<fmt:message key="label.disk.allocated" />',
+'label.disk.iops.total': '<fmt:message key="label.disk.iops.total" />',
 'label.disk.bytes.read.rate': '<fmt:message key="label.disk.bytes.read.rate" />',
 'label.disk.bytes.write.rate': '<fmt:message key="label.disk.bytes.write.rate" />',
 'label.disk.iops.write.rate': '<fmt:message key="label.disk.iops.write.rate" />',
@@ -1029,6 +1033,7 @@ dictionary = {
 'label.storage': '<fmt:message key="label.storage" />',
 'label.storage.tags': '<fmt:message key="label.storage.tags" />',
 'label.storage.type': '<fmt:message key="label.storage.type" />',
+'label.qos.type': '<fmt:message key="label.qos.type" />',
 'label.subdomain.access': '<fmt:message key="label.subdomain.access" />',
 'label.submit': '<fmt:message key="label.submit" />',
 'label.submitted.by': '<fmt:message key="label.submitted.by" />',

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/ui/scripts/configuration.js
----------------------------------------------------------------------
diff --git a/ui/scripts/configuration.js b/ui/scripts/configuration.js
index 7f0e1a5..ab70c3d 100644
--- a/ui/scripts/configuration.js
+++ b/ui/scripts/configuration.js
@@ -1015,6 +1015,86 @@
                     dependsOn: 'isCustomized',
                     validation: { required: true, number: true }
                   },
+                  qosType: {
+                    label: 'label.qos.type',
+                    docID: 'helpDiskOfferingQoSType',
+                    select: function(args) {
+                      var items = [];
+                      items.push({id: '', description: ''});
+                      items.push({id: 'hypervisor', description: 'hypervisor'});
+                      items.push({id: 'storage', description: 'storage'});
+                      args.response.success({data: items});
+                      
+                      args.$select.change(function() {
+                      	var $form = $(this).closest('form');
+                        var $isCustomizedIops = $form.find('.form-item[rel=isCustomizedIops]');
+                        var $minIops = $form.find('.form-item[rel=minIops]');
+                        var $maxIops = $form.find('.form-item[rel=maxIops]');
+                        var $diskBytesReadRate = $form.find('.form-item[rel=diskBytesReadRate]');
+                        var $diskBytesWriteRate = $form.find('.form-item[rel=diskBytesWriteRate]');
+                        var $diskIopsReadRate = $form.find('.form-item[rel=diskIopsReadRate]');
+                        var $diskIopsWriteRate = $form.find('.form-item[rel=diskIopsWriteRate]');
+                        
+                        var qosId = $(this).val();
+                        
+                        if (qosId == 'storage') { // Storage QoS
+                          $diskBytesReadRate.hide();
+                          $diskBytesWriteRate.hide();
+                          $diskIopsReadRate.hide();
+                          $diskIopsWriteRate.hide();
+                          
+                          $isCustomizedIops.css('display', 'inline-block');
+
+                          if ($isCustomizedIops == true) {
+                            $minIops.css('display', 'inline-block');
+                            $maxIops.css('display', 'inline-block');
+                          }
+                          else {
+                            $minIops.hide();
+                            $maxIops.hide();
+                          }
+                        }
+                        else if (qosId == 'hypervisor') { // Hypervisor Qos
+                          $isCustomizedIops.hide();
+                          $minIops.hide();
+                          $maxIops.hide();
+                          
+                          $diskBytesReadRate.css('display', 'inline-block');
+                          $diskBytesWriteRate.css('display', 'inline-block');
+                          $diskIopsReadRate.css('display', 'inline-block');
+                          $diskIopsWriteRate.css('display', 'inline-block');
+                        }
+                        else { // No Qos
+                          $diskBytesReadRate.hide();
+                          $diskBytesWriteRate.hide();
+                          $diskIopsReadRate.hide();
+                          $diskIopsWriteRate.hide();
+                          $isCustomizedIops.hide();
+                          $minIops.hide();
+                          $maxIops.hide();
+                        }
+                      });
+                    }
+                  },
+                  isCustomizedIops: {
+                    label: 'label.custom.disk.iops',
+                    docID: 'helpDiskOfferingCustomDiskIops',
+                    isBoolean: true,
+                    isReverse: true,
+                    isChecked: false
+                  },
+                  minIops: {
+                    label: 'label.disk.iops.min',
+                    docID: 'helpDiskOfferingDiskIopsMin',
+                    dependsOn: 'isCustomizedIops',
+                    validation: { required: false, number: true }
+                  },
+                  maxIops: {
+                    label: 'label.disk.iops.max',
+                    docID: 'helpDiskOfferingDiskIopsMax',
+                    dependsOn: 'isCustomizedIops',
+                    validation: { required: false, number: true }
+                  },
                   diskBytesReadRate: {
                       label: 'label.disk.bytes.read.rate',
                       validation: {
@@ -1080,18 +1160,65 @@
 
               action: function(args) {
                 var data = {
-								  isMirrored: false,
+								    isMirrored: false,
 									name: args.data.name,
 									displaytext: args.data.description,
 									storageType: args.data.storageType,
 									customized: (args.data.isCustomized=="on")
-								};																
-               
+								};
+               	
                 if(args.$form.find('.form-item[rel=disksize]').css("display") != "none") {
 								  $.extend(data, {
 									  disksize: args.data.disksize
-									});		
-								}
+									});
+				}
+				
+				if (args.data.qosType == 'storage') {
+					var customIops = args.data.isCustomizedIops == "on";
+					
+					$.extend(data, {
+						customizediops: customIops
+					});
+					
+					if (!customIops) {
+				   	   if (args.data.minIops != null && args.data.minIops.length > 0) {
+					   	   $.extend(data, {
+							   miniops: args.data.minIops
+						   });
+						}
+
+						if(args.data.maxIops != null && args.data.maxIops.length > 0) {
+					   	   $.extend(data, {
+					       	   maxiops: args.data.maxIops
+					   	   });
+					   	}
+					}
+				}
+				else if (args.data.qosType == 'hypervisor') {
+					if (args.data.diskBytesReadRate != null && args.data.diskBytesReadRate.length > 0) {
+                        $.extend(data, {
+                            bytesreadrate: args.data.diskBytesReadRate
+                        });
+                    }
+                    
+                	if (args.data.diskBytesWriteRate != null && args.data.diskBytesWriteRate.length > 0) {
+                        $.extend(data, {
+                            byteswriterate: args.data.diskBytesWriteRate
+                        });
+                    }
+                
+                	if (args.data.diskIopsReadRate != null && args.data.diskIopsReadRate.length > 0) {
+                        $.extend(data, {
+                            iopsreadrate: args.data.diskIopsReadRate
+                        });
+                    }
+                
+                	if (args.data.diskIopsWriteRate != null && args.data.diskIopsWriteRate.length > 0) {
+                        $.extend(data, {
+                            iopswriterate: args.data.diskIopsWriteRate
+                        });
+                    }
+				}
 
                 if(args.data.tags != null && args.data.tags.length > 0) {
 								  $.extend(data, {
@@ -1104,26 +1231,6 @@
 									  domainid: args.data.domainId
 									});		
 								}
-                if(args.data.diskBytesReadRate != null && args.data.diskBytesReadRate.length > 0) {
-                                                                  $.extend(data, {
-                                                                          bytesreadrate: args.data.diskBytesReadRate
-                                                                        });
-                                                                }
-                if(args.data.diskBytesWriteRate != null && args.data.diskBytesWriteRate.length > 0) {
-                                                                  $.extend(data, {
-                                                                          byteswriterate: args.data.diskBytesWriteRate
-                                                                        });
-                                                                }
-                if(args.data.diskIopsReadRate != null && args.data.diskIopsReadRate.length > 0) {
-                                                                  $.extend(data, {
-                                                                          iopsreadrate: args.data.diskIopsReadRate
-                                                                        });
-                                                                }
-                if(args.data.diskIopsWriteRate != null && args.data.diskIopsWriteRate.length > 0) {
-                                                                  $.extend(data, {
-                                                                          iopswriterate: args.data.diskIopsWriteRate
-                                                                        });
-                                                                }
 
                 $.ajax({
                   url: createURL('createDiskOffering'),
@@ -1236,6 +1343,28 @@
                           return "N/A";
                       }
                     },
+                    iscustomizediops: {
+                      label: 'label.custom.disk.iops',
+                      converter: cloudStack.converters.toBooleanText
+                    },
+                    miniops: {
+                      label: 'label.disk.iops.min',
+                      converter: function(args) {
+                        if(args > 0)
+                          return args;
+                        else
+                          return "N/A";
+                      }
+                    },
+                    maxiops: {
+                      label: 'label.disk.iops.max',
+                      converter: function(args) {
+                        if(args > 0)
+                          return args;
+                        else
+                          return "N/A";
+                      }
+                    },
                     diskBytesReadRate: { label: 'label.disk.bytes.write.rate' },
                     diskBytesWriteRate: { label: 'label.disk.bytes.write.rate' },
                     diskIopsReadRate: { label: 'label.disk.iops.write.rate' },

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/ui/scripts/docs.js
----------------------------------------------------------------------
diff --git a/ui/scripts/docs.js b/ui/scripts/docs.js
index 5aa352a..635e619 100755
--- a/ui/scripts/docs.js
+++ b/ui/scripts/docs.js
@@ -270,6 +270,10 @@ cloudStack.docs = {
   desc: 'Type of disk for the VM. Local is attached to the hypervisor host where the VM is running. Shared is storage accessible via NFS.',
   externalLink: ''
   },
+  helpDiskOfferingQoSType: {
+  desc: 'Type of Quality of Service desired, if any.',
+  externalLink: ''
+  },
   helpDiskOfferingCustomDiskSize: {
   desc: 'If checked, the user can set their own disk size. If not checked, the root administrator must define a value in Disk Size.',
   externalLink: ''
@@ -278,6 +282,18 @@ cloudStack.docs = {
   desc: 'Appears only if Custom Disk Size is not selected. Define the volume size in GB.',
   externalLink: ''
   },
+  helpDiskOfferingCustomDiskIops: {
+  desc: 'If checked, the user can set Min and Max IOPS. If not checked, the root administrator can define such values.',
+  externalLink: ''
+  },
+  helpDiskOfferingDiskIopsMin: {
+  desc: 'Appears only if Custom IOPS is not selected. Define the minimum volume IOPS.',
+  externalLink: ''
+  },
+  helpDiskOfferingDiskIopsMax: {
+  desc: 'Appears only if Custom IOPS is not selected. Define the maximum volume IOPS.',
+  externalLink: ''
+  },
   helpDiskOfferingStorageTags: {
   desc: 'Comma-separated list of attributes that should be associated with the primary storage for this disk. For example "ssd,blue".',
   externalLink: ''


[21/50] [abbrv] git commit: updated refs/heads/vmsync to f737019

Posted by ah...@apache.org.
Set all templates/volumes to Ready in the simulator

Signed-off-by: Prasanna Santhanam <ts...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/57641d85
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/57641d85
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/57641d85

Branch: refs/heads/vmsync
Commit: 57641d85f7a1b235098ea8cd3e8927c883c86b76
Parents: 0dc5b0d
Author: Prasanna Santhanam <ts...@apache.org>
Authored: Thu Jun 27 20:16:42 2013 +0530
Committer: Prasanna Santhanam <ts...@apache.org>
Committed: Sat Jun 29 13:53:41 2013 +0530

----------------------------------------------------------------------
 client/tomcatconf/nonossComponentContext.xml.in |  1 -
 .../driver/SimulatorImageStoreDriverImpl.java   | 45 ++++++++++----------
 2 files changed, 23 insertions(+), 23 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/57641d85/client/tomcatconf/nonossComponentContext.xml.in
----------------------------------------------------------------------
diff --git a/client/tomcatconf/nonossComponentContext.xml.in b/client/tomcatconf/nonossComponentContext.xml.in
index 4ea1d31..e7828e1 100644
--- a/client/tomcatconf/nonossComponentContext.xml.in
+++ b/client/tomcatconf/nonossComponentContext.xml.in
@@ -183,7 +183,6 @@
      
 -->
 
-
   <!--Motion Strategies-->
   <bean id="vmwareStorageMotionStrategy" class="org.apache.cloudstack.storage.motion.VmwareStorageMotionStrategy" />
   <bean id="dataMotionServiceImpl" class="org.apache.cloudstack.storage.motion.DataMotionServiceImpl">

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/57641d85/plugins/storage/image/simulator/src/org/apache/cloudstack/storage/datastore/driver/SimulatorImageStoreDriverImpl.java
----------------------------------------------------------------------
diff --git a/plugins/storage/image/simulator/src/org/apache/cloudstack/storage/datastore/driver/SimulatorImageStoreDriverImpl.java b/plugins/storage/image/simulator/src/org/apache/cloudstack/storage/datastore/driver/SimulatorImageStoreDriverImpl.java
index 74177ed..8816fc2 100644
--- a/plugins/storage/image/simulator/src/org/apache/cloudstack/storage/datastore/driver/SimulatorImageStoreDriverImpl.java
+++ b/plugins/storage/image/simulator/src/org/apache/cloudstack/storage/datastore/driver/SimulatorImageStoreDriverImpl.java
@@ -25,6 +25,7 @@ import com.cloud.agent.api.to.DataObjectType;
 import com.cloud.agent.api.to.DataStoreTO;
 import com.cloud.agent.api.to.NfsTO;
 import com.cloud.storage.Storage;
+import com.cloud.storage.VMTemplateStorageResourceAssoc;
 import com.cloud.storage.VMTemplateVO;
 import com.cloud.storage.VolumeVO;
 import com.cloud.storage.dao.VMTemplateDao;
@@ -32,6 +33,7 @@ import com.cloud.storage.dao.VolumeDao;
 import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine;
 import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher;
 import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
 import org.apache.cloudstack.framework.async.AsyncRpcContext;
@@ -41,6 +43,8 @@ import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao;
 import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO;
 import org.apache.cloudstack.storage.image.BaseImageStoreDriverImpl;
 import org.apache.cloudstack.storage.image.store.ImageStoreImpl;
+import org.apache.cloudstack.storage.to.TemplateObjectTO;
+import org.apache.cloudstack.storage.to.VolumeObjectTO;
 
 import javax.inject.Inject;
 import java.util.Date;
@@ -93,57 +97,54 @@ public class SimulatorImageStoreDriverImpl extends BaseImageStoreDriverImpl {
 
     protected Void createTemplateAsyncCallback(AsyncCallbackDispatcher<SimulatorImageStoreDriverImpl, DownloadAnswer> callback,
                                                CreateContext<CreateCmdResult> context) {
-        DownloadAnswer answer = callback.getResult();
         DataObject obj = context.data;
         DataStore store = obj.getDataStore();
+        TemplateObjectTO templateTO = (TemplateObjectTO)context.data.getTO();
 
         TemplateDataStoreVO tmpltStoreVO = _templateStoreDao.findByStoreTemplate(store.getId(), obj.getId());
         if (tmpltStoreVO != null) {
             TemplateDataStoreVO updateBuilder = _templateStoreDao.createForUpdate();
-            updateBuilder.setDownloadPercent(answer.getDownloadPct());
-            updateBuilder.setDownloadState(answer.getDownloadStatus());
+            updateBuilder.setDownloadPercent(100);
+            updateBuilder.setDownloadState(VMTemplateStorageResourceAssoc.Status.DOWNLOADED);
             updateBuilder.setLastUpdated(new Date());
-            updateBuilder.setErrorString(answer.getErrorString());
-            updateBuilder.setJobId(answer.getJobId());
-            updateBuilder.setLocalDownloadPath(answer.getDownloadPath());
-            updateBuilder.setInstallPath(answer.getInstallPath());
-            updateBuilder.setSize(answer.getTemplateSize());
-            updateBuilder.setPhysicalSize(answer.getTemplatePhySicalSize());
+            updateBuilder.setSize(new Long(5 * 1024L * 1024L));
+            updateBuilder.setPhysicalSize(new Long(5 * 1024L * 1024L));
+            updateBuilder.setDownloadUrl(templateTO.getOrigUrl());
+            updateBuilder.setInstallPath(templateTO.getPath());
+            updateBuilder.setTemplateId(templateTO.getId());
+            updateBuilder.setState(ObjectInDataStoreStateMachine.State.Ready);
             _templateStoreDao.update(tmpltStoreVO.getId(), updateBuilder);
             // update size in vm_template table
             VMTemplateVO tmlptUpdater = _templateDao.createForUpdate();
-            tmlptUpdater.setSize(answer.getTemplateSize());
+            tmlptUpdater.setSize(new Long(5 * 1024l * 1024l));
             _templateDao.update(obj.getId(), tmlptUpdater);
         }
-
         return null;
     }
 
     protected Void createVolumeAsyncCallback(AsyncCallbackDispatcher<SimulatorImageStoreDriverImpl, DownloadAnswer> callback,
                                              CreateContext<CreateCmdResult> context) {
-        DownloadAnswer answer = callback.getResult();
         DataObject obj = context.data;
         DataStore store = obj.getDataStore();
+        VolumeObjectTO volumeTO = (VolumeObjectTO) context.data.getTO();
 
         VolumeDataStoreVO volStoreVO = _volumeStoreDao.findByStoreVolume(store.getId(), obj.getId());
         if (volStoreVO != null) {
             VolumeDataStoreVO updateBuilder = _volumeStoreDao.createForUpdate();
-            updateBuilder.setDownloadPercent(answer.getDownloadPct());
-            updateBuilder.setDownloadState(answer.getDownloadStatus());
+            updateBuilder.setDownloadPercent(100);
+            updateBuilder.setDownloadState(VMTemplateStorageResourceAssoc.Status.DOWNLOADED);
             updateBuilder.setLastUpdated(new Date());
-            updateBuilder.setErrorString(answer.getErrorString());
-            updateBuilder.setJobId(answer.getJobId());
-            updateBuilder.setLocalDownloadPath(answer.getDownloadPath());
-            updateBuilder.setInstallPath(answer.getInstallPath());
-            updateBuilder.setSize(answer.getTemplateSize());
-            updateBuilder.setPhysicalSize(answer.getTemplatePhySicalSize());
+            updateBuilder.setInstallPath(volumeTO.getPath());
+            updateBuilder.setVolumeId(volumeTO.getVolumeId());
+            updateBuilder.setSize(volumeTO.getSize());
+            updateBuilder.setPhysicalSize(volumeTO.getSize());
+            updateBuilder.setState(ObjectInDataStoreStateMachine.State.Ready);
             _volumeStoreDao.update(volStoreVO.getId(), updateBuilder);
             // update size in volume table
             VolumeVO volUpdater = _volumeDao.createForUpdate();
-            volUpdater.setSize(answer.getTemplateSize());
+            volUpdater.setSize(volumeTO.getSize());
             _volumeDao.update(obj.getId(), volUpdater);
         }
-
         return null;
     }
 }
\ No newline at end of file


[35/50] [abbrv] git commit: updated refs/heads/vmsync to f737019

Posted by ah...@apache.org.
Fix attr plugin import and class reference

Signed-off-by: Prasanna Santhanam <ts...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/34eabd95
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/34eabd95
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/34eabd95

Branch: refs/heads/vmsync
Commit: 34eabd957c8b1f8e7587cd9206bfc3f7c886293b
Parents: 40f3852
Author: Prasanna Santhanam <ts...@apache.org>
Authored: Sun Jun 30 21:43:08 2013 +0530
Committer: Prasanna Santhanam <ts...@apache.org>
Committed: Sun Jun 30 21:44:16 2013 +0530

----------------------------------------------------------------------
 test/integration/component/test_advancedsg_networks.py | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/34eabd95/test/integration/component/test_advancedsg_networks.py
----------------------------------------------------------------------
diff --git a/test/integration/component/test_advancedsg_networks.py b/test/integration/component/test_advancedsg_networks.py
index f8774be..4834351 100644
--- a/test/integration/component/test_advancedsg_networks.py
+++ b/test/integration/component/test_advancedsg_networks.py
@@ -24,9 +24,8 @@ from marvin.cloudstackAPI import *
 from marvin.integration.lib.utils import *
 from marvin.integration.lib.base import *
 from marvin.integration.lib.common import *
-from marvin.remoteSSHClient import remoteSSHClient
-import datetime
 import netaddr
+from nose.plugins.attrib import attr
 
 class Services:
     """ Test networks in advanced zone with security groups"""
@@ -156,7 +155,7 @@ class TestNetworksInAdvancedSG(cloudstackTestCase):
     @classmethod
     def setUpClass(cls):
         cls.api_client = super(
-                               TestSharedNetworks,
+                               TestNetworksInAdvancedSG,
                                cls
                                ).getClsTestClient().getApiClient()
         
@@ -687,7 +686,7 @@ class TestNetworksInAdvancedSG(cloudstackTestCase):
             "The network offering state should get updated to Enabled."
             )
         
-        physical_network = list_physical_networks_response[0]        
+        physical_network = PhysicalNetwork.list(self.api_client)[0]
 
 	    #create network using the shared network offering created
         self.services["shared_network_sg"]["acltype"] = "domain"


[27/50] [abbrv] git commit: updated refs/heads/vmsync to f737019

Posted by ah...@apache.org.
Add docs for plugin development


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/15a68447
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/15a68447
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/15a68447

Branch: refs/heads/vmsync
Commit: 15a6844784142141af22224489a57d7d748c6687
Parents: 676d874
Author: Ian Duffy <ia...@ianduffy.ie>
Authored: Fri Jun 28 13:33:20 2013 +0100
Committer: Sebastien Goasguen <ru...@gmail.com>
Committed: Sat Jun 29 15:52:36 2013 -0400

----------------------------------------------------------------------
 docs/en-US/creating-a-plugin.xml        |  29 ++++
 docs/en-US/creating-my-first-plugin.xml | 216 +++++++++++++++++++++++++++
 docs/en-US/plugin-development.xml       |  28 ++++
 3 files changed, 273 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/15a68447/docs/en-US/creating-a-plugin.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/creating-a-plugin.xml b/docs/en-US/creating-a-plugin.xml
new file mode 100644
index 0000000..448d4e6
--- /dev/null
+++ b/docs/en-US/creating-a-plugin.xml
@@ -0,0 +1,29 @@
+<?xml version='1.0' encoding='utf-8' ?>
+<!DOCTYPE section PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd" [
+<!ENTITY % BOOK_ENTITIES SYSTEM "cloudstack.ent">
+%BOOK_ENTITIES;
+]>
+
+<!-- Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+ 
+   http://www.apache.org/licenses/LICENSE-2.0
+ 
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied.  See the License for the
+ specific language governing permissions and limitations
+ under the License.
+-->
+
+<chapter id="plugin-development">
+  <title>Plugin Development</title>
+  <para>This chapter will detail different elements related to the development of plugins within Cloudstack</para>
+  <xi:include href="creating-my-first-plugin.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
+</chapter>

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/15a68447/docs/en-US/creating-my-first-plugin.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/creating-my-first-plugin.xml b/docs/en-US/creating-my-first-plugin.xml
new file mode 100644
index 0000000..3809fd3
--- /dev/null
+++ b/docs/en-US/creating-my-first-plugin.xml
@@ -0,0 +1,216 @@
+<?xml version='1.0' encoding='utf-8'?>
+<!-- Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+ 
+   http://www.apache.org/licenses/LICENSE-2.0
+ 
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied.  See the License for the
+ specific language governing permissions and limitations
+ under the License.
+-->
+<section id="creating-my-first-plugin">
+    <title>Creating my first plugin</title>
+    <para>This is a brief walk through of creating a simple plugin that adds an additional command to the API to return the message "Hello World".</para>
+    <section id="letting-cloudstack-know-about-the-plugin">
+        <title>Letting Cloudstack know about the plugin</title>
+        <para>Before we can begin we need to tell Cloudstack about the existence of our plugin. In order to do this we are required to edit some files related to the cloud-client-ui module</para>
+        <orderedlist>
+            <listitem>
+                <para>Navigate to the folder called client</para>
+            </listitem>
+            <listitem>
+                <para>Open pom.xml and add a dependency, this will look something like the following: </para>
+                <example>
+                    <title>client/pom.xml</title>
+                    <programlisting language="XML">&lt;dependency&gt;
+  &lt;groupId&gt;org.apache.cloudstack&lt;/groupId&gt;
+  &lt;artifactId&gt;cloud-plugin-api-helloworld&lt;/artifactId&gt;
+  &lt;version&gt;${project.version}&lt;/version&gt;
+&lt;/dependency&gt;</programlisting>
+                </example>
+            </listitem>
+            <listitem>
+                <para>Continuing with client as your working directory open up tomcatconf/applicationContext.xml.in</para>
+            </listitem>
+            <listitem>
+                <para>Within this file we must insert a bean to load our class:</para>
+                <example>
+                    <title>client/tomcatconf/applicationContext.xml.in</title>
+                    <programlisting language="XML">&lt;bean id="helloWorldImpl" class="org.apache.cloudstack.helloworld.HelloWorldImpl" /&gt;</programlisting>
+                </example>
+            </listitem>
+            <listitem>
+                <para>Finally we need to register the additional API commands we add. Again with client as your working directory this is done by modifying tomcatconf/commands.properties.in</para>
+            </listitem>
+            <listitem>
+                <para>Within the file we simply add the names of the API commands we want to create followed by a permission number. 1 = admin, 2 = resource domain admin, 4 = domain admin, 8 = user.</para>
+                <example>
+                    <title>tomcatconf/commands.properties.in</title>
+                    <programlisting>helloWorld=8</programlisting>
+                </example>
+            </listitem>
+        </orderedlist>
+    </section>
+    <section id="creating-the-plugin">
+        <title>Creating the plugin</title>
+        <para>Within the Cloudstack file structure all plugins live under the plugins folder. Since the sample plugin for this document is going to be API related it will live in plugins/api/helloworld. Along with this we will need a standard maven package layout, so let's create all the required folders:</para>
+        <programlisting language="Bash">$ mkdir -p plugins/api/helloworld/{src,target,test}
+$ mkdir -p plugins/api/helloworld/src/org/apache/cloudstack/{api,helloworld}
+$ mkdir -p plugins/api/helloworld/src/org/apache/cloudstack/api/{command,response}
+$ mkdir -p plugins/api/helloworld/src/org/apache/cloudstack/api/command/user/helloworld</programlisting>
+        <para>With helloworld as our working directory we should have a tree layout like the following:</para>
+        <programlisting language="Bash">$ cd plugins/api/helloworld
+$ tree
+.
+|-- src
+|   `-- org
+|       `-- apache
+|           `-- cloudstack
+|               |-- api
+|               |   |-- command
+|               |   |   `-- user
+|               |   |       `-- helloworld
+|               |   |-- response
+|               `-- helloworld
+|-- target
+`-- test
+ 
+12 directories, 0 files</programlisting>
+        <para>First we will create a pom.xml for our plugin:</para>
+        <example>
+            <title>plugins/api/helloworld/pom.xml</title>
+            <programlisting language="XML">&lt;project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"&gt;
+  &lt;modelVersion&gt;4.0.0&lt;/modelVersion&gt;
+  &lt;artifactId&gt;cloud-plugin-api-helloworld&lt;/artifactId&gt;
+  &lt;name&gt;Apache CloudStack Plugin - Hello World Plugin&lt;/name&gt;
+  &lt;parent&gt;
+    &lt;groupId&gt;org.apache.cloudstack&lt;/groupId&gt;
+    &lt;artifactId&gt;cloudstack-plugins&lt;/artifactId&gt;
+    &lt;version&gt;4.2.0-SNAPSHOT&lt;/version&gt;
+    &lt;relativePath&gt;../../pom.xml&lt;/relativePath&gt;
+  &lt;/parent&gt;
+  &lt;dependencies&gt;
+    &lt;dependency&gt;
+      &lt;groupId&gt;org.apache.cloudstack&lt;/groupId&gt;
+      &lt;artifactId&gt;cloud-api&lt;/artifactId&gt;
+      &lt;version&gt;${project.version}&lt;/version&gt;
+    &lt;/dependency&gt;
+    &lt;dependency&gt;
+      &lt;groupId&gt;org.apache.cloudstack&lt;/groupId&gt;
+      &lt;artifactId&gt;cloud-utils&lt;/artifactId&gt;
+      &lt;version&gt;${project.version}&lt;/version&gt;
+    &lt;/dependency&gt;
+  &lt;/dependencies&gt;
+  &lt;build&gt;
+    &lt;defaultGoal&gt;install&lt;/defaultGoal&gt;
+    &lt;sourceDirectory&gt;src&lt;/sourceDirectory&gt;
+    &lt;testSourceDirectory&gt;test&lt;/testSourceDirectory&gt;
+  &lt;/build&gt;
+&lt;/project&gt;</programlisting>
+        </example>
+        <para>Next we need to make the root plugin pom aware of our plugin to do this simply edit plugins/pom.xml inserting a line like the following:</para>
+        <programlisting language="XML">......
+&lt;module&gt;api/helloworld&lt;/module&gt;
+......</programlisting>
+        <para>Finally we will begin to create code for your plugin. Create an interface called HelloWorld that will extend PluggableService within src/org/apache/cloudstack/helloworld</para>
+        <programlisting language="Java">package org.apache.cloudstack.helloworld;
+  
+import com.cloud.utils.component.PluggableService;
+  
+public interface HelloWorld extends PluggableService { }</programlisting>
+        <para>Create an implementation of HelloWorld called HelloWorldImpl:</para>
+        <programlisting language="Java">package org.apache.cloudstack.helloworld;
+ 
+import org.apache.cloudstack.api.command.user.helloworld.HelloWorldCmd;
+import org.apache.log4j.Logger;
+import org.springframework.stereotype.Component;
+ 
+import javax.ejb.Local;
+import java.util.*;
+ 
+@Component
+@Local(value = HelloWorld.class)
+public class HelloWorldImpl implements HelloWorld {
+    private static final Logger s_logger = Logger.getLogger(HelloWorldImpl.class);
+     
+    public HelloWorldImpl() {
+        super();
+    }
+    /**
+     * This informs cloudstack of the API commands you are creating.
+     */
+    @Override
+    public List&lt;Class&lt;?&gt;&gt; getCommands() {
+        List&lt;Class&lt;?&gt;&gt; cmdList = new ArrayList&lt;Class&lt;?&gt;&gt;();
+        cmdList.add(HelloWorldCmd.class);
+        return cmdList;
+    }
+}</programlisting>
+        <para>Next we will create our API command navigate to src/org/apache/cloudstack/api/command/user/helloworld and open up HelloWorldCmd.java, create it as follows</para>
+        <programlisting language="Java">package org.apache.cloudstack.api.command.user.helloworld;
+ 
+import org.apache.cloudstack.api.APICommand;
+import org.apache.cloudstack.api.BaseCmd;
+import org.apache.cloudstack.api.response.HelloWorldResponse;
+import org.apache.log4j.Logger;
+ 
+// Note this name matches the name you inserted into client/tomcatconf/commands.properties.in
+@APICommand(name = "helloWorld", responseObject = HelloWorldResponse.class, description = "Returns a hello world message", since = "4.2.0")
+public class HelloWorldCmd extends BaseCmd {
+    public static final Logger s_logger = Logger.getLogger(HelloWorldCmd.class.getName());
+    private static final String s_name = "helloworldresponse";
+  
+    @Override
+    public void execute()
+    {
+        HelloWorldResponse response = new HelloWorldResponse();
+        response.setObjectName("helloworld");
+        response.setResponseName(getCommandName());
+        this.setResponseObject(response);
+    }
+  
+    @Override
+    public String getCommandName() {
+        return s_name;
+    }
+  
+    @Override
+    public long getEntityOwnerId() {
+        return 0;
+    }
+}</programlisting>
+        <para>Finally we need to create our HelloWorldResponse class, this will exist within src/org/apache/cloudstack/api/response/</para>
+        <programlisting language="Java">package org.apache.cloudstack.api.response;
+ 
+import com.google.gson.annotations.SerializedName;
+import org.apache.cloudstack.api.BaseResponse;
+import com.cloud.serializer.Param;
+ 
+@SuppressWarnings("unused")
+public class HelloWorldResponse extends BaseResponse {
+    @SerializedName("HelloWorld") @Param(description="HelloWorld Response")
+    private String  HelloWorld;
+  
+    public HelloWorldResponse(){
+        this.HelloWorld = "Hello World";
+    }
+}</programlisting>
+    </section>
+    <section id="compiling-your-plugin">
+        <title>Compiling your plugin:</title>
+        <para>Within the directory of your plugin i.e. plugins/api/helloworld run mvn clean install.</para>
+        <para>After this we need to recompile the cloud-client-ui; to do this, come back to the cloudstack base directory and execute mvn -pl client clean install</para>
+    </section>
+    <section id="starting-cloudstack-and-testing">
+        <title>Starting Cloudstack and Testing:</title>
+        <para>Start up cloudstack with the normal mvn -pl :cloud-client-ui jetty:run, wait a few moments for it to start up, then head over to: localhost:8096/client/api?command=helloWorld and you should see your HelloWorld message.</para>
+    </section>
+</section>

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/15a68447/docs/en-US/plugin-development.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/plugin-development.xml b/docs/en-US/plugin-development.xml
new file mode 100644
index 0000000..0492877
--- /dev/null
+++ b/docs/en-US/plugin-development.xml
@@ -0,0 +1,28 @@
+<?xml version='1.0' encoding='utf-8' ?>
+<!DOCTYPE section PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd" [
+<!ENTITY % BOOK_ENTITIES SYSTEM "cloudstack.ent">
+%BOOK_ENTITIES;
+]>
+
+<!-- Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+ 
+   http://www.apache.org/licenses/LICENSE-2.0
+ 
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied.  See the License for the
+ specific language governing permissions and limitations
+ under the License.
+-->
+
+<chapter id="plugin-development">
+  <title>Plugin Development</title>
+  <xi:include href="creating-my-first-plugin.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
+</chapter>


[34/50] [abbrv] git commit: updated refs/heads/vmsync to f737019

Posted by ah...@apache.org.
It makes more sense to print the task uuid than the task type in the trace message

Add retry counter to exiting pong command message

Some additional code cleanup courtesy eclipse save actions, thanks Alex


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/40f38521
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/40f38521
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/40f38521

Branch: refs/heads/vmsync
Commit: 40f38521c4bcb17d2a90896d6837e131e6c1afe5
Parents: 4c0425f
Author: Hugo Trippaers <ht...@schubergphilis.com>
Authored: Sat Jun 29 21:29:38 2013 -0700
Committer: Hugo Trippaers <ht...@schubergphilis.com>
Committed: Sun Jun 30 12:10:59 2013 +0200

----------------------------------------------------------------------
 .../xen/resource/CitrixResourceBase.java        | 306 ++++++++-----------
 1 file changed, 135 insertions(+), 171 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/40f38521/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java
index 6f2e842..e07df0b 100644
--- a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java
+++ b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java
@@ -11,7 +11,7 @@
 // Unless required by applicable law or agreed to in writing,
 // software distributed under the License is distributed on an
 // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the 
+// KIND, either express or implied.  See the License for the
 // specific language governing permissions and limitations
 // under the License.
 package com.cloud.hypervisor.xen.resource;
@@ -758,9 +758,9 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
             Set<VM> vmSnapshots = VM.getByNameLabel(conn, cmd.getTarget().getSnapshotName());
             if(vmSnapshots.size() == 0)
                 return new RevertToVMSnapshotAnswer(cmd, false, "Cannot find vmSnapshot with name: " + cmd.getTarget().getSnapshotName());
-            
+
             VM vmSnapshot = vmSnapshots.iterator().next();
-            
+
             // find target VM or creating a work VM
             try {
                 vm = getVM(conn, vmName);
@@ -772,7 +772,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
                 return new RevertToVMSnapshotAnswer(cmd, false,
                         "Revert to VM Snapshot Failed due to can not find vm: " + vmName);
             }
-            
+
             // call plugin to execute revert
             revertToSnapshot(conn, vmSnapshot, vmName, vm.getUuid(conn), snapshotMemory, _host.uuid);
             vm = getVM(conn, vmName);
@@ -807,13 +807,13 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
             s_logger.error("revert vm " + vmName
                     + " to snapshot " + cmd.getTarget().getSnapshotName() + " failed due to " + e.getMessage());
             return new RevertToVMSnapshotAnswer(cmd, false, e.getMessage());
-        } 
+        }
     }
 
     private String revertToSnapshot(Connection conn, VM vmSnapshot,
             String vmName, String oldVmUuid, Boolean snapshotMemory, String hostUUID)
-            throws XenAPIException, XmlRpcException {
- 
+                    throws XenAPIException, XmlRpcException {
+
         String results = callHostPluginAsync(conn, "vmopsSnapshot",
                 "revert_memory_snapshot", 10 * 60 * 1000, "snapshotUUID",
                 vmSnapshot.getUuid(conn), "vmName", vmName, "oldVmUuid",
@@ -960,10 +960,10 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
                 nw = Network.create(conn, rec);
                 // Plug dom0 vif only when creating network
                 enableXenServerNetwork(conn, nw, nwName, "tunnel network for account " + key);
-                s_logger.debug("### Xen Server network for tunnels created:" + nwName);                
+                s_logger.debug("### Xen Server network for tunnels created:" + nwName);
             } else {
                 nw = networks.iterator().next();
-                s_logger.debug("Xen Server network for tunnels found:" + nwName);                
+                s_logger.debug("Xen Server network for tunnels found:" + nwName);
             }
             return nw;
         } catch (Exception e) {
@@ -1092,10 +1092,10 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
         vifr.device = Integer.toString(nic.getDeviceId());
         vifr.MAC = nic.getMac();
 
-        // Nicira needs these IDs to find the NIC 
+        // Nicira needs these IDs to find the NIC
         vifr.otherConfig = new HashMap<String, String>();
         vifr.otherConfig.put("nicira-iface-id", nic.getUuid());
-        vifr.otherConfig.put("nicira-vm-id", vm.getUuid(conn)); 
+        vifr.otherConfig.put("nicira-vm-id", vm.getUuid(conn));
 
         vifr.network = getNetwork(conn, nic);
 
@@ -1112,7 +1112,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
             vifr = vif.getRecord(conn);
             s_logger.debug("Created a vif " + vifr.uuid + " on " + nic.getDeviceId());
         }
-        
+
         return vif;
     }
 
@@ -1157,12 +1157,12 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
         if (type == Volume.Type.ISO) {
             TemplateObjectTO iso = (TemplateObjectTO)data;
             DataStoreTO store = iso.getDataStore();
-            
+
             if (store == null) {
                 //It's a fake iso
                 return null;
             }
-            
+
             //corer case, xenserver pv driver iso
             String templateName = iso.getName();
             if (templateName.startsWith("xs-tools")) {
@@ -1178,7 +1178,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
                     throw new CloudRuntimeException("Unable to get pv iso: " + templateName + " due to " + e.toString());
                 }
             }
-            
+
             if (!(store instanceof NfsTO)) {
                 throw new CloudRuntimeException("only support mount iso on nfs");
             }
@@ -1408,7 +1408,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
                 if (network.getNameLabel(conn).startsWith("VLAN")) {
                     disableVlanNetwork(conn, network);
                 }
-            }                    
+            }
         } catch (Exception e) {
             s_logger.warn("VM getRecord failed due to ", e);
         }
@@ -1543,54 +1543,54 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
             }
         }
     }
-    
+
     private Answer execute(PvlanSetupCommand cmd) {
-    	Connection conn = getConnection();
-    	
-    	String primaryPvlan = cmd.getPrimary();
-    	String isolatedPvlan = cmd.getIsolated();
-    	String op = cmd.getOp();
-    	String dhcpName = cmd.getDhcpName();
-    	String dhcpMac = cmd.getDhcpMac();
-    	String dhcpIp = cmd.getDhcpIp();
-    	String vmMac = cmd.getVmMac();
-    	String networkTag = cmd.getNetworkTag();
-    	
-    	XsLocalNetwork nw = null;
-    	String nwNameLabel = null;
-    	try {
-			nw = getNativeNetworkForTraffic(conn, TrafficType.Guest, networkTag);
-			nwNameLabel = nw.getNetwork().getNameLabel(conn);
-		} catch (XenAPIException e) {
-			s_logger.warn("Fail to get network", e);
-    		return new Answer(cmd, false, e.toString());
-		} catch (XmlRpcException e) {
-			s_logger.warn("Fail to get network", e);
-    		return new Answer(cmd, false, e.toString());
-		}
-    	
-    	String result = null;
-    	if (cmd.getType() == PvlanSetupCommand.Type.DHCP) {
-    		result = callHostPlugin(conn, "ovs-pvlan", "setup-pvlan-dhcp", "op", op, "nw-label", nwNameLabel,
-    				"primary-pvlan", primaryPvlan, "isolated-pvlan", isolatedPvlan, "dhcp-name", dhcpName,
-    				"dhcp-ip", dhcpIp, "dhcp-mac", dhcpMac);
-    		if (result == null || result.isEmpty() || !Boolean.parseBoolean(result)) {
-    			s_logger.warn("Failed to program pvlan for dhcp server with mac " + dhcpMac);
-    			return new Answer(cmd, false, result);
-    		} else {
-    			s_logger.info("Programmed pvlan for dhcp server with mac " + dhcpMac);
-    		}
-    	} else if (cmd.getType() == PvlanSetupCommand.Type.VM) {
-    		result = callHostPlugin(conn, "ovs-pvlan", "setup-pvlan-vm", "op", op, "nw-label", nwNameLabel,
-    				"primary-pvlan", primaryPvlan, "isolated-pvlan", isolatedPvlan, "vm-mac", vmMac);
-    		if (result == null || result.isEmpty() || !Boolean.parseBoolean(result)) {
-    			s_logger.warn("Failed to program pvlan for vm with mac " + vmMac);
-    			return new Answer(cmd, false, result);
-    		} else {
-    			s_logger.info("Programmed pvlan for vm with mac " + vmMac);
-    		}
-    	}
-    	return new Answer(cmd, true, result);
+        Connection conn = getConnection();
+
+        String primaryPvlan = cmd.getPrimary();
+        String isolatedPvlan = cmd.getIsolated();
+        String op = cmd.getOp();
+        String dhcpName = cmd.getDhcpName();
+        String dhcpMac = cmd.getDhcpMac();
+        String dhcpIp = cmd.getDhcpIp();
+        String vmMac = cmd.getVmMac();
+        String networkTag = cmd.getNetworkTag();
+
+        XsLocalNetwork nw = null;
+        String nwNameLabel = null;
+        try {
+            nw = getNativeNetworkForTraffic(conn, TrafficType.Guest, networkTag);
+            nwNameLabel = nw.getNetwork().getNameLabel(conn);
+        } catch (XenAPIException e) {
+            s_logger.warn("Fail to get network", e);
+            return new Answer(cmd, false, e.toString());
+        } catch (XmlRpcException e) {
+            s_logger.warn("Fail to get network", e);
+            return new Answer(cmd, false, e.toString());
+        }
+
+        String result = null;
+        if (cmd.getType() == PvlanSetupCommand.Type.DHCP) {
+            result = callHostPlugin(conn, "ovs-pvlan", "setup-pvlan-dhcp", "op", op, "nw-label", nwNameLabel,
+                    "primary-pvlan", primaryPvlan, "isolated-pvlan", isolatedPvlan, "dhcp-name", dhcpName,
+                    "dhcp-ip", dhcpIp, "dhcp-mac", dhcpMac);
+            if (result == null || result.isEmpty() || !Boolean.parseBoolean(result)) {
+                s_logger.warn("Failed to program pvlan for dhcp server with mac " + dhcpMac);
+                return new Answer(cmd, false, result);
+            } else {
+                s_logger.info("Programmed pvlan for dhcp server with mac " + dhcpMac);
+            }
+        } else if (cmd.getType() == PvlanSetupCommand.Type.VM) {
+            result = callHostPlugin(conn, "ovs-pvlan", "setup-pvlan-vm", "op", op, "nw-label", nwNameLabel,
+                    "primary-pvlan", primaryPvlan, "isolated-pvlan", isolatedPvlan, "vm-mac", vmMac);
+            if (result == null || result.isEmpty() || !Boolean.parseBoolean(result)) {
+                s_logger.warn("Failed to program pvlan for vm with mac " + vmMac);
+                return new Answer(cmd, false, result);
+            } else {
+                s_logger.info("Programmed pvlan for vm with mac " + vmMac);
+            }
+        }
+        return new Answer(cmd, true, result);
     }
 
     @Override
@@ -1717,7 +1717,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
             return new StartAnswer(cmd, msg);
         } finally {
             synchronized (_cluster.intern()) {
-                if (state != State.Stopped) { 
+                if (state != State.Stopped) {
                     s_vms.put(_cluster, _name, vmName, state);
                     s_logger.debug("2. The VM " + vmName + " is in " + state + " state.");
                 } else {
@@ -2162,7 +2162,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
         Connection conn = getConnection();
         String args = "-r " + cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP);
         if (cmd.getVmIpAddress() != null) {
-        args += " -v " + cmd.getVmIpAddress();
+            args += " -v " + cmd.getVmIpAddress();
         }
         args += " -m " + cmd.getVmMac();
         args += " -n " + cmd.getVmName();
@@ -2178,14 +2178,14 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
         }
 
         if (cmd.getVmIp6Address() != null) {
-        	args += " -6 " + cmd.getVmIp6Address();
-        	args += " -u " + cmd.getDuid();
+            args += " -6 " + cmd.getVmIp6Address();
+            args += " -u " + cmd.getDuid();
         }
-        
+
         if (!cmd.isDefault()) {
-        	args += " -z";
+            args += " -z";
         }
-        
+
         String result = callHostPlugin(conn, "vmops", "saveDhcpEntry", "args", args);
         if (result == null || result.isEmpty()) {
             return new Answer(cmd, false, "DhcpEntry failed");
@@ -2409,7 +2409,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
                     s_logger.debug("VIF to deassociate IP with does not exist, return success");
                     return;
                 }
-            }           
+            }
 
             String args = "vpc_ipassoc.sh " + routerIp;
             String snatArgs = "vpc_privateGateway.sh " + routerIp;
@@ -2672,14 +2672,14 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
             return new GetVmDiskStatsAnswer(cmd, "", cmd.getHostName(),vmDiskStatsNameMap);
         }
     }
-    
+
     private HashMap<String, List<VmDiskStatsEntry>> getVmDiskStats(Connection conn, GetVmDiskStatsCommand cmd, List<String> vmUUIDs, String hostGuid) {
         HashMap<String, List<VmDiskStatsEntry>> vmResponseMap = new HashMap<String, List<VmDiskStatsEntry>>();
 
         for (String vmUUID : vmUUIDs) {
             vmResponseMap.put(vmUUID, new ArrayList<VmDiskStatsEntry>());
         }
-        
+
         try {
             for (String vmUUID : vmUUIDs) {
                 VM vm = VM.getByUuid(conn, vmUUID);
@@ -2700,7 +2700,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
             s_logger.warn("Error while collecting disk stats from : ", e);
             return null;
         }
-        
+
         return vmResponseMap;
     }
 
@@ -3101,27 +3101,6 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
         }
     }
 
-    private String copy_vhd_to_secondarystorage(Connection conn, String mountpoint, String vdiuuid, String sruuid, int wait) {
-        String results = callHostPluginAsync(conn, "vmopspremium", "copy_vhd_to_secondarystorage",
-                wait, "mountpoint", mountpoint, "vdiuuid", vdiuuid, "sruuid", sruuid);
-        String errMsg = null;
-        if (results == null || results.isEmpty()) {
-            errMsg = "copy_vhd_to_secondarystorage return null";
-        } else {
-            String[] tmp = results.split("#");
-            String status = tmp[0];
-            if (status.equals("0")) {
-                return tmp[1];
-            } else {
-                errMsg = tmp[1];
-            }
-        }
-        String source = vdiuuid + ".vhd";
-        killCopyProcess(conn, source);
-        s_logger.warn(errMsg);
-        throw new CloudRuntimeException(errMsg);
-    }
-
     String upgradeSnapshot(Connection conn, String templatePath, String snapshotPath) {
         String results = callHostPluginAsync(conn, "vmopspremium", "upgrade_snapshot",
                 2 * 60 * 60, "templatePath", templatePath, "snapshotPath", snapshotPath);
@@ -3685,12 +3664,14 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
     protected void waitForTask(Connection c, Task task, long pollInterval, long timeout) throws XenAPIException, XmlRpcException {
         long beginTime = System.currentTimeMillis();
         if (s_logger.isTraceEnabled()) {
-            s_logger.trace("Task " + task.getNameLabel(c) + " (" + task.getType(c) + ") sent to " + c.getSessionReference() +  " is pending completion with a " + timeout + "ms timeout");
+            s_logger.trace("Task " + task.getNameLabel(c) + " (" + task.getUuid(c) + ") sent to " + c.getSessionReference()
+                    + " is pending completion with a " + timeout + "ms timeout");
         }
         while (task.getStatus(c) == Types.TaskStatusType.PENDING) {
             try {
                 if (s_logger.isTraceEnabled()) {
-                    s_logger.trace("Task " + task.getNameLabel(c) + " (" + task.getType(c) + ") is pending, sleeping for " + pollInterval + "ms");
+                    s_logger.trace("Task " + task.getNameLabel(c) + " (" + task.getUuid(c) + ") is pending, sleeping for "
+                            + pollInterval + "ms");
                 }
                 Thread.sleep(pollInterval);
             } catch (InterruptedException e) {
@@ -3707,7 +3688,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
     protected void checkForSuccess(Connection c, Task task) throws XenAPIException, XmlRpcException {
         if (task.getStatus(c) == Types.TaskStatusType.SUCCESS) {
             if (s_logger.isTraceEnabled()) {
-                s_logger.trace("Task " + task.getNameLabel(c) + " (" + task.getType(c) + ") completed");
+                s_logger.trace("Task " + task.getNameLabel(c) + " (" + task.getUuid(c) + ") completed");
             }
             return;
         } else {
@@ -3857,7 +3838,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
             other.put("live", "true");
             task = vm.poolMigrateAsync(conn, destHost, other);
             try {
-                // poll every 1 seconds 
+                // poll every 1 seconds
                 long timeout = (_migratewait) * 1000L;
                 waitForTask(conn, task, 1000, timeout);
                 checkForSuccess(conn, task);
@@ -4182,23 +4163,6 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
         return new StopAnswer(cmd, "Stop VM failed", false);
     }
 
-    private List<VDI> getVdis(Connection conn, VM vm) {
-        List<VDI> vdis = new ArrayList<VDI>();
-        try {
-            Set<VBD> vbds =vm.getVBDs(conn);
-            for( VBD vbd : vbds ) {
-                vdis.add(vbd.getVDI(conn));
-            }
-        } catch (XenAPIException e) {
-            String msg = "getVdis can not get VPD due to " + e.toString();
-            s_logger.warn(msg, e);
-        } catch (XmlRpcException e) {
-            String msg = "getVdis can not get VPD due to " + e.getMessage();
-            s_logger.warn(msg, e);
-        }
-        return vdis;
-    }
-
     protected String connect(Connection conn, final String vmName, final String ipAddress, final int port) {
         for (int i = 0; i <= _retry; i++) {
             try {
@@ -4214,7 +4178,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
                 return msg;
             }
             if (s_logger.isDebugEnabled()) {
-                s_logger.debug("Trying to connect to " + ipAddress);
+                s_logger.debug("Trying to connect to " + ipAddress + " attempt " + i + " of " + _retry);
             }
             if (pingdomr(conn, ipAddress, Integer.toString(port))) {
                 return null;
@@ -4851,7 +4815,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
             XsLocalNetwork privateNic = getManagementNetwork(conn);
             _privateNetworkName = privateNic.getNetworkRecord(conn).nameLabel;
             _host.privatePif = privateNic.getPifRecord(conn).uuid;
-            _host.privateNetwork = privateNic.getNetworkRecord(conn).uuid;           
+            _host.privateNetwork = privateNic.getNetworkRecord(conn).uuid;
             _host.systemvmisouuid = null;
 
             XsLocalNetwork guestNic = null;
@@ -5043,7 +5007,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
             }
         } catch (Throwable e) {
             s_logger.warn("Check for master failed, failing the FULL Cluster sync command");
-        } 
+        }
 
         StartupStorageCommand sscmd = initializeLocalSR(conn);
         if (sscmd != null) {
@@ -5343,7 +5307,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
                 if(!isNetworkSetupByName(info.getPrivateNetworkName())){
                     msg = "For Physical Network id:"+ info.getPhysicalNetworkId() + ", Private Network is not configured on the backend by name " + info.getPrivateNetworkName();
                     errorout = true;
-                    break;               
+                    break;
                 }
                 if(!isNetworkSetupByName(info.getPublicNetworkName())){
                     msg = "For Physical Network id:"+ info.getPhysicalNetworkId() + ", Public Network is not configured on the backend by name " + info.getPublicNetworkName();
@@ -5386,7 +5350,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
             }
         }
         return true;
-    }    
+    }
 
     protected List<File> getPatchFiles() {
         return null;
@@ -5710,9 +5674,9 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
                 return new OvsCreateTunnelAnswer(cmd, false, "Cannot create network", bridge);
             }
 
-            configureTunnelNetwork(conn, cmd.getNetworkId(), cmd.getFrom(), cmd.getKey());            
+            configureTunnelNetwork(conn, cmd.getNetworkId(), cmd.getFrom(), cmd.getKey());
             bridge = nw.getBridge(conn);
-            String result = callHostPlugin(conn, "ovstunnel", "create_tunnel", "bridge", bridge, "remote_ip", cmd.getRemoteIp(), 
+            String result = callHostPlugin(conn, "ovstunnel", "create_tunnel", "bridge", bridge, "remote_ip", cmd.getRemoteIp(),
                     "key", cmd.getKey().toString(), "from", cmd.getFrom().toString(), "to", cmd.getTo().toString());
             String[] res = result.split(":");
             if (res.length == 2 && res[0].equalsIgnoreCase("SUCCESS")) {
@@ -5819,7 +5783,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
             PIF pif = nw.getPif(conn);
             Record pifRec = pif.getRecord(conn);
             s_logger.debug("PIF object:" + pifRec.uuid + "(" + pifRec.device + ")");
-            return new OvsFetchInterfaceAnswer(cmd, true, "Interface " + pifRec.device + " retrieved successfully", 
+            return new OvsFetchInterfaceAnswer(cmd, true, "Interface " + pifRec.device + " retrieved successfully",
                     pifRec.IP, pifRec.netmask, pifRec.MAC);
         } catch (BadServerResponse e) {
             s_logger.error("An error occurred while fetching the interface for " +
@@ -5878,7 +5842,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
 
         if (!_canBridgeFirewall) {
             s_logger.warn("Host " + _host.ip + " cannot do bridge firewalling");
-            return new SecurityGroupRuleAnswer(cmd, false, 
+            return new SecurityGroupRuleAnswer(cmd, false,
                     "Host " + _host.ip + " cannot do bridge firewalling",
                     SecurityGroupRuleAnswer.FailureReason.CANNOT_BRIDGE_FIREWALL);
         }
@@ -5944,7 +5908,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
 
             if( hr.softwareVersion.get("product_version_text_short") != null ) {
                 details.put("product_version_text_short", hr.softwareVersion.get("product_version_text_short"));
-                cmd.setHypervisorVersion(hr.softwareVersion.get("product_version_text_short"));                
+                cmd.setHypervisorVersion(hr.softwareVersion.get("product_version_text_short"));
 
                 cmd.setHypervisorVersion(_host.product_version);
             }
@@ -6119,7 +6083,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
     }
 
     protected StorageSubsystemCommandHandler getStorageHandler() {
-    	XenServerStorageProcessor processor = new XenServerStorageProcessor(this);
+        XenServerStorageProcessor processor = new XenServerStorageProcessor(this);
         return new StorageSubsystemCommandHandlerBase(processor);
     }
 
@@ -6337,7 +6301,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
     }
 
     protected SR getIscsiSR(Connection conn, String srNameLabel, String target, String path,
-    		String chapInitiatorUsername, String chapInitiatorPassword, Boolean[] created) {
+            String chapInitiatorUsername, String chapInitiatorPassword, Boolean[] created) {
         synchronized (srNameLabel.intern()) {
             Map<String, String> deviceConfig = new HashMap<String, String>();
             try {
@@ -6387,7 +6351,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
                 deviceConfig.put("targetIQN", targetiqn);
 
                 if (StringUtils.isNotBlank(chapInitiatorUsername) &&
-                    StringUtils.isNotBlank(chapInitiatorPassword)) {
+                        StringUtils.isNotBlank(chapInitiatorPassword)) {
                     deviceConfig.put("chapuser", chapInitiatorUsername);
                     deviceConfig.put("chappassword", chapInitiatorPassword);
                 }
@@ -6445,7 +6409,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
                     created[0] = true; // note that the SR was created (as opposed to introduced)
                 } else {
                     sr = SR.introduce(conn, pooluuid, srNameLabel, srNameLabel,
-                        type, "user", true, smConfig);
+                            type, "user", true, smConfig);
 
                     Set<Host> setHosts = Host.getAll(conn);
 
@@ -6622,7 +6586,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
 
     // for about 1 GiB of physical size, about 4 MiB seems to be used for metadata
     private long getMetadata(long physicalSize) {
-    	return (long)(physicalSize * 0.00390625); // 1 GiB / 4 MiB = 0.00390625
+        return (long)(physicalSize * 0.00390625); // 1 GiB / 4 MiB = 0.00390625
     }
 
     protected VDI handleSrAndVdiAttach(String iqn, String storageHostName,
@@ -6634,8 +6598,8 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
         Boolean[] created = { false };
 
         SR sr = getIscsiSR(conn, iqn,
-                    storageHostName, iqn,
-                    chapInitiatorName, chapInitiatorPassword, created);
+                storageHostName, iqn,
+                chapInitiatorName, chapInitiatorPassword, created);
 
         // if created[0] is true, this means the SR was actually created...as opposed to introduced
         if (created[0]) {
@@ -6647,7 +6611,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
             vdir.virtualSize = sr.getPhysicalSize(conn) - sr.getPhysicalUtilisation(conn) - getMetadata(sr.getPhysicalSize(conn));
 
             if (vdir.virtualSize < 0) {
-            	throw new Exception("VDI virtual size cannot be less than 0.");
+                throw new Exception("VDI virtual size cannot be less than 0.");
             }
 
             vdi = VDI.create(conn, vdir);
@@ -6656,7 +6620,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
             vdi = sr.getVDIs(conn).iterator().next();
         }
 
-    	return vdi;
+        return vdi;
     }
 
     protected void handleSrAndVdiDetach(String iqn) throws Exception {
@@ -6804,7 +6768,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
             Set<VM> vmSnapshots = VM.getByNameLabel(conn, cmd.getTarget().getSnapshotName());
             if(vmSnapshots.size() > 0)
                 return new CreateVMSnapshotAnswer(cmd, cmd.getTarget(), cmd.getVolumeTOs());
-            
+
             // check if there is already a task for this VM snapshot
             Task task = null;
             Set<Task> tasks = Task.getByNameLabel(conn, "Async.VM.snapshot");
@@ -6817,7 +6781,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
                     }
                 }
             }
-            
+
             // create a new task if there is no existing task for this VM snapshot
             if(task == null){
                 try {
@@ -6827,13 +6791,13 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
                         vm = createWorkingVM(conn, vmName, guestOSType, listVolumeTo);
                     }
                 }
-    
+
                 if (vm == null) {
                     return new CreateVMSnapshotAnswer(cmd, false,
                             "Creating VM Snapshot Failed due to can not find vm: "
                                     + vmName);
                 }
-                
+
                 // call Xenserver API
                 if (!snapshotMemory) {
                     task = vm.snapshotAsync(conn, vmSnapshotName);
@@ -6853,15 +6817,15 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
                 }
                 task.addToOtherConfig(conn, "CS_VM_SNAPSHOT_KEY", vmSnapshotName);
             }
-            
+
             waitForTask(conn, task, 1000, timeout * 1000);
             checkForSuccess(conn, task);
             String result = task.getResult(conn);
-            
-            // extract VM snapshot ref from result 
+
+            // extract VM snapshot ref from result
             String ref = result.substring("<value>".length(), result.length() - "</value>".length());
             vmSnapshot = Types.toVM(ref);
-            
+
             success = true;
             return new CreateVMSnapshotAnswer(cmd, cmd.getTarget(), cmd.getVolumeTOs());
         } catch (Exception e) {
@@ -6906,11 +6870,11 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
             }
         }
     }
-    
+
     private VM createWorkingVM(Connection conn, String vmName,
             String guestOSType, List<VolumeTO> listVolumeTo)
-            throws BadServerResponse, VmBadPowerState, SrFull,
-            OperationNotAllowed, XenAPIException, XmlRpcException {
+                    throws BadServerResponse, VmBadPowerState, SrFull,
+                    OperationNotAllowed, XenAPIException, XmlRpcException {
         String guestOsTypeName = getGuestOsType(guestOSType, false);
         if (guestOsTypeName == null) {
             String msg = " Hypervisor " + this.getClass().getName()
@@ -6956,7 +6920,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
     protected Answer execute(final DeleteVMSnapshotCommand cmd) {
         String snapshotName = cmd.getTarget().getSnapshotName();
         Connection conn = getConnection();
-                
+
         try {
             List<VDI> vdiList = new ArrayList<VDI>();
             Set<VM> snapshots = VM.getByNameLabel(conn, snapshotName);
@@ -6985,7 +6949,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
             return new DeleteVMSnapshotAnswer(cmd, false, e.getMessage());
         }
     }
-    
+
     protected Answer execute(final AttachIsoCommand cmd) {
         Connection conn = getConnection();
         boolean attach = cmd.isAttach();
@@ -7400,7 +7364,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
                         } finally {
                             deleteSnapshotBackup(conn, dcId, accountId, volumeId, secondaryStorageMountPath, snapshotBackupUuid);
                         }
-                    }                    
+                    }
                     success = true;
                 } finally {
                     if( snapshotSr != null) {
@@ -7807,7 +7771,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
     }
 
     protected boolean deleteSnapshotsDir(Connection conn, Long dcId, Long accountId, Long volumeId, String secondaryStorageMountPath) {
-        return deleteSecondaryStorageFolder(conn, secondaryStorageMountPath, "snapshots" + "/" + accountId.toString() + "/" + volumeId.toString());             
+        return deleteSecondaryStorageFolder(conn, secondaryStorageMountPath, "snapshots" + "/" + accountId.toString() + "/" + volumeId.toString());
     }
 
 
@@ -8100,7 +8064,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
         } catch (Throwable e) {
             s_logger.warn("Check for master failed, failing the Cluster sync command");
             return  new Answer(cmd);
-        } 
+        }
         HashMap<String, Pair<String, State>> newStates = deltaClusterSync(conn);
         return new ClusterSyncAnswer(cmd.getClusterId(), newStates);
     }
@@ -8129,10 +8093,10 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
                 }
                 if (s_logger.isTraceEnabled()) {
                     s_logger.trace("VM " + vm_name + ": powerstate = " + ps + "; vm state=" + state.toString());
-                } 
+                }
             }
         } catch (final Throwable e) {
-            String msg = "Unable to get vms through host " + _host.uuid + " due to to " + e.toString();      
+            String msg = "Unable to get vms through host " + _host.uuid + " due to to " + e.toString();
             s_logger.warn(msg, e);
             throw new CloudRuntimeException(msg);
         }
@@ -8189,7 +8153,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
                     s_logger.warn("Detecting a new state but couldn't find a old state so adding it to the changes: " + vm);
                     changes.put(vm, new Pair<String, State>(host_uuid, newState));
                 } else if (oldState.second() == State.Starting) {
-                    if (newState == State.Running) { 
+                    if (newState == State.Running) {
                         s_logger.debug("12. The VM " + vm + " is in " + State.Running + " state");
                         s_vms.put(_cluster, host_uuid, vm, newState);
                     } else if (newState == State.Stopped) {
@@ -8603,25 +8567,25 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
         }
     }
 
-	@Override
-	public void setName(String name) {
-	}
+    @Override
+    public void setName(String name) {
+    }
 
-	@Override
-	public void setConfigParams(Map<String, Object> params) {
-	}
+    @Override
+    public void setConfigParams(Map<String, Object> params) {
+    }
 
-	@Override
-	public Map<String, Object> getConfigParams() {
-		return null;
-	}
+    @Override
+    public Map<String, Object> getConfigParams() {
+        return null;
+    }
 
-	@Override
-	public int getRunLevel() {
-		return 0;
-	}
+    @Override
+    public int getRunLevel() {
+        return 0;
+    }
 
-	@Override
-	public void setRunLevel(int level) {
-	}
+    @Override
+    public void setRunLevel(int level) {
+    }
 }


[43/50] [abbrv] git commit: updated refs/heads/vmsync to f737019

Posted by ah...@apache.org.
CLOUDSTACK-3282:[GSLB] Unable to add multiple LB rules to same GSLB rule

-fixing regression due to adding GSLB monitor for GSLB service

-code to add/delete GSLB monitor and GSLB service-monitor binding is
made idempotent so as to succeed if the resource exists.


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/e07a8b3f
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/e07a8b3f
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/e07a8b3f

Branch: refs/heads/vmsync
Commit: e07a8b3f513d695ec639e261d604bdf7bc8dc6f4
Parents: f980341
Author: Murali Reddy <mu...@gmail.com>
Authored: Mon Jul 1 18:59:14 2013 +0530
Committer: Murali Reddy <mu...@gmail.com>
Committed: Mon Jul 1 19:07:00 2013 +0530

----------------------------------------------------------------------
 .../network/resource/NetscalerResource.java     | 121 ++++++++++++++-----
 .../GlobalLoadBalancingRulesServiceImpl.java    |   5 +-
 2 files changed, 92 insertions(+), 34 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/e07a8b3f/plugins/network-elements/netscaler/src/com/cloud/network/resource/NetscalerResource.java
----------------------------------------------------------------------
diff --git a/plugins/network-elements/netscaler/src/com/cloud/network/resource/NetscalerResource.java b/plugins/network-elements/netscaler/src/com/cloud/network/resource/NetscalerResource.java
index 263e13b..d25d416 100644
--- a/plugins/network-elements/netscaler/src/com/cloud/network/resource/NetscalerResource.java
+++ b/plugins/network-elements/netscaler/src/com/cloud/network/resource/NetscalerResource.java
@@ -905,46 +905,27 @@ public class NetscalerResource implements ServerResource {
                             GSLB.createVserverServiceBinding(_netscalerService, serviceName, vserverName);
 
                             // create a monitor for the service running on the site
-                            lbmonitor newmonitor = new lbmonitor();
-                            String monitorName =  GSLB.generateGslbServiceMonitorName(servicePublicIp);
-                            newmonitor.set_type("TCP");
-                            newmonitor.set_servicename(serviceName);
-                            newmonitor.set_monitorname(monitorName);
-                            newmonitor.set_state("ENABLED");
-                            lbmonitor.add(_netscalerService, newmonitor);
+                            GSLB.createGslbServiceMonitor(_netscalerService, servicePublicIp, serviceName);
 
-                            // bind the monitor to the GSLB servie
-                            try {
-                                gslbservice_lbmonitor_binding monitorBinding = new gslbservice_lbmonitor_binding();
-                                monitorBinding.set_monitor_name(monitorName);
-                                monitorBinding.set_servicename(serviceName);
-                                gslbservice_lbmonitor_binding.add(_netscalerService, monitorBinding);
-                            } catch (Exception e) {
-                                // TODO: Nitro API version 10.* is not compatible for NetScalers 9.*, so may fail
-                                // against NetScaler version lesser than 10 hence ignore the exception
-                                s_logger.warn("Failed to bind monitor to GSLB service due to " + e.getMessage());
-                            }
+                            // bind the monitor to the GSLB service
+                            GSLB.createGslbServiceGslbMonitorBinding(_netscalerService, servicePublicIp, serviceName);
 
                         } else {
+
+                            String monitorName =  GSLB.generateGslbServiceMonitorName(servicePublicIp);
+
+                            // delete GSLB service and GSLB monitor binding
+                            GSLB.deleteGslbServiceGslbMonitorBinding(_netscalerService, monitorName, serviceName);
+
+                            // delete the GSLB service monitor
+                            GSLB.deleteGslbServiceMonitor(_netscalerService, monitorName);
+
                             // Unbind GSLB service with GSLB virtual server
                             GSLB.deleteVserverServiceBinding(_netscalerService, serviceName, vserverName);
 
                             // delete 'gslbservice' object
                             gslbservice service = GSLB.getServiceObject(_netscalerService, serviceName);
                             GSLB.deleteService(_netscalerService, serviceName);
-
-                            // delete the GSLB service monitor
-                            String monitorName =  GSLB.generateGslbServiceMonitorName(servicePublicIp);
-                            try {
-                                lbmonitor serviceMonitor = lbmonitor.get(_netscalerService, monitorName);
-                                if (serviceMonitor != null) {
-                                    lbmonitor.delete(_netscalerService, serviceMonitor);
-                                }
-                            } catch (nitro_exception ne) {
-                                if (ne.getErrorCode() != NitroError.NS_RESOURCE_NOT_EXISTS) {
-                                    s_logger.warn("Failed to delete monitor "+ monitorName + " for GSLB service due to " + ne.getMessage());
-                                }
-                            }
                         }
 
                         if (site.forRevoke()) { // delete the site if its for revoke
@@ -969,9 +950,16 @@ public class NetscalerResource implements ServerResource {
                         String servicePublicIp = site.getServicePublicIp();
                         String servicePublicPort = site.getServicePort();
                         String siteName = GSLB.generateUniqueSiteName(sitePrivateIP, sitePublicIP, site.getDataCenterId());
+                        String serviceName = GSLB.generateUniqueServiceName(siteName, servicePublicIp, servicePublicPort);
+                        String monitorName =  GSLB.generateGslbServiceMonitorName(servicePublicIp);
+
+                        // delete GSLB service and GSLB monitor binding
+                        GSLB.deleteGslbServiceGslbMonitorBinding(_netscalerService, servicePublicIp, serviceName);
+
+                        // delete the GSLB service monitor
+                        GSLB.deleteGslbServiceMonitor(_netscalerService, monitorName);
 
                         // remove binding between virtual server and services
-                        String serviceName = GSLB.generateUniqueServiceName(siteName, servicePublicIp, servicePublicPort);
                         GSLB.deleteVserverServiceBinding(_netscalerService, serviceName, vserverName);
 
                         // delete service object
@@ -1460,6 +1448,75 @@ public class NetscalerResource implements ServerResource {
             }
         }
 
+        private static void createGslbServiceMonitor(nitro_service nsService, String servicePublicIp,
+                                                     String serviceName) throws ExecutionException {
+            try {
+                lbmonitor newmonitor = new lbmonitor();
+                String monitorName =  generateGslbServiceMonitorName(servicePublicIp);
+                newmonitor.set_type("TCP");
+                newmonitor.set_servicename(serviceName);
+                newmonitor.set_monitorname(monitorName);
+                newmonitor.set_state("ENABLED");
+                lbmonitor.add(nsService, newmonitor);
+            } catch (nitro_exception ne) {
+                if (ne.getErrorCode() == NitroError.NS_RESOURCE_EXISTS) {
+                    return;
+                }
+            } catch (Exception e) {
+                String errMsg = "Failed to create GSLB monitor for service public ip" + servicePublicIp;
+                if (s_logger.isDebugEnabled()) {
+                    s_logger.debug(errMsg);
+                }
+                throw new ExecutionException(errMsg);
+            }
+        }
+
+        private static void deleteGslbServiceMonitor(nitro_service nsService, String monitorName)
+                throws ExecutionException {
+            try {
+                lbmonitor serviceMonitor = lbmonitor.get(nsService, monitorName);
+                if (serviceMonitor != null) {
+                    lbmonitor.delete(nsService, serviceMonitor);
+                }
+            } catch (nitro_exception ne) {
+                if (ne.getErrorCode() != NitroError.NS_RESOURCE_NOT_EXISTS) {
+                    String errMsg = "Failed to delete monitor "+ monitorName + " for GSLB service due to " + ne.getMessage();
+                    s_logger.debug(errMsg);
+                    throw new com.cloud.utils.exception.ExecutionException(errMsg);
+                }
+            } catch (Exception e) {
+                String errMsg = "Failed to delete monitor "+ monitorName + " for GSLB service due to " + e.getMessage();
+                s_logger.debug(errMsg);
+                throw new com.cloud.utils.exception.ExecutionException(errMsg);
+            }
+        }
+
+        private static void createGslbServiceGslbMonitorBinding(nitro_service nsService, String servicePublicIp,
+                                                            String serviceName) {
+            try {
+                String monitorName =  GSLB.generateGslbServiceMonitorName(servicePublicIp);
+                gslbservice_lbmonitor_binding monitorBinding = new gslbservice_lbmonitor_binding();
+                monitorBinding.set_monitor_name(monitorName);
+                monitorBinding.set_servicename(serviceName);
+                gslbservice_lbmonitor_binding.add(nsService, monitorBinding);
+            } catch (Exception e) {
+                // TODO: Nitro API version 10.* is not compatible for NetScalers 9.*, so may fail
+                // against NetScaler version lesser than 10 hence ignore the exception
+                s_logger.warn("Failed to bind monitor to GSLB service due to " + e.getMessage());
+            }
+        }
+
+        private static void deleteGslbServiceGslbMonitorBinding(nitro_service nsService, String monitorName,
+                                                                String serviceName) {
+            try {
+                gslbservice_lbmonitor_binding[] monitorBindings = gslbservice_lbmonitor_binding.get(nsService, serviceName);
+                gslbservice_lbmonitor_binding.delete(nsService, monitorBindings);
+            } catch (Exception e) {
+                s_logger.warn("Failed to delet GSLB monitor " + monitorName + "and GSLB service " +  serviceName +
+                        " binding due to " + e.getMessage());
+            }
+        }
+
         // get 'gslbsite' object corresponding to a site name
         private static gslbsite getSiteObject(nitro_service client, String siteName) {
             try {

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/e07a8b3f/server/src/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImpl.java
----------------------------------------------------------------------
diff --git a/server/src/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImpl.java b/server/src/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImpl.java
index 483c19a..0642390 100644
--- a/server/src/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImpl.java
+++ b/server/src/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImpl.java
@@ -648,8 +648,9 @@ public class GlobalLoadBalancingRulesServiceImpl implements GlobalLoadBalancingR
             try {
                 _gslbProvider.applyGlobalLoadBalancerRule(zoneId.first(), zoneId.second(), gslbConfigCmd);
             } catch (ResourceUnavailableException e) {
-                s_logger.warn("Failed to configure GSLB rul in the zone " + zoneId + " due to " + e.getMessage());
-                throw new CloudRuntimeException("Failed to configure GSLB rul in the zone");
+                String msg =  "Failed to configure GSLB rule in the zone " + zoneId.first() + " due to " + e.getMessage();
+                s_logger.warn(msg);
+                throw new CloudRuntimeException(msg);
             }
         }
 


[09/50] [abbrv] git commit: updated refs/heads/vmsync to f737019

Posted by ah...@apache.org.
Improve logging for CitrixResourceBase by replacing e.printStackTrace
with s_Logger.error.

Automatic cleanup using the eclipse settings from Alex

Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/ed004279
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/ed004279
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/ed004279

Branch: refs/heads/vmsync
Commit: ed0042798924058ec5b62acb9a39b97afca80c25
Parents: 99227f7
Author: Hugo Trippaers <ht...@schubergphilis.com>
Authored: Fri Jun 28 16:45:31 2013 -0700
Committer: Hugo Trippaers <ht...@schubergphilis.com>
Committed: Fri Jun 28 16:45:31 2013 -0700

----------------------------------------------------------------------
 .../xen/resource/CitrixResourceBase.java        | 237 ++++++++-----------
 1 file changed, 104 insertions(+), 133 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/ed004279/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java
index d9c357d..4abf127 100644
--- a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java
+++ b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java
@@ -53,13 +53,6 @@ import javax.ejb.Local;
 import javax.naming.ConfigurationException;
 import javax.xml.parsers.DocumentBuilderFactory;
 
-import com.cloud.agent.api.*;
-import com.cloud.agent.api.to.*;
-import com.cloud.network.rules.FirewallRule;
-
-import org.apache.cloudstack.storage.command.StorageSubSystemCommand;
-import org.apache.cloudstack.storage.to.TemplateObjectTO;
-import org.apache.cloudstack.storage.to.VolumeObjectTO;
 import org.apache.log4j.Logger;
 import org.apache.xmlrpc.XmlRpcException;
 import org.w3c.dom.Document;
@@ -67,6 +60,45 @@ import org.w3c.dom.Node;
 import org.w3c.dom.NodeList;
 import org.xml.sax.InputSource;
 
+import com.trilead.ssh2.SCPClient;
+import com.xensource.xenapi.Bond;
+import com.xensource.xenapi.Connection;
+import com.xensource.xenapi.Console;
+import com.xensource.xenapi.Host;
+import com.xensource.xenapi.HostCpu;
+import com.xensource.xenapi.HostMetrics;
+import com.xensource.xenapi.Network;
+import com.xensource.xenapi.PBD;
+import com.xensource.xenapi.PIF;
+import com.xensource.xenapi.PIF.Record;
+import com.xensource.xenapi.Pool;
+import com.xensource.xenapi.SR;
+import com.xensource.xenapi.Session;
+import com.xensource.xenapi.Task;
+import com.xensource.xenapi.Types;
+import com.xensource.xenapi.Types.BadAsyncResult;
+import com.xensource.xenapi.Types.BadServerResponse;
+import com.xensource.xenapi.Types.ConsoleProtocol;
+import com.xensource.xenapi.Types.IpConfigurationMode;
+import com.xensource.xenapi.Types.OperationNotAllowed;
+import com.xensource.xenapi.Types.SrFull;
+import com.xensource.xenapi.Types.VbdType;
+import com.xensource.xenapi.Types.VmBadPowerState;
+import com.xensource.xenapi.Types.VmPowerState;
+import com.xensource.xenapi.Types.XenAPIException;
+import com.xensource.xenapi.VBD;
+import com.xensource.xenapi.VBDMetrics;
+import com.xensource.xenapi.VDI;
+import com.xensource.xenapi.VIF;
+import com.xensource.xenapi.VLAN;
+import com.xensource.xenapi.VM;
+import com.xensource.xenapi.VMGuestMetrics;
+import com.xensource.xenapi.XenAPIObject;
+
+import org.apache.cloudstack.storage.command.StorageSubSystemCommand;
+import org.apache.cloudstack.storage.to.TemplateObjectTO;
+import org.apache.cloudstack.storage.to.VolumeObjectTO;
+
 import com.cloud.agent.IAgentControl;
 import com.cloud.agent.api.Answer;
 import com.cloud.agent.api.AttachIsoCommand;
@@ -208,8 +240,12 @@ import com.cloud.agent.api.storage.PrimaryStorageDownloadAnswer;
 import com.cloud.agent.api.storage.PrimaryStorageDownloadCommand;
 import com.cloud.agent.api.storage.ResizeVolumeAnswer;
 import com.cloud.agent.api.storage.ResizeVolumeCommand;
+import com.cloud.agent.api.to.DataStoreTO;
+import com.cloud.agent.api.to.DataTO;
+import com.cloud.agent.api.to.DiskTO;
 import com.cloud.agent.api.to.FirewallRuleTO;
 import com.cloud.agent.api.to.IpAddressTO;
+import com.cloud.agent.api.to.NfsTO;
 import com.cloud.agent.api.to.NicTO;
 import com.cloud.agent.api.to.PortForwardingRuleTO;
 import com.cloud.agent.api.to.S3TO;
@@ -264,82 +300,6 @@ import com.cloud.vm.DiskProfile;
 import com.cloud.vm.VirtualMachine;
 import com.cloud.vm.VirtualMachine.State;
 import com.cloud.vm.snapshot.VMSnapshot;
-import com.trilead.ssh2.SCPClient;
-import com.xensource.xenapi.Bond;
-import com.xensource.xenapi.Connection;
-import com.xensource.xenapi.Console;
-import com.xensource.xenapi.Host;
-import com.xensource.xenapi.HostCpu;
-import com.xensource.xenapi.HostMetrics;
-import com.xensource.xenapi.Network;
-import com.xensource.xenapi.PBD;
-import com.xensource.xenapi.PIF;
-import com.xensource.xenapi.PIF.Record;
-import com.xensource.xenapi.Pool;
-import com.xensource.xenapi.SR;
-import com.xensource.xenapi.Session;
-import com.xensource.xenapi.Task;
-import com.xensource.xenapi.Types;
-import com.xensource.xenapi.Types.BadAsyncResult;
-import com.xensource.xenapi.Types.BadServerResponse;
-import com.xensource.xenapi.Types.ConsoleProtocol;
-import com.xensource.xenapi.Types.IpConfigurationMode;
-import com.xensource.xenapi.Types.OperationNotAllowed;
-import com.xensource.xenapi.Types.SrFull;
-import com.xensource.xenapi.Types.VbdType;
-import com.xensource.xenapi.Types.VmBadPowerState;
-import com.xensource.xenapi.Types.VmPowerState;
-import com.xensource.xenapi.Types.XenAPIException;
-import com.xensource.xenapi.VBD;
-import com.xensource.xenapi.VBDMetrics;
-import com.xensource.xenapi.VDI;
-import com.xensource.xenapi.VIF;
-import com.xensource.xenapi.VLAN;
-import com.xensource.xenapi.VM;
-import com.xensource.xenapi.VMGuestMetrics;
-import com.xensource.xenapi.XenAPIObject;
-import org.apache.cloudstack.storage.command.StorageSubSystemCommand;
-import org.apache.log4j.Logger;
-import org.apache.xmlrpc.XmlRpcException;
-import org.w3c.dom.Document;
-import org.w3c.dom.Node;
-import org.w3c.dom.NodeList;
-import org.xml.sax.InputSource;
-
-import javax.ejb.Local;
-import javax.naming.ConfigurationException;
-import javax.xml.parsers.DocumentBuilderFactory;
-import java.beans.BeanInfo;
-import java.beans.IntrospectionException;
-import java.beans.Introspector;
-import java.beans.PropertyDescriptor;
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.io.StringReader;
-import java.lang.reflect.InvocationTargetException;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.net.URL;
-import java.net.URLConnection;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.Queue;
-import java.util.Random;
-import java.util.Set;
-import java.util.UUID;
 
 /**
  * CitrixResourceBase encapsulates the calls to the XenServer Xapi process
@@ -430,7 +390,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
     }
 
     public XsHost getHost() {
-        return this._host;
+        return _host;
     }
 
     protected boolean cleanupHaltedVms(Connection conn) throws XenAPIException, XmlRpcException {
@@ -665,7 +625,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
         } else if (clazz == CheckS2SVpnConnectionsCommand.class) {
             return execute((CheckS2SVpnConnectionsCommand) cmd);
         } else if (cmd instanceof StorageSubSystemCommand) {
-            return this.storageHandler.handleStorageCommands((StorageSubSystemCommand)cmd);
+            return storageHandler.handleStorageCommands((StorageSubSystemCommand)cmd);
         } else if (clazz == CreateVMSnapshotCommand.class) {
             return execute((CreateVMSnapshotCommand)cmd);
         } else if (clazz == DeleteVMSnapshotCommand.class) {
@@ -753,7 +713,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
             }
 
             for (VM vm : vms) {
-                VM.Record vmr = vm.getRecord(conn);
+                vm.getRecord(conn);
                 try {
                     scaleVM(conn, vm, vmSpec, host);
                 } catch (Exception e) {
@@ -964,12 +924,16 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
                     vswitchNw = networks.iterator().next();
                 }
 
-                enableXenServerNetwork(conn, vswitchNw, "vswitch", "vswicth network");
+                enableXenServerNetwork(conn, vswitchNw, "vswitch", "vswitch network");
                 _host.vswitchNetwork = vswitchNw;
             }
             return _host.vswitchNetwork;
-        } catch (Exception e) {
-            e.printStackTrace();
+        } catch (BadServerResponse e) {
+            s_logger.error("Failed to setup vswitch network", e);
+        } catch (XenAPIException e) {
+            s_logger.error("Failed to setup vswitch network", e);
+        } catch (XmlRpcException e) {
+            s_logger.error("Failed to setup vswitch network", e);
         }
 
         return null;
@@ -3040,14 +3004,11 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
                 try {
                     host_uuid = host.getUuid(conn);
                 } catch (BadServerResponse e) {
-                    // TODO Auto-generated catch block
-                    e.printStackTrace();
+                    s_logger.error("Failed to get host uuid for host " + host.toWireString(), e);
                 } catch (XenAPIException e) {
-                    // TODO Auto-generated catch block
-                    e.printStackTrace();
+                    s_logger.error("Failed to get host uuid for host " + host.toWireString(), e);
                 } catch (XmlRpcException e) {
-                    // TODO Auto-generated catch block
-                    e.printStackTrace();
+                    s_logger.error("Failed to get host uuid for host " + host.toWireString(), e);
                 }
                 vmStates.put(record.nameLabel, new Pair<String, State>(host_uuid, state));
             }
@@ -3539,7 +3500,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
                 s_logger.warn("There are no Consoles available to the vm : " + record.nameDescription);
                 return -1;
             }
-            Iterator<Console> i = consoles.iterator();
+            consoles.iterator();
         } catch (XenAPIException e) {
             String msg = "Unable to get vnc-port due to " + e.toString();
             s_logger.warn(msg, e);
@@ -5778,39 +5739,36 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
             } else {
                 return new Answer(cmd, false, result);
             }
-        } catch (Exception e) {
-            e.printStackTrace();
+        } catch (BadServerResponse e) {
+            s_logger.error("Failed to delete flow", e);
+        } catch (XenAPIException e) {
+            s_logger.error("Failed to delete flow", e);
+        } catch (XmlRpcException e) {
+            s_logger.error("Failed to delete flow", e);
         }
         return new Answer(cmd, false, "failed to delete flow for " + cmd.getVmName());
     }
 
     private List<Pair<String, Long>> ovsFullSyncStates() {
         Connection conn = getConnection();
-        try {
-            String result = callHostPlugin(conn, "ovsgre", "ovs_get_vm_log", "host_uuid", _host.uuid);
-            String [] logs = result != null ?result.split(";"): new String [0];
-            List<Pair<String, Long>> states = new ArrayList<Pair<String, Long>>();
-            for (String log: logs){
-                String [] info = log.split(",");
-                if (info.length != 5) {
-                    s_logger.warn("Wrong element number in ovs log(" + log +")");
-                    continue;
-                }
-
-                //','.join([bridge, vmName, vmId, seqno, tag])
-                try {
-                    states.add(new Pair<String,Long>(info[0], Long.parseLong(info[3])));
-                } catch (NumberFormatException nfe) {
-                    states.add(new Pair<String,Long>(info[0], -1L));
-                }
+        String result = callHostPlugin(conn, "ovsgre", "ovs_get_vm_log", "host_uuid", _host.uuid);
+        String [] logs = result != null ?result.split(";"): new String [0];
+        List<Pair<String, Long>> states = new ArrayList<Pair<String, Long>>();
+        for (String log: logs){
+            String [] info = log.split(",");
+            if (info.length != 5) {
+                s_logger.warn("Wrong element number in ovs log(" + log +")");
+                continue;
             }
 
-            return states;
-        } catch (Exception e) {
-            e.printStackTrace();
+            //','.join([bridge, vmName, vmId, seqno, tag])
+            try {
+                states.add(new Pair<String,Long>(info[0], Long.parseLong(info[3])));
+            } catch (NumberFormatException nfe) {
+                states.add(new Pair<String,Long>(info[0], -1L));
+            }
         }
-
-        return null;
+        return states;
     }
 
     private OvsSetTagAndFlowAnswer execute(OvsSetTagAndFlowCommand cmd) {
@@ -5835,8 +5793,12 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
             } else {
                 return new OvsSetTagAndFlowAnswer(cmd, false, result);
             }
-        } catch (Exception e) {
-            e.printStackTrace();
+        } catch (BadServerResponse e) {
+            s_logger.error("Failed to set tag and flow", e);
+        } catch (XenAPIException e) {
+            s_logger.error("Failed to set tag and flow", e);
+        } catch (XmlRpcException e) {
+            s_logger.error("Failed to set tag and flow", e);
         }
 
         return new OvsSetTagAndFlowAnswer(cmd, false, "EXCEPTION");
@@ -5849,18 +5811,24 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
         s_logger.debug("Will look for network with name-label:" + label + " on host " + _host.ip);
         Connection conn = getConnection();
         try {
-            XsLocalNetwork nw = this.getNetworkByName(conn, label);
+            XsLocalNetwork nw = getNetworkByName(conn, label);
             s_logger.debug("Network object:" + nw.getNetwork().getUuid(conn));
             PIF pif = nw.getPif(conn);
             Record pifRec = pif.getRecord(conn);
             s_logger.debug("PIF object:" + pifRec.uuid + "(" + pifRec.device + ")");
             return new OvsFetchInterfaceAnswer(cmd, true, "Interface " + pifRec.device + " retrieved successfully", 
                     pifRec.IP, pifRec.netmask, pifRec.MAC);
-        } catch (Exception e) {
-            e.printStackTrace();
+        } catch (BadServerResponse e) {
+            s_logger.error("An error occurred while fetching the interface for " +
+                    label + " on host " + _host.ip , e);
+            return new OvsFetchInterfaceAnswer(cmd, false, "EXCEPTION:" + e.getMessage());
+        } catch (XenAPIException e) {
             s_logger.error("An error occurred while fetching the interface for " +
-                    label + " on host " + _host.ip + ":" + e.toString() + 
-                    "(" + e.getClass() + ")");
+                    label + " on host " + _host.ip , e);
+            return new OvsFetchInterfaceAnswer(cmd, false, "EXCEPTION:" + e.getMessage());
+        } catch (XmlRpcException e) {
+            s_logger.error("An error occurred while fetching the interface for " +
+                    label + " on host " + _host.ip, e);
             return new OvsFetchInterfaceAnswer(cmd, false, "EXCEPTION:" + e.getMessage());
         }
     }
@@ -5885,12 +5853,15 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
             } else {
                 return new OvsCreateGreTunnelAnswer(cmd, true, result, _host.ip, bridge, Integer.parseInt(res[1]));
             }
-        } catch (Exception e) {
-            e.printStackTrace();
+        } catch (BadServerResponse e) {
             s_logger.error("An error occurred while creating a GRE tunnel to " +
-                    cmd.getRemoteIp() + " on host " + _host.ip + ":" + e.getMessage() + 
-                    "(" + e.getClass() + ")");
-
+                    cmd.getRemoteIp() + " on host " + _host.ip , e);
+        } catch (XenAPIException e) {
+            s_logger.error("An error occurred while creating a GRE tunnel to " +
+                    cmd.getRemoteIp() + " on host " + _host.ip , e);
+        } catch (XmlRpcException e) {
+            s_logger.error("An error occurred while creating a GRE tunnel to " +
+                    cmd.getRemoteIp() + " on host " + _host.ip , e);
         }
 
         return new OvsCreateGreTunnelAnswer(cmd, false, "EXCEPTION", _host.ip, bridge);
@@ -6139,7 +6110,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
 
         CheckXenHostInfo();
 
-        this.storageHandler = getStorageHandler();
+        storageHandler = getStorageHandler();
         return true;
 
     }


[07/50] [abbrv] SolidFire plug-in and related changes

Posted by ah...@apache.org.
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
index 5c51585..52f4190 100755
--- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
+++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
@@ -36,6 +36,9 @@ import java.util.Map;
 import java.util.Random;
 import java.util.TimeZone;
 import java.util.UUID;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.Map.Entry;
 
 import javax.inject.Inject;
 import javax.naming.ConfigurationException;
@@ -216,6 +219,8 @@ import com.cloud.hypervisor.vmware.mo.HostMO;
 import com.cloud.hypervisor.vmware.mo.HypervisorHostHelper;
 import com.cloud.hypervisor.vmware.mo.NetworkDetails;
 import com.cloud.hypervisor.vmware.mo.VirtualEthernetCardType;
+import com.cloud.hypervisor.vmware.mo.HostDatastoreSystemMO;
+import com.cloud.hypervisor.vmware.mo.HostStorageSystemMO;
 import com.cloud.hypervisor.vmware.mo.VirtualMachineMO;
 import com.cloud.hypervisor.vmware.mo.VirtualSwitchType;
 import com.cloud.hypervisor.vmware.mo.VmwareHypervisorHost;
@@ -229,7 +234,6 @@ import com.cloud.network.HAProxyConfigurator;
 import com.cloud.network.LoadBalancerConfigurator;
 import com.cloud.network.Networks;
 import com.cloud.network.Networks.BroadcastDomainType;
-import com.cloud.network.Networks.IsolationType;
 import com.cloud.network.Networks.TrafficType;
 import com.cloud.network.VmwareTrafficLabel;
 import com.cloud.network.rules.FirewallRule;
@@ -272,8 +276,16 @@ import com.vmware.vim25.GuestInfo;
 import com.vmware.vim25.HostCapability;
 import com.vmware.vim25.HostFirewallInfo;
 import com.vmware.vim25.HostFirewallRuleset;
-import com.vmware.vim25.HostNetworkTrafficShapingPolicy;
-import com.vmware.vim25.HostPortGroupSpec;
+import com.vmware.vim25.HostHostBusAdapter;
+import com.vmware.vim25.HostInternetScsiTargetTransport;
+import com.vmware.vim25.HostScsiTopology;
+import com.vmware.vim25.HostInternetScsiHba;
+import com.vmware.vim25.HostInternetScsiHbaAuthenticationProperties;
+import com.vmware.vim25.HostInternetScsiHbaStaticTarget;
+import com.vmware.vim25.HostScsiDisk;
+import com.vmware.vim25.HostScsiTopologyInterface;
+import com.vmware.vim25.HostScsiTopologyLun;
+import com.vmware.vim25.HostScsiTopologyTarget;
 import com.vmware.vim25.ManagedObjectReference;
 import com.vmware.vim25.ObjectContent;
 import com.vmware.vim25.OptionValue;
@@ -304,10 +316,6 @@ import com.vmware.vim25.VirtualMachineRelocateSpecDiskLocator;
 import com.vmware.vim25.VirtualMachineRuntimeInfo;
 import com.vmware.vim25.VirtualSCSISharing;
 
-import java.util.HashSet;
-import java.util.Set;
-import java.util.Map.Entry;
-
 public class VmwareResource implements StoragePoolResource, ServerResource, VmwareHostService {
     private static final Logger s_logger = Logger.getLogger(VmwareResource.class);
 
@@ -593,7 +601,6 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
 
         try {
             VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext());
-            ManagedObjectReference morDc = hyperHost.getHyperHostDatacenter();
             // find VM through datacenter (VM is not at the target host yet)
             VirtualMachineMO vmMo = hyperHost.findVmOnPeerHyperHost(vmName);
             if (vmMo == null) {
@@ -3244,7 +3251,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
             HashMap<String, State> newStates = getVmStates();
 
             List<String> requestedVmNames = cmd.getVmNames();
-            List<String> vmNames = new ArrayList();
+            List<String> vmNames = new ArrayList<String>();
 
             if (requestedVmNames != null) {
                 for (String vmName : requestedVmNames) {
@@ -3750,8 +3757,6 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
             s_logger.info("Executing resource MigrateVolumeCommand: " + _gson.toJson(cmd));
         }
 
-        VmwareContext context = getServiceContext();
-        VmwareManager mgr = context.getStockObject(VmwareManager.CONTEXT_STOCK_NAME);
         final String vmName = volMgr.getVmNameFromVolumeId(cmd.getVolumeId());
 
         VirtualMachineMO vmMo = null;
@@ -3903,6 +3908,45 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
         }
     }
 
+    protected ManagedObjectReference handleDatastoreAndVmdk(AttachVolumeCommand cmd) throws Exception {
+        ManagedObjectReference morDs = null;
+
+        VmwareContext context = getServiceContext();
+        VmwareHypervisorHost hyperHost = getHyperHost(context);
+
+        String iqn = cmd.get_iScsiName();
+
+        if (cmd.getAttach()) {
+            morDs = createVmfsDatastore(hyperHost, iqn,
+                        cmd.getStorageHost(), cmd.getStoragePort(), iqn,
+                        cmd.getChapInitiatorUsername(), cmd.getChapInitiatorPassword(),
+                        cmd.getChapTargetUsername(), cmd.getChapTargetPassword());
+
+            DatastoreMO dsMo = new DatastoreMO(context, morDs);
+
+            String volumeDatastorePath = String.format("[%s] %s.vmdk", dsMo.getName(), dsMo.getName());
+
+            if (!dsMo.fileExists(volumeDatastorePath)) {
+                String dummyVmName = getWorkerName(context, cmd, 0);
+
+                VirtualMachineMO vmMo = prepareVolumeHostDummyVm(hyperHost, dsMo, dummyVmName);
+
+                if (vmMo == null) {
+                    throw new Exception("Unable to create a dummy VM for volume creation");
+                }
+
+                vmMo.createDisk(volumeDatastorePath, (int)(dsMo.getSummary().getFreeSpace() / (1024L * 1024L)),
+                		morDs, vmMo.getScsiDeviceControllerKey());
+                vmMo.detachDisk(volumeDatastorePath, false);
+        	}
+        }
+        else {
+        	deleteVmfsDatastore(hyperHost, iqn, cmd.getStorageHost(), cmd.getStoragePort(), iqn);
+        }
+
+    	return morDs;
+    }
+
     protected Answer execute(AttachVolumeCommand cmd) {
         if (s_logger.isInfoEnabled()) {
             s_logger.info("Executing resource AttachVolumeCommand: " + _gson.toJson(cmd));
@@ -3922,7 +3966,15 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
                 throw new Exception(msg);
             }
 
-            ManagedObjectReference morDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, cmd.getPoolUuid());
+            ManagedObjectReference morDs = null;
+
+            if (cmd.getAttach() && cmd.isManaged()) {
+            	morDs = handleDatastoreAndVmdk(cmd);
+            }
+            else {
+            	morDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, cmd.getPoolUuid());
+            }
+
             if (morDs == null) {
                 String msg = "Unable to find the mounted datastore to execute AttachVolumeCommand, vmName: " + cmd.getVmName();
                 s_logger.error(msg);
@@ -3933,12 +3985,16 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
             String datastoreVolumePath = dsMo.searchFileInSubFolders(cmd.getVolumePath() + ".vmdk", true);
             assert (datastoreVolumePath != null) : "Virtual disk file must exist in specified datastore for attach/detach operations.";
 
-            AttachVolumeAnswer answer = new AttachVolumeAnswer(cmd, cmd.getDeviceId());
+            AttachVolumeAnswer answer = new AttachVolumeAnswer(cmd, cmd.getDeviceId(), datastoreVolumePath);
             if (cmd.getAttach()) {
                 vmMo.attachDisk(new String[] { datastoreVolumePath }, morDs);
             } else {
                 vmMo.removeAllSnapshots();
                 vmMo.detachDisk(datastoreVolumePath, false);
+
+                if (cmd.isManaged()) {
+                	handleDatastoreAndVmdk(cmd);
+                }
             }
 
             return answer;
@@ -3954,6 +4010,198 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
         }
     }
 
+    private ManagedObjectReference createVmfsDatastore(VmwareHypervisorHost hyperHost, String datastoreName, String storageIpAddress,
+            int storagePortNumber, String iqn, String chapName, String chapSecret, String mutualChapName, String mutualChapSecret) throws Exception {
+        VmwareContext context = getServiceContext();
+        ManagedObjectReference morCluster = hyperHost.getHyperHostCluster();
+        ClusterMO cluster = new ClusterMO(context, morCluster);
+        List<Pair<ManagedObjectReference, String>> lstHosts = cluster.getClusterHosts();
+
+        HostInternetScsiHbaStaticTarget target = new HostInternetScsiHbaStaticTarget();
+
+        target.setAddress(storageIpAddress);
+        target.setPort(storagePortNumber);
+        target.setIScsiName(iqn);
+
+        HostInternetScsiHbaAuthenticationProperties auth = new HostInternetScsiHbaAuthenticationProperties();
+
+        String strAuthType = "chapRequired";
+
+        auth.setChapAuthEnabled(true);
+        auth.setChapInherited(false);
+        auth.setChapAuthenticationType(strAuthType);
+        auth.setChapName(chapName);
+        auth.setChapSecret(chapSecret);
+        auth.setMutualChapInherited(false);
+        auth.setMutualChapAuthenticationType(strAuthType);
+        auth.setMutualChapName(mutualChapName);
+        auth.setMutualChapSecret(mutualChapSecret);
+
+        target.setAuthenticationProperties(auth);
+
+        final List<HostInternetScsiHbaStaticTarget> lstTargets = new ArrayList<HostInternetScsiHbaStaticTarget>();
+
+        lstTargets.add(target);
+
+        HostDatastoreSystemMO hostDatastoreSystem = null;
+        HostStorageSystemMO hostStorageSystem = null;
+
+        final List<Thread> threads = new ArrayList<Thread>();
+        final List<Exception> exceptions = new ArrayList<Exception>();
+
+        for (Pair<ManagedObjectReference, String> hostPair : lstHosts) {
+            HostMO host = new HostMO(context, hostPair.first());
+            hostDatastoreSystem = host.getHostDatastoreSystemMO();
+            hostStorageSystem = host.getHostStorageSystemMO();
+
+            boolean iScsiHbaConfigured = false;
+
+            for (HostHostBusAdapter hba : hostStorageSystem.getStorageDeviceInfo().getHostBusAdapter()) {
+                if (hba instanceof HostInternetScsiHba) {
+                    // just finding an instance of HostInternetScsiHba means that we have found at least one configured iSCSI HBA
+                    // at least one iSCSI HBA must be configured before a CloudStack user can use this host for iSCSI storage
+                    iScsiHbaConfigured = true;
+
+                    final String iScsiHbaDevice = hba.getDevice();
+
+                    final HostStorageSystemMO hss = hostStorageSystem;
+
+                    threads.add(new Thread() {
+                        public void run() {
+                            try {
+                                hss.addInternetScsiStaticTargets(iScsiHbaDevice, lstTargets);
+
+                                hss.rescanHba(iScsiHbaDevice);
+                            }
+                            catch (Exception ex) {
+                                synchronized (exceptions) {
+                                    exceptions.add(ex);
+                                }
+                            }
+                        }
+                    });
+                }
+            }
+
+            if (!iScsiHbaConfigured) {
+                throw new Exception("An iSCSI HBA must be configured before a host can use iSCSI storage.");
+            }
+        }
+
+        for (Thread thread : threads) {
+            thread.start();
+        }
+
+        for (Thread thread : threads) {
+            thread.join();
+        }
+
+        if (exceptions.size() > 0) {
+            throw new Exception(exceptions.get(0).getMessage());
+        }
+
+        ManagedObjectReference morDs = hostDatastoreSystem.findDatastore(iqn);
+
+        if (morDs != null) {
+            return morDs;
+        }
+
+        List<HostScsiDisk> lstHostScsiDisks = hostDatastoreSystem.queryAvailableDisksForVmfs();
+
+        HostScsiDisk hostScsiDisk = getHostScsiDisk(hostStorageSystem.getStorageDeviceInfo().getScsiTopology(), lstHostScsiDisks, iqn);
+
+        if (hostScsiDisk == null) {
+            throw new Exception("A relevant SCSI disk could not be located to use to create a datastore.");
+        }
+
+        return hostDatastoreSystem.createVmfsDatastore(datastoreName, hostScsiDisk);
+    }
+
+    // the purpose of this method is to find the HostScsiDisk in the passed-in array that exists (if any) because
+    // we added the static iqn to an iSCSI HBA
+    private static HostScsiDisk getHostScsiDisk(HostScsiTopology hst, List<HostScsiDisk> lstHostScsiDisks, String iqn) {
+        for (HostScsiTopologyInterface adapter : hst.getAdapter()) {
+            if (adapter.getTarget() != null) {
+                for (HostScsiTopologyTarget target : adapter.getTarget()) {
+                    if (target.getTransport() instanceof HostInternetScsiTargetTransport) {
+                        String iScsiName = ((HostInternetScsiTargetTransport)target.getTransport()).getIScsiName();
+
+                        if (iqn.equals(iScsiName)) {
+                            for (HostScsiDisk hostScsiDisk : lstHostScsiDisks) {
+                                for (HostScsiTopologyLun hstl : target.getLun()) {
+                                    if (hstl.getScsiLun().contains(hostScsiDisk.getUuid())) {
+                                        return hostScsiDisk;
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        }
+
+        return null;
+    }
+
+    private void deleteVmfsDatastore(VmwareHypervisorHost hyperHost, String volumeUuid,
+            String storageIpAddress, int storagePortNumber, String iqn) throws Exception {
+        // hyperHost.unmountDatastore(volumeUuid);
+
+        VmwareContext context = getServiceContext();
+        ManagedObjectReference morCluster = hyperHost.getHyperHostCluster();
+        ClusterMO cluster = new ClusterMO(context, morCluster);
+        List<Pair<ManagedObjectReference, String>> lstHosts = cluster.getClusterHosts();
+
+        HostInternetScsiHbaStaticTarget target = new HostInternetScsiHbaStaticTarget();
+
+        target.setAddress(storageIpAddress);
+        target.setPort(storagePortNumber);
+        target.setIScsiName(iqn);
+
+        final List<HostInternetScsiHbaStaticTarget> lstTargets = new ArrayList<HostInternetScsiHbaStaticTarget>();
+
+        lstTargets.add(target);
+
+        final List<Thread> threads = new ArrayList<Thread>();
+        final List<Exception> exceptions = new ArrayList<Exception>();
+
+        for (Pair<ManagedObjectReference, String> hostPair : lstHosts) {
+            final HostMO host = new HostMO(context, hostPair.first());
+            final HostStorageSystemMO hostStorageSystem = host.getHostStorageSystemMO();
+
+            for (HostHostBusAdapter hba : hostStorageSystem.getStorageDeviceInfo().getHostBusAdapter()) {
+                if (hba instanceof HostInternetScsiHba) {
+                    final String iScsiHbaDevice = hba.getDevice();
+
+                    Thread thread = new Thread() {
+                        public void run() {
+                            try {
+                                hostStorageSystem.removeInternetScsiStaticTargets(iScsiHbaDevice, lstTargets);
+
+                                hostStorageSystem.rescanHba(iScsiHbaDevice);
+                            }
+                            catch (Exception ex) {
+                                exceptions.add(ex);
+                            }
+                        }
+                    };
+
+                    threads.add(thread);
+
+                    thread.start();
+                }
+            }
+        }
+
+        for (Thread thread : threads) {
+            thread.join();
+        }
+
+        if (exceptions.size() > 0) {
+            throw new Exception(exceptions.get(0).getMessage());
+        }
+    }
+
     protected Answer execute(AttachIsoCommand cmd) {
         if (s_logger.isInfoEnabled()) {
             s_logger.info("Executing resource AttachIsoCommand: " + _gson.toJson(cmd));

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java
index cbbec7c..d9c357d 100644
--- a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java
+++ b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java
@@ -4172,11 +4172,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
                             for (VIF vif : vifs) {
                                 networks.add(vif.getNetwork(conn));
                             }
-                            List<VDI> vdis = getVdis(conn, vm);
                             vm.destroy(conn);
-                            for( VDI vdi : vdis ){
-                                umount(conn, vdi);
-                            }
                             state = State.Stopped;
                             SR sr = getISOSRbyVmName(conn, cmd.getVmName());
                             removeSR(conn, sr);
@@ -4479,7 +4475,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
         throw new CloudRuntimeException("Could not find available VIF slot in VM with name: " + vmName);
     }
 
-    protected VDI mount(Connection conn, StoragePoolType pooltype, String volumeFolder, String volumePath) {
+    protected VDI mount(Connection conn, StoragePoolType poolType, String volumeFolder, String volumePath) {
         return getVDIbyUuid(conn, volumePath);
     }
 
@@ -5549,7 +5545,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
             if (pool.getType() == StoragePoolType.NetworkFilesystem) {
                 getNfsSR(conn, pool);
             } else if (pool.getType() == StoragePoolType.IscsiLUN) {
-                getIscsiSR(conn, pool);
+                getIscsiSR(conn, pool.getUuid(), pool.getHost(), pool.getPath(), null, null, new Boolean[1]);
             } else if (pool.getType() == StoragePoolType.PreSetup) {
             } else {
                 return new Answer(cmd, false, "The pool type: " + pool.getType().name() + " is not supported.");
@@ -6229,7 +6225,6 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
 
     public Answer execute(ResizeVolumeCommand cmd) {
         Connection conn = getConnection();
-        StorageFilerTO pool = cmd.getPool();
         String volid = cmd.getPath();
         long newSize = cmd.getNewSize();
 
@@ -6367,19 +6362,18 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
         }
     }
 
-    protected SR getIscsiSR(Connection conn, StorageFilerTO pool) {
-        synchronized (pool.getUuid().intern()) {
+    protected SR getIscsiSR(Connection conn, String srNameLabel, String target, String path,
+    		String chapInitiatorUsername, String chapInitiatorPassword, Boolean[] created) {
+        synchronized (srNameLabel.intern()) {
             Map<String, String> deviceConfig = new HashMap<String, String>();
             try {
-                String target = pool.getHost();
-                String path = pool.getPath();
                 if (path.endsWith("/")) {
                     path = path.substring(0, path.length() - 1);
                 }
 
                 String tmp[] = path.split("/");
                 if (tmp.length != 3) {
-                    String msg = "Wrong iscsi path " + pool.getPath() + " it should be /targetIQN/LUN";
+                    String msg = "Wrong iscsi path " + path + " it should be /targetIQN/LUN";
                     s_logger.warn(msg);
                     throw new CloudRuntimeException(msg);
                 }
@@ -6387,7 +6381,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
                 String lunid = tmp[2].trim();
                 String scsiid = "";
 
-                Set<SR> srs = SR.getByNameLabel(conn, pool.getUuid());
+                Set<SR> srs = SR.getByNameLabel(conn, srNameLabel);
                 for (SR sr : srs) {
                     if (!SRType.LVMOISCSI.equals(sr.getType(conn))) {
                         continue;
@@ -6412,19 +6406,24 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
                     }
                     if (target.equals(dc.get("target")) && targetiqn.equals(dc.get("targetIQN")) && lunid.equals(dc.get("lunid"))) {
                         throw new CloudRuntimeException("There is a SR using the same configuration target:" + dc.get("target") +  ",  targetIQN:"
-                                + dc.get("targetIQN")  + ", lunid:" + dc.get("lunid") + " for pool " + pool.getUuid() + "on host:" + _host.uuid);
+                                + dc.get("targetIQN")  + ", lunid:" + dc.get("lunid") + " for pool " + srNameLabel + "on host:" + _host.uuid);
                     }
                 }
                 deviceConfig.put("target", target);
                 deviceConfig.put("targetIQN", targetiqn);
 
+                if (StringUtils.isNotBlank(chapInitiatorUsername) &&
+                    StringUtils.isNotBlank(chapInitiatorPassword)) {
+                    deviceConfig.put("chapuser", chapInitiatorUsername);
+                    deviceConfig.put("chappassword", chapInitiatorPassword);
+                }
+
                 Host host = Host.getByUuid(conn, _host.uuid);
                 Map<String, String> smConfig = new HashMap<String, String>();
                 String type = SRType.LVMOISCSI.toString();
-                String poolId = Long.toString(pool.getId());
                 SR sr = null;
                 try {
-                    sr = SR.create(conn, host, deviceConfig, new Long(0), pool.getUuid(), poolId, type, "user", true,
+                    sr = SR.create(conn, host, deviceConfig, new Long(0), srNameLabel, srNameLabel, type, "user", true,
                             smConfig);
                 } catch (XenAPIException e) {
                     String errmsg = e.toString();
@@ -6463,19 +6462,30 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
                 if( result.indexOf("<UUID>") != -1) {
                     pooluuid = result.substring(result.indexOf("<UUID>") + 6, result.indexOf("</UUID>")).trim();
                 }
-                if( pooluuid == null || pooluuid.length() != 36) {
-                    sr = SR.create(conn, host, deviceConfig, new Long(0), pool.getUuid(), poolId, type, "user", true,
+
+                if (pooluuid == null || pooluuid.length() != 36)
+                {
+                    sr = SR.create(conn, host, deviceConfig, new Long(0), srNameLabel, srNameLabel, type, "user", true,
                             smConfig);
+
+                    created[0] = true; // note that the SR was created (as opposed to introduced)
                 } else {
-                    sr = SR.introduce(conn, pooluuid, pool.getUuid(), poolId,
-                            type, "user", true, smConfig);
-                    Pool.Record pRec = XenServerConnectionPool.getPoolRecord(conn);
-                    PBD.Record rec = new PBD.Record();
-                    rec.deviceConfig = deviceConfig;
-                    rec.host = pRec.master;
-                    rec.SR = sr;
-                    PBD pbd = PBD.create(conn, rec);
-                    pbd.plug(conn);
+                    sr = SR.introduce(conn, pooluuid, srNameLabel, srNameLabel,
+                        type, "user", true, smConfig);
+
+                    Set<Host> setHosts = Host.getAll(conn);
+
+                    for (Host currentHost : setHosts) {
+                        PBD.Record rec = new PBD.Record();
+
+                        rec.deviceConfig = deviceConfig;
+                        rec.host = currentHost;
+                        rec.SR = sr;
+
+                        PBD pbd = PBD.create(conn, rec);
+
+                        pbd.plug(conn);
+                    }
                 }
                 sr.scan(conn);
                 return sr;
@@ -6636,6 +6646,52 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
         }
     }
 
+    // for about 1 GiB of physical size, about 4 MiB seems to be used for metadata
+    private long getMetadata(long physicalSize) {
+    	return (long)(physicalSize * 0.00390625); // 1 GiB / 4 MiB = 0.00390625
+    }
+
+    protected VDI handleSrAndVdiAttach(String iqn, String storageHostName,
+            String chapInitiatorName, String chapInitiatorPassword) throws Exception {
+        VDI vdi = null;
+
+        Connection conn = getConnection();
+
+        Boolean[] created = { false };
+
+        SR sr = getIscsiSR(conn, iqn,
+                    storageHostName, iqn,
+                    chapInitiatorName, chapInitiatorPassword, created);
+
+        // if created[0] is true, this means the SR was actually created...as opposed to introduced
+        if (created[0]) {
+            VDI.Record vdir = new VDI.Record();
+
+            vdir.nameLabel = iqn;
+            vdir.SR = sr;
+            vdir.type = Types.VdiType.USER;
+            vdir.virtualSize = sr.getPhysicalSize(conn) - sr.getPhysicalUtilisation(conn) - getMetadata(sr.getPhysicalSize(conn));
+
+            if (vdir.virtualSize < 0) {
+            	throw new Exception("VDI virtual size cannot be less than 0.");
+            }
+
+            vdi = VDI.create(conn, vdir);
+        }
+        else {
+            vdi = sr.getVDIs(conn).iterator().next();
+        }
+
+    	return vdi;
+    }
+
+    protected void handleSrAndVdiDetach(String iqn) throws Exception {
+        Connection conn = getConnection();
+
+        SR sr = getStorageRepository(conn, iqn);
+
+        removeSR(conn, sr);
+    }
 
     protected AttachVolumeAnswer execute(final AttachVolumeCommand cmd) {
         Connection conn = getConnection();
@@ -6652,7 +6708,16 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
 
         try {
             // Look up the VDI
-            VDI vdi = mount(conn, cmd.getPooltype(), cmd.getVolumeFolder(),cmd.getVolumePath());
+            VDI vdi = null;
+
+            if (cmd.getAttach() && cmd.isManaged()) {
+                vdi = handleSrAndVdiAttach(cmd.get_iScsiName(), cmd.getStorageHost(),
+                        cmd.getChapInitiatorUsername(), cmd.getChapInitiatorPassword());
+            }
+            else {
+                vdi = getVDIbyUuid(conn, cmd.getVolumePath());
+            }
+
             // Look up the VM
             VM vm = getVM(conn, vmName);
             /* For HVM guest, if no pv driver installed, no attach/detach */
@@ -6704,7 +6769,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
                 // Update the VDI's label to include the VM name
                 vdi.setNameLabel(conn, vmName + "-DATA");
 
-                return new AttachVolumeAnswer(cmd, Long.parseLong(diskNumber));
+                return new AttachVolumeAnswer(cmd, Long.parseLong(diskNumber), vdi.getUuid(conn));
             } else {
                 // Look up all VBDs for this VDI
                 Set<VBD> vbds = vdi.getVBDs(conn);
@@ -6723,7 +6788,9 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
                 // Update the VDI's label to be "detached"
                 vdi.setNameLabel(conn, "detached");
 
-                umount(conn, vdi);
+                if (cmd.isManaged()) {
+                    handleSrAndVdiDetach(cmd.get_iScsiName());
+                }
 
                 return new AttachVolumeAnswer(cmd);
             }
@@ -7606,30 +7673,30 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
         }
     }
 
-    protected SR getStorageRepository(Connection conn, String uuid) {
+    protected SR getStorageRepository(Connection conn, String srNameLabel) {
         Set<SR> srs;
         try {
-            srs = SR.getByNameLabel(conn, uuid);
+            srs = SR.getByNameLabel(conn, srNameLabel);
         } catch (XenAPIException e) {
-            throw new CloudRuntimeException("Unable to get SR " + uuid + " due to " + e.toString(), e);
+            throw new CloudRuntimeException("Unable to get SR " + srNameLabel + " due to " + e.toString(), e);
         } catch (Exception e) {
-            throw new CloudRuntimeException("Unable to get SR " + uuid + " due to " + e.getMessage(), e);
+            throw new CloudRuntimeException("Unable to get SR " + srNameLabel + " due to " + e.getMessage(), e);
         }
 
         if (srs.size() > 1) {
-            throw new CloudRuntimeException("More than one storage repository was found for pool with uuid: " + uuid);
+            throw new CloudRuntimeException("More than one storage repository was found for pool with uuid: " + srNameLabel);
         } else if (srs.size() == 1) {
             SR sr = srs.iterator().next();
             if (s_logger.isDebugEnabled()) {
-                s_logger.debug("SR retrieved for " + uuid);
+                s_logger.debug("SR retrieved for " + srNameLabel);
             }
 
             if (checkSR(conn, sr)) {
                 return sr;
             }
-            throw new CloudRuntimeException("SR check failed for storage pool: " + uuid + "on host:" + _host.uuid);
+            throw new CloudRuntimeException("SR check failed for storage pool: " + srNameLabel + "on host:" + _host.uuid);
         } else {
-            throw new CloudRuntimeException("Can not see storage pool: " + uuid + " from on host:" + _host.uuid);
+            throw new CloudRuntimeException("Can not see storage pool: " + srNameLabel + " from on host:" + _host.uuid);
         }
     }
 

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServerStorageProcessor.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServerStorageProcessor.java b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServerStorageProcessor.java
index 399e234..e6358f2 100644
--- a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServerStorageProcessor.java
+++ b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServerStorageProcessor.java
@@ -55,7 +55,6 @@ import org.apache.xmlrpc.XmlRpcException;
 
 import com.cloud.agent.api.Answer;
 import com.cloud.agent.api.CreateStoragePoolCommand;
-import com.cloud.agent.api.CreateVolumeFromSnapshotAnswer;
 import com.cloud.agent.api.to.DataObjectType;
 import com.cloud.agent.api.to.DataStoreTO;
 import com.cloud.agent.api.to.DataTO;
@@ -171,7 +170,16 @@ public class XenServerStorageProcessor implements StorageProcessor {
         try {
             Connection conn = this.hypervisorResource.getConnection();
             // Look up the VDI
-            VDI vdi = this.hypervisorResource.mount(conn, null, null, data.getPath());
+            VDI vdi = null;
+
+            if (cmd.isManaged()) {
+                vdi = this.hypervisorResource.handleSrAndVdiAttach(cmd.get_iScsiName(), cmd.getStorageHost(),
+                        cmd.getChapInitiatorUsername(), cmd.getChapInitiatorPassword());
+            }
+            else {
+                vdi = this.hypervisorResource.mount(conn, null, null, data.getPath());
+            }
+
             // Look up the VM
             VM vm = this.hypervisorResource.getVM(conn, vmName);
             /* For HVM guest, if no pv driver installed, no attach/detach */
@@ -223,7 +231,7 @@ public class XenServerStorageProcessor implements StorageProcessor {
 
             // Update the VDI's label to include the VM name
             vdi.setNameLabel(conn, vmName + "-DATA");
-            DiskTO newDisk = new DiskTO(disk.getData(), Long.parseLong(diskNumber), disk.getType());
+            DiskTO newDisk = new DiskTO(disk.getData(), Long.parseLong(diskNumber), vdi.getUuid(conn), disk.getType());
             return new AttachAnswer(newDisk);
 
         } catch (XenAPIException e) {
@@ -350,6 +358,10 @@ public class XenServerStorageProcessor implements StorageProcessor {
 
             this.hypervisorResource.umount(conn, vdi);
 
+            if (cmd.isManaged()) {
+                this.hypervisorResource.handleSrAndVdiDetach(cmd.get_iScsiName());
+            }
+
             return new DettachAnswer(disk);
         } catch(Exception e) {
             s_logger.warn("Failed dettach volume: " + data.getPath());

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java
----------------------------------------------------------------------
diff --git a/plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java b/plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java
index 8d7c965..a233407 100644
--- a/plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java
+++ b/plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java
@@ -95,7 +95,12 @@ public class CloudStackPrimaryDataStoreDriverImpl implements PrimaryDataStoreDri
     }
 
     @Override
-    public void createAsync(DataObject data, AsyncCompletionCallback<CreateCmdResult> callback) {
+    public ChapInfo getChapInfo(VolumeInfo volumeInfo) {
+        return null;
+    }
+
+    @Override
+    public void createAsync(DataStore dataStore, DataObject data, AsyncCompletionCallback<CreateCmdResult> callback) {
         String errMsg = null;
         Answer answer = null;
         if (data.getType() == DataObjectType.VOLUME) {
@@ -118,7 +123,7 @@ public class CloudStackPrimaryDataStoreDriverImpl implements PrimaryDataStoreDri
     }
 
     @Override
-    public void deleteAsync(DataObject data, AsyncCompletionCallback<CommandResult> callback) {
+    public void deleteAsync(DataStore dataStore, DataObject data, AsyncCompletionCallback<CommandResult> callback) {
         DeleteCommand cmd = new DeleteCommand(data.getTO());
 
         CommandResult result = new CommandResult();

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/plugins/storage/volume/sample/src/org/apache/cloudstack/storage/datastore/driver/SamplePrimaryDataStoreDriverImpl.java
----------------------------------------------------------------------
diff --git a/plugins/storage/volume/sample/src/org/apache/cloudstack/storage/datastore/driver/SamplePrimaryDataStoreDriverImpl.java b/plugins/storage/volume/sample/src/org/apache/cloudstack/storage/datastore/driver/SamplePrimaryDataStoreDriverImpl.java
index 643c933..78f2263 100644
--- a/plugins/storage/volume/sample/src/org/apache/cloudstack/storage/datastore/driver/SamplePrimaryDataStoreDriverImpl.java
+++ b/plugins/storage/volume/sample/src/org/apache/cloudstack/storage/datastore/driver/SamplePrimaryDataStoreDriverImpl.java
@@ -54,6 +54,11 @@ public class SamplePrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
         return null;
     }
 
+    @Override
+    public ChapInfo getChapInfo(VolumeInfo volumeInfo) {
+        return null;
+    }
+
     private class CreateVolumeContext<T> extends AsyncRpcConext<T> {
         private final DataObject volume;
         public CreateVolumeContext(AsyncCompletionCallback<T> callback, DataObject volume) {
@@ -77,7 +82,7 @@ public class SamplePrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
     }
 
     @Override
-    public void deleteAsync(DataObject vo, AsyncCompletionCallback<CommandResult> callback) {
+    public void deleteAsync(DataStore dataStore, DataObject vo, AsyncCompletionCallback<CommandResult> callback) {
         /*
          * DeleteCommand cmd = new DeleteCommand(vo.getUri());
          * 
@@ -146,7 +151,7 @@ public class SamplePrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
      */
 
     @Override
-    public void createAsync(DataObject vol, AsyncCompletionCallback<CreateCmdResult> callback) {
+    public void createAsync(DataStore dataStore, DataObject vol, AsyncCompletionCallback<CreateCmdResult> callback) {
         EndPoint ep = selector.select(vol);
         CreateObjectCommand createCmd = new CreateObjectCommand(null);
 

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/plugins/storage/volume/solidfire/pom.xml
----------------------------------------------------------------------
diff --git a/plugins/storage/volume/solidfire/pom.xml b/plugins/storage/volume/solidfire/pom.xml
index 9db0685..81af8ac 100644
--- a/plugins/storage/volume/solidfire/pom.xml
+++ b/plugins/storage/volume/solidfire/pom.xml
@@ -12,7 +12,7 @@
   xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <artifactId>cloud-plugin-storage-volume-solidfire</artifactId>
-  <name>Apache CloudStack Plugin - Storage Volume solidfire</name>
+  <name>Apache CloudStack Plugin - Storage Volume SolidFire Provider</name>
   <parent>
     <groupId>org.apache.cloudstack</groupId>
     <artifactId>cloudstack-plugins</artifactId>
@@ -31,6 +31,11 @@
       <version>${cs.mysql.version}</version>
       <scope>provided</scope>
     </dependency>
+    <dependency>
+      <groupId>com.google.code.gson</groupId>
+      <artifactId>gson</artifactId>
+      <version>${cs.gson.version}</version>
+    </dependency>
   </dependencies>
   <build>
     <defaultGoal>install</defaultGoal>

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidfirePrimaryDataStoreDriver.java
----------------------------------------------------------------------
diff --git a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidfirePrimaryDataStoreDriver.java b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidfirePrimaryDataStoreDriver.java
index 960378c..329f27f 100644
--- a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidfirePrimaryDataStoreDriver.java
+++ b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidfirePrimaryDataStoreDriver.java
@@ -16,13 +16,46 @@
 // under the License.
 package org.apache.cloudstack.storage.datastore.driver;
 
-import com.cloud.agent.api.to.DataStoreTO;
-import com.cloud.agent.api.to.DataTO;
+import java.util.List;
+import java.util.Set;
+
+import javax.inject.Inject;
+
 import org.apache.cloudstack.engine.subsystem.api.storage.*;
 import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
 import org.apache.cloudstack.storage.command.CommandResult;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import org.apache.cloudstack.storage.datastore.util.SolidFireUtil;
+import org.apache.log4j.Logger;
+
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.to.DataObjectType;
+import com.cloud.agent.api.to.DataStoreTO;
+import com.cloud.agent.api.to.DataTO;
+import com.cloud.dc.dao.DataCenterDao;
+import com.cloud.exception.StorageUnavailableException;
+import com.cloud.storage.Storage.StoragePoolType;
+import com.cloud.storage.VolumeVO;
+import com.cloud.storage.dao.VolumeDao;
+import com.cloud.storage.dao.VolumeDetailsDao;
+import com.cloud.user.AccountVO;
+import com.cloud.user.AccountDetailsDao;
+import com.cloud.user.AccountDetailVO;
+import com.cloud.user.dao.AccountDao;
 
 public class SolidfirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
+    private static final Logger s_logger = Logger.getLogger(SolidfirePrimaryDataStoreDriver.class);
+
+    @Inject private PrimaryDataStoreDao _storagePoolDao;
+    @Inject private StoragePoolDetailsDao _storagePoolDetailsDao;
+    @Inject private VolumeDao _volumeDao;
+    @Inject private VolumeDetailsDao _volumeDetailsDao;
+    @Inject private DataCenterDao _zoneDao;
+    @Inject private AccountDao _accountDao;
+    @Inject private AccountDetailsDao _accountDetailsDao;
 
     @Override
     public DataTO getTO(DataObject data) {
@@ -34,12 +67,450 @@ public class SolidfirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
         return null;
     }
 
+    private static class SolidFireConnection {
+        private final String _managementVip;
+        private final int _managementPort;
+        private final String _clusterAdminUsername;
+        private final String _clusterAdminPassword;
+
+        public SolidFireConnection(String managementVip, int managementPort,
+                String clusterAdminUsername, String clusterAdminPassword) {
+            _managementVip = managementVip;
+            _managementPort = managementPort;
+            _clusterAdminUsername = clusterAdminUsername;
+            _clusterAdminPassword = clusterAdminPassword;
+        }
+
+        public String getManagementVip() {
+            return _managementVip;
+        }
+
+        public int getManagementPort() {
+            return _managementPort;
+        }
+
+        public String getClusterAdminUsername() {
+            return _clusterAdminUsername;
+        }
+
+        public String getClusterAdminPassword() {
+            return _clusterAdminPassword;
+        }
+    }
+
+    private SolidFireConnection getSolidFireConnection(long storagePoolId) {
+        StoragePoolDetailVO storagePoolDetail = _storagePoolDetailsDao.findDetail(storagePoolId, SolidFireUtil.MANAGEMENT_VIP);
+
+        String mVip = storagePoolDetail.getValue();
+
+        storagePoolDetail = _storagePoolDetailsDao.findDetail(storagePoolId, SolidFireUtil.MANAGEMENT_PORT);
+
+        int mPort = Integer.parseInt(storagePoolDetail.getValue());
+
+        storagePoolDetail = _storagePoolDetailsDao.findDetail(storagePoolId, SolidFireUtil.CLUSTER_ADMIN_USERNAME);
+
+        String clusterAdminUsername = storagePoolDetail.getValue();
+
+        storagePoolDetail = _storagePoolDetailsDao.findDetail(storagePoolId, SolidFireUtil.CLUSTER_ADMIN_PASSWORD);
+
+        String clusterAdminPassword = storagePoolDetail.getValue();
+
+        return new SolidFireConnection(mVip, mPort, clusterAdminUsername, clusterAdminPassword);
+    }
+
+    private SolidFireUtil.SolidFireAccount createSolidFireAccount(String sfAccountName,
+            SolidFireConnection sfConnection) {
+        try {
+            String mVip = sfConnection.getManagementVip();
+            int mPort = sfConnection.getManagementPort();
+            String clusterAdminUsername = sfConnection.getClusterAdminUsername();
+            String clusterAdminPassword = sfConnection.getClusterAdminPassword();
+
+            long accountNumber = SolidFireUtil.createSolidFireAccount(mVip, mPort,
+                clusterAdminUsername, clusterAdminPassword, sfAccountName);
+
+            return SolidFireUtil.getSolidFireAccountById(mVip, mPort,
+                clusterAdminUsername, clusterAdminPassword, accountNumber);
+        }
+        catch (Exception ex) {
+            throw new IllegalArgumentException(ex.getMessage());
+        }
+    }
+
+    private void updateCsDbWithAccountInfo(long csAccountId, SolidFireUtil.SolidFireAccount sfAccount) {
+        AccountDetailVO accountDetails = new AccountDetailVO(csAccountId,
+                SolidFireUtil.ACCOUNT_ID,
+                String.valueOf(sfAccount.getId()));
+
+        _accountDetailsDao.persist(accountDetails);
+
+        accountDetails = new AccountDetailVO(csAccountId,
+                SolidFireUtil.CHAP_INITIATOR_USERNAME,
+                String.valueOf(sfAccount.getName()));
+
+        _accountDetailsDao.persist(accountDetails);
+
+        accountDetails = new AccountDetailVO(csAccountId,
+                SolidFireUtil.CHAP_INITIATOR_SECRET,
+                String.valueOf(sfAccount.getInitiatorSecret()));
+
+        _accountDetailsDao.persist(accountDetails);
+
+        accountDetails = new AccountDetailVO(csAccountId,
+                SolidFireUtil.CHAP_TARGET_USERNAME,
+                sfAccount.getName());
+
+        _accountDetailsDao.persist(accountDetails);
+
+        accountDetails = new AccountDetailVO(csAccountId,
+                SolidFireUtil.CHAP_TARGET_SECRET,
+                sfAccount.getTargetSecret());
+
+        _accountDetailsDao.persist(accountDetails);
+    }
+
+    private class ChapInfoImpl implements ChapInfo {
+        private final String _initiatorUsername;
+        private final String _initiatorSecret;
+        private final String _targetUsername;
+        private final String _targetSecret;
+
+        public ChapInfoImpl(String initiatorUsername, String initiatorSecret,
+                String targetUsername, String targetSecret) {
+            _initiatorUsername = initiatorUsername;
+            _initiatorSecret = initiatorSecret;
+            _targetUsername = targetUsername;
+            _targetSecret = targetSecret;
+        }
+
+        public String getInitiatorUsername() {
+            return _initiatorUsername;
+        }
+
+        public String getInitiatorSecret() {
+            return _initiatorSecret;
+        }
+
+        public String getTargetUsername() {
+            return _targetUsername;
+        }
+
+        public String getTargetSecret() {
+            return _targetSecret;
+        }
+    }
+
+    @Override
+    public ChapInfo getChapInfo(VolumeInfo volumeInfo) {
+        long accountId = volumeInfo.getAccountId();
+
+        AccountDetailVO accountDetail = _accountDetailsDao.findDetail(accountId, SolidFireUtil.CHAP_INITIATOR_USERNAME);
+
+        String chapInitiatorUsername = accountDetail.getValue();
+
+        accountDetail = _accountDetailsDao.findDetail(accountId, SolidFireUtil.CHAP_INITIATOR_SECRET);
+
+        String chapInitiatorSecret = accountDetail.getValue();
+
+        accountDetail = _accountDetailsDao.findDetail(accountId, SolidFireUtil.CHAP_TARGET_USERNAME);
+
+        String chapTargetUsername = accountDetail.getValue();
+
+        accountDetail = _accountDetailsDao.findDetail(accountId, SolidFireUtil.CHAP_TARGET_SECRET);
+
+        String chapTargetSecret = accountDetail.getValue();
+
+        return new ChapInfoImpl(chapInitiatorUsername, chapInitiatorSecret,
+                chapTargetUsername, chapTargetSecret);
+    }
+
+    private SolidFireUtil.SolidFireVolume createSolidFireVolume(VolumeInfo volumeInfo, SolidFireConnection sfConnection)
+            throws StorageUnavailableException, Exception
+    {
+        String mVip = sfConnection.getManagementVip();
+        int mPort = sfConnection.getManagementPort();
+        String clusterAdminUsername = sfConnection.getClusterAdminUsername();
+        String clusterAdminPassword = sfConnection.getClusterAdminPassword();
+
+        AccountDetailVO accountDetail = _accountDetailsDao.findDetail(volumeInfo.getAccountId(), SolidFireUtil.ACCOUNT_ID);
+        long sfAccountId = Long.parseLong(accountDetail.getValue());
+
+        final Iops iops;
+
+        Long minIops = volumeInfo.getMinIops();
+        Long maxIops = volumeInfo.getMaxIops();
+
+        if (minIops == null || minIops <= 0 ||
+            maxIops == null || maxIops <= 0) {
+            iops = new Iops(100, 15000);
+        }
+        else {
+            iops = new Iops(volumeInfo.getMinIops(), volumeInfo.getMaxIops());
+        }
+
+    	long sfVolumeId = SolidFireUtil.createSolidFireVolume(mVip, mPort, clusterAdminUsername, clusterAdminPassword,
+    	        volumeInfo.getName(), sfAccountId, volumeInfo.getSize(), true,
+    	        iops.getMinIops(), iops.getMaxIops(), iops.getBurstIops());
+
+    	return SolidFireUtil.getSolidFireVolume(mVip, mPort, clusterAdminUsername, clusterAdminPassword, sfVolumeId);
+    }
+
+    private static class Iops
+    {
+    	private final long _minIops;
+    	private final long _maxIops;
+    	private final long _burstIops;
+
+    	public Iops(long minIops, long maxIops) throws Exception
+    	{
+    	    if (minIops <= 0 || maxIops <= 0) {
+    	        throw new Exception("The 'Min IOPS' and 'Max IOPS' values must be greater than 0.");
+    	    }
+
+            if (minIops > maxIops) {
+                throw new Exception("The 'Min IOPS' value cannot exceed the 'Max IOPS' value.");
+            }
+
+            _minIops = minIops;
+            _maxIops = maxIops;
+
+            _burstIops = getBurstIops(_maxIops);
+    	}
+
+    	public long getMinIops()
+    	{
+    		return _minIops;
+    	}
+
+    	public long getMaxIops()
+    	{
+    		return _maxIops;
+    	}
+
+    	public long getBurstIops()
+    	{
+    		return _burstIops;
+    	}
+
+        private static long getBurstIops(long maxIops)
+        {
+        	return (long)(maxIops * 1.5);
+        }
+    }
+
+    private void deleteSolidFireVolume(VolumeInfo volumeInfo, SolidFireConnection sfConnection)
+            throws StorageUnavailableException, Exception
+    {
+        Long storagePoolId = volumeInfo.getPoolId();
+
+        if (storagePoolId == null) {
+            return; // this volume was never assigned to a storage pool, so no SAN volume should exist for it
+        }
+
+        String mVip = sfConnection.getManagementVip();
+        int mPort = sfConnection.getManagementPort();
+        String clusterAdminUsername = sfConnection.getClusterAdminUsername();
+        String clusterAdminPassword = sfConnection.getClusterAdminPassword();
+
+        long sfVolumeId = Long.parseLong(volumeInfo.getFolder());
+
+        SolidFireUtil.deleteSolidFireVolume(mVip, mPort, clusterAdminUsername, clusterAdminPassword, sfVolumeId);
+    }
+
+    private String getSfAccountName(String csAccountUuid, long csAccountId) {
+        return "CloudStack_" + csAccountUuid + "_" + getRandomNumber() + "_" + csAccountId;
+    }
+
+    private static long getRandomNumber()
+    {
+        return Math.round(Math.random() * 1000000000);
+    }
+
+    private boolean sfAccountExists(String sfAccountName, SolidFireConnection sfConnection) throws Exception {
+        String mVip = sfConnection.getManagementVip();
+        int mPort = sfConnection.getManagementPort();
+        String clusterAdminUsername = sfConnection.getClusterAdminUsername();
+        String clusterAdminPassword = sfConnection.getClusterAdminPassword();
+
+        try {
+            SolidFireUtil.getSolidFireAccountByName(mVip, mPort, clusterAdminUsername, clusterAdminPassword, sfAccountName);
+        }
+        catch (Exception ex) {
+            return false;
+        }
+
+        return true;
+    }
+
     @Override
-    public void createAsync(DataObject data, AsyncCompletionCallback<CreateCmdResult> callback) {
+    public void createAsync(DataStore dataStore, DataObject dataObject,
+            AsyncCompletionCallback<CreateCmdResult> callback) {
+        String iqn = null;
+        String errMsg = null;
+
+        if (dataObject.getType() == DataObjectType.VOLUME) {
+            try {
+                VolumeInfo volumeInfo = (VolumeInfo)dataObject;
+                AccountVO account = _accountDao.findById(volumeInfo.getAccountId());
+                String sfAccountName = getSfAccountName(account.getUuid(), account.getAccountId());
+
+                long storagePoolId = dataStore.getId();
+                SolidFireConnection sfConnection = getSolidFireConnection(storagePoolId);
+
+                if (!sfAccountExists(sfAccountName, sfConnection)) {
+                    SolidFireUtil.SolidFireAccount sfAccount = createSolidFireAccount(sfAccountName,
+                            sfConnection);
+
+                    updateCsDbWithAccountInfo(account.getId(), sfAccount);
+                }
+
+                SolidFireUtil.SolidFireVolume sfVolume = createSolidFireVolume(volumeInfo, sfConnection);
+
+                iqn = sfVolume.getIqn();
+
+                VolumeVO volume = this._volumeDao.findById(volumeInfo.getId());
+
+                volume.set_iScsiName(iqn);
+                volume.setFolder(String.valueOf(sfVolume.getId()));
+                volume.setPoolType(StoragePoolType.IscsiLUN);
+                volume.setPoolId(storagePoolId);
+
+                _volumeDao.update(volume.getId(), volume);
+
+                StoragePoolVO storagePool = _storagePoolDao.findById(dataStore.getId());
+
+                long capacityBytes = storagePool.getCapacityBytes();
+                long usedBytes = storagePool.getUsedBytes();
+
+                usedBytes += volumeInfo.getSize();
+
+                if (usedBytes > capacityBytes) {
+                    usedBytes = capacityBytes;
+                }
+
+                storagePool.setUsedBytes(usedBytes);
+
+                _storagePoolDao.update(storagePoolId, storagePool);
+            } catch (StorageUnavailableException e) {
+                s_logger.error("Failed to create volume (StorageUnavailableException)", e);
+                errMsg = e.toString();
+            } catch (Exception e) {
+                s_logger.error("Failed to create volume (Exception)", e);
+                errMsg = e.toString();
+            }
+        }
+        else {
+            errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") passed to createAsync";
+        }
+
+        // path = iqn
+        // size is pulled from DataObject instance, if errMsg is null
+        CreateCmdResult result = new CreateCmdResult(iqn, new Answer(null, errMsg == null, errMsg));
+
+        result.setResult(errMsg);
+
+        callback.complete(result);
+    }
+
+    private void deleteSolidFireAccount(long sfAccountId, SolidFireConnection sfConnection) throws Exception {
+        String mVip = sfConnection.getManagementVip();
+        int mPort = sfConnection.getManagementPort();
+        String clusterAdminUsername = sfConnection.getClusterAdminUsername();
+        String clusterAdminPassword = sfConnection.getClusterAdminPassword();
+
+        List<SolidFireUtil.SolidFireVolume> sfVolumes = SolidFireUtil.getDeletedVolumes(mVip, mPort,
+                clusterAdminUsername, clusterAdminPassword);
+
+        // if there are volumes for this account in the trash, delete them (so the account can be deleted)
+        if (sfVolumes != null) {
+            for (SolidFireUtil.SolidFireVolume sfVolume : sfVolumes) {
+                if (sfVolume.getAccountId() == sfAccountId) {
+                    SolidFireUtil.purgeSolidFireVolume(mVip, mPort, clusterAdminUsername, clusterAdminPassword, sfVolume.getId());
+                }
+            }
+        }
+
+        SolidFireUtil.deleteSolidFireAccount(mVip, mPort, clusterAdminUsername, clusterAdminPassword, sfAccountId);
+    }
+
+    private boolean sfAccountHasVolume(long sfAccountId, SolidFireConnection sfConnection) throws Exception {
+        String mVip = sfConnection.getManagementVip();
+        int mPort = sfConnection.getManagementPort();
+        String clusterAdminUsername = sfConnection.getClusterAdminUsername();
+        String clusterAdminPassword = sfConnection.getClusterAdminPassword();
+
+        List<SolidFireUtil.SolidFireVolume> sfVolumes = SolidFireUtil.getSolidFireVolumesForAccountId(mVip, mPort,
+                clusterAdminUsername, clusterAdminPassword, sfAccountId);
+
+        if (sfVolumes != null) {
+            for (SolidFireUtil.SolidFireVolume sfVolume : sfVolumes) {
+                if (sfVolume.isActive()) {
+                    return true;
+                }
+            }
+        }
+
+        return false;
     }
 
     @Override
-    public void deleteAsync(DataObject data, AsyncCompletionCallback<CommandResult> callback) {
+    public void deleteAsync(DataStore dataStore, DataObject dataObject,
+            AsyncCompletionCallback<CommandResult> callback) {
+        String errMsg = null;
+
+        if (dataObject.getType() == DataObjectType.VOLUME) {
+            try {
+                VolumeInfo volumeInfo = (VolumeInfo)dataObject;
+                AccountVO account = _accountDao.findById(volumeInfo.getAccountId());
+                AccountDetailVO accountDetails = _accountDetailsDao.findDetail(account.getAccountId(), SolidFireUtil.ACCOUNT_ID);
+                long sfAccountId = Long.parseLong(accountDetails.getValue());
+
+                long storagePoolId = dataStore.getId();
+                SolidFireConnection sfConnection = getSolidFireConnection(storagePoolId);
+
+                deleteSolidFireVolume(volumeInfo, sfConnection);
+
+                _volumeDao.deleteVolumesByInstance(volumeInfo.getId());
+
+                if (!sfAccountHasVolume(sfAccountId, sfConnection)) {
+                    // delete the account from the SolidFire SAN
+                    deleteSolidFireAccount(sfAccountId, sfConnection);
+
+                    // delete the info in the account_details table
+                    // that's related to the SolidFire account
+                    _accountDetailsDao.deleteDetails(account.getAccountId());
+                }
+
+                StoragePoolVO storagePool = _storagePoolDao.findById(storagePoolId);
+
+                long usedBytes = storagePool.getUsedBytes();
+
+                usedBytes -= volumeInfo.getSize();
+
+                if (usedBytes < 0) {
+                    usedBytes = 0;
+                }
+
+                storagePool.setUsedBytes(usedBytes);
+
+                _storagePoolDao.update(storagePoolId, storagePool);
+            } catch (StorageUnavailableException e) {
+                s_logger.error("Failed to create volume (StorageUnavailableException)", e);
+                errMsg = e.toString();
+            } catch (Exception e) {
+                s_logger.error("Failed to create volume (Exception)", e);
+                errMsg = e.toString();
+            }
+        }
+        else {
+            errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") passed to deleteAsync";
+        }
+
+        CommandResult result = new CommandResult();
+
+        result.setResult(errMsg);
+
+        callback.complete(result);
     }
 
     @Override
@@ -62,5 +533,4 @@ public class SolidfirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
     @Override
     public void takeSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback<CreateCmdResult> callback) {
     }
-
 }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/lifecycle/SolidFirePrimaryDataStoreLifeCycle.java
----------------------------------------------------------------------
diff --git a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/lifecycle/SolidFirePrimaryDataStoreLifeCycle.java b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/lifecycle/SolidFirePrimaryDataStoreLifeCycle.java
new file mode 100644
index 0000000..2e25cd5
--- /dev/null
+++ b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/lifecycle/SolidFirePrimaryDataStoreLifeCycle.java
@@ -0,0 +1,274 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.cloudstack.storage.datastore.lifecycle;
+
+import java.util.Map;
+import java.util.StringTokenizer;
+
+import javax.inject.Inject;
+
+import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
+import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
+import org.apache.cloudstack.storage.datastore.util.SolidFireUtil;
+import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper;
+
+import com.cloud.dc.DataCenterVO;
+import com.cloud.dc.dao.DataCenterDao;
+import com.cloud.hypervisor.Hypervisor.HypervisorType;
+import com.cloud.agent.api.StoragePoolInfo;
+import com.cloud.storage.StoragePoolAutomation;
+import com.cloud.storage.Storage.StoragePoolType;
+import com.cloud.utils.exception.CloudRuntimeException;
+
+public class SolidFirePrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeCycle {
+    @Inject PrimaryDataStoreDao storagePoolDao;
+    @Inject PrimaryDataStoreHelper dataStoreHelper;
+    @Inject StoragePoolAutomation storagePoolAutomation;
+    @Inject StoragePoolDetailsDao storagePoolDetailsDao;
+    @Inject DataCenterDao zoneDao;
+    
+    private static final int DEFAULT_MANAGEMENT_PORT = 443;
+    private static final int DEFAULT_STORAGE_PORT = 3260;
+    
+    // invoked to add primary storage that is based on the SolidFire plug-in
+    @Override
+    public DataStore initialize(Map<String, Object> dsInfos) {
+    	String url = (String)dsInfos.get("url");
+    	Long zoneId = (Long)dsInfos.get("zoneId");
+        String storagePoolName = (String) dsInfos.get("name");
+        String providerName = (String)dsInfos.get("providerName");
+        Long capacityBytes = (Long)dsInfos.get("capacityBytes");
+        Long capacityIops = (Long)dsInfos.get("capacityIops");
+        String tags = (String)dsInfos.get("tags");
+        Map<String, String> details = (Map<String, String>)dsInfos.get("details");
+    	
+    	String storageVip = getStorageVip(url);
+    	int storagePort = getStoragePort(url);
+    	
+    	DataCenterVO zone = zoneDao.findById(zoneId);
+    	
+    	String uuid = SolidFireUtil.PROVIDER_NAME + "_" + zone.getUuid() + "_" + storageVip;
+
+        if (capacityBytes == null || capacityBytes <= 0) {
+            throw new IllegalArgumentException("'capacityBytes' must be present and greater than 0.");
+        }
+    	
+    	if (capacityIops == null || capacityIops <= 0) {
+    	    throw new IllegalArgumentException("'capacityIops' must be present and greater than 0.");
+    	}
+    	
+        PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters();
+        
+        parameters.setHost(storageVip);
+        parameters.setPort(storagePort);
+        parameters.setPath(getModifiedUrl(url));
+        parameters.setType(StoragePoolType.Iscsi);
+        parameters.setUuid(uuid);
+        parameters.setZoneId(zoneId);
+        parameters.setName(storagePoolName);
+        parameters.setProviderName(providerName);
+        parameters.setManaged(true);
+        parameters.setCapacityBytes(capacityBytes);
+        parameters.setUsedBytes(0);
+        parameters.setCapacityIops(capacityIops);
+        parameters.setHypervisorType(HypervisorType.Any);
+        parameters.setTags(tags);
+        parameters.setDetails(details);
+        
+        String managementVip = getManagementVip(url);
+        int managementPort = getManagementPort(url);
+        
+        details.put(SolidFireUtil.MANAGEMENT_VIP, managementVip);
+        details.put(SolidFireUtil.MANAGEMENT_PORT, String.valueOf(managementPort));
+        
+        String clusterAdminUsername = getValue(SolidFireUtil.CLUSTER_ADMIN_USERNAME, url);
+        String clusterAdminPassword = getValue(SolidFireUtil.CLUSTER_ADMIN_PASSWORD, url);
+        
+        details.put(SolidFireUtil.CLUSTER_ADMIN_USERNAME, clusterAdminUsername);
+        details.put(SolidFireUtil.CLUSTER_ADMIN_PASSWORD, clusterAdminPassword);
+        
+        // this adds a row in the cloud.storage_pool table for this SolidFire cluster
+    	return dataStoreHelper.createPrimaryDataStore(parameters);
+    }
+    
+    // remove the clusterAdmin and password key/value pairs
+    private String getModifiedUrl(String originalUrl)
+    {
+    	StringBuilder sb = new StringBuilder();
+    	
+    	String delimiter = ";";
+    	
+    	StringTokenizer st = new StringTokenizer(originalUrl, delimiter);
+    	
+    	while (st.hasMoreElements()) {
+			String token = st.nextElement().toString();
+			
+			if (!token.startsWith(SolidFireUtil.CLUSTER_ADMIN_USERNAME) &&
+				!token.startsWith(SolidFireUtil.CLUSTER_ADMIN_PASSWORD)) {
+				sb.append(token).append(delimiter);
+			}
+    	}
+    	
+    	String modifiedUrl = sb.toString();
+    	int lastIndexOf = modifiedUrl.lastIndexOf(delimiter);
+    	
+    	if (lastIndexOf == (modifiedUrl.length() - delimiter.length())) {
+    		return modifiedUrl.substring(0, lastIndexOf);
+    	}
+    	
+    	return modifiedUrl;
+    }
+    
+    private String getManagementVip(String url)
+    {
+    	return getVip(SolidFireUtil.MANAGEMENT_VIP, url);
+    }
+    
+    private String getStorageVip(String url)
+    {
+    	return getVip(SolidFireUtil.STORAGE_VIP, url);
+    }
+    
+    private int getManagementPort(String url)
+    {
+    	return getPort(SolidFireUtil.MANAGEMENT_VIP, url, DEFAULT_MANAGEMENT_PORT);
+    }
+    
+    private int getStoragePort(String url)
+    {
+    	return getPort(SolidFireUtil.STORAGE_VIP, url, DEFAULT_STORAGE_PORT);
+    }
+    
+    private String getVip(String keyToMatch, String url)
+    {
+    	String delimiter = ":";
+    	
+    	String storageVip = getValue(keyToMatch, url);
+    	
+    	int index = storageVip.indexOf(delimiter);
+    	
+    	if (index != -1)
+    	{
+    		return storageVip.substring(0, index);
+    	}
+    	
+    	return storageVip;
+    }
+    
+    private int getPort(String keyToMatch, String url, int defaultPortNumber)
+    {
+    	String delimiter = ":";
+    	
+    	String storageVip = getValue(keyToMatch, url);
+    	
+    	int index = storageVip.indexOf(delimiter);
+    	
+    	int portNumber = defaultPortNumber;
+    	
+    	if (index != -1) {
+    		String port = storageVip.substring(index + delimiter.length());
+    		
+    		try {
+    			portNumber = Integer.parseInt(port);
+    		}
+    		catch (NumberFormatException ex) {
+    			throw new IllegalArgumentException("Invalid URL format (port is not an integer)");
+    		}
+    	}
+    	
+    	return portNumber;
+    }
+    
+    private String getValue(String keyToMatch, String url)
+    {
+    	String delimiter1 = ";";
+    	String delimiter2 = "=";
+    	
+    	StringTokenizer st = new StringTokenizer(url, delimiter1);
+    	
+    	while (st.hasMoreElements()) {
+			String token = st.nextElement().toString();
+			
+			int index = token.indexOf(delimiter2);
+			
+			if (index == -1)
+			{
+				throw new RuntimeException("Invalid URL format");
+			}
+			
+			String key = token.substring(0, index);
+			
+			if (key.equalsIgnoreCase(keyToMatch)) {
+				String valueToReturn = token.substring(index + delimiter2.length());
+				
+				return valueToReturn;
+			}
+		}
+    	
+    	throw new RuntimeException("Key not found in URL");
+    }
+    
+    // do not implement this method for SolidFire's plug-in
+    @Override
+    public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo existingInfo) {
+        return true; // should be ignored for zone-wide-only plug-ins like SolidFire's
+    }
+    
+    // do not implement this method for SolidFire's plug-in
+    @Override
+    public boolean attachCluster(DataStore store, ClusterScope scope) {
+    	return true; // should be ignored for zone-wide-only plug-ins like SolidFire's
+    }
+    
+    @Override
+    public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType) {
+    	dataStoreHelper.attachZone(dataStore);
+    	
+        return true;
+    }
+
+    
+    @Override
+    public boolean maintain(DataStore dataStore) {
+        storagePoolAutomation.maintain(dataStore);
+        dataStoreHelper.maintain(dataStore);
+        
+        return true;
+    }
+    
+    @Override
+    public boolean cancelMaintain(DataStore store) {
+        dataStoreHelper.cancelMaintain(store);
+        storagePoolAutomation.cancelMaintain(store);
+        
+        return true;
+    }
+    
+    // invoked to delete primary storage that is based on the SolidFire plug-in
+    @Override
+    public boolean deleteDataStore(DataStore store) {
+        return dataStoreHelper.deletePrimaryDataStore(store);
+    }
+}

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/99227f7b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/provider/SolidfirePrimaryDataStoreProvider.java
----------------------------------------------------------------------
diff --git a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/provider/SolidfirePrimaryDataStoreProvider.java b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/provider/SolidfirePrimaryDataStoreProvider.java
index 2965e8f..28864ea 100644
--- a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/provider/SolidfirePrimaryDataStoreProvider.java
+++ b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/provider/SolidfirePrimaryDataStoreProvider.java
@@ -1,62 +1,91 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
 package org.apache.cloudstack.storage.datastore.provider;
 
 import java.util.Map;
 import java.util.Set;
+import java.util.HashSet;
 
-import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle;
 import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
 import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider;
+import org.apache.cloudstack.storage.datastore.driver.SolidfirePrimaryDataStoreDriver;
+import org.apache.cloudstack.storage.datastore.lifecycle.SolidFirePrimaryDataStoreLifeCycle;
+import org.apache.cloudstack.storage.datastore.util.SolidFireUtil;
 import org.springframework.stereotype.Component;
 
+import com.cloud.utils.component.ComponentContext;
+
 @Component
 public class SolidfirePrimaryDataStoreProvider implements PrimaryDataStoreProvider {
-    private final String name = "Solidfire Primary Data Store Provider";
+    protected DataStoreLifeCycle lifecycle;
+    protected PrimaryDataStoreDriver driver;
+    protected HypervisorHostListener listener;
+
+    SolidfirePrimaryDataStoreProvider() {
+
+    }
 
     @Override
     public String getName() {
-        return name;
+        return SolidFireUtil.PROVIDER_NAME;
     }
 
     @Override
     public DataStoreLifeCycle getDataStoreLifeCycle() {
-        return null;
+        return lifecycle;
     }
 
     @Override
-    public DataStoreDriver getDataStoreDriver() {
-        return null;
+    public PrimaryDataStoreDriver getDataStoreDriver() {
+        return driver;
     }
 
     @Override
     public HypervisorHostListener getHostListener() {
-        return null;
+        return listener;
     }
 
     @Override
     public boolean configure(Map<String, Object> params) {
-        return false;
+        lifecycle = ComponentContext.inject(SolidFirePrimaryDataStoreLifeCycle.class);
+        driver = ComponentContext.inject(SolidfirePrimaryDataStoreDriver.class);
+        listener = ComponentContext.inject(new HypervisorHostListener() {
+            public boolean hostConnect(long hostId, long poolId) {
+                return true;
+            }
+
+            public boolean hostDisconnected(long hostId, long poolId) {
+                return true;
+            }
+        });
+
+        return true;
     }
 
     @Override
     public Set<DataStoreProviderType> getTypes() {
-        return null;
-    }
+        Set<DataStoreProviderType> types =  new HashSet<DataStoreProviderType>();
 
+        types.add(DataStoreProviderType.PRIMARY);
+
+        return types;
+    }
 }


[42/50] [abbrv] git commit: updated refs/heads/vmsync to f737019

Posted by ah...@apache.org.
CLOUDSTACK-3279 Need mechanism to detect if hotadd for cpu/memory is supported per a specific guest OS supported by VMware.

 Signed-off-by: Sateesh Chodapuneedi <sa...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/f9803418
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/f9803418
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/f9803418

Branch: refs/heads/vmsync
Commit: f980341808a0ca93f47ab9fa99ac5bb1eeb0b650
Parents: a12a13c
Author: Sateesh Chodapuneedi <sa...@apache.org>
Authored: Sat Jun 29 00:59:21 2013 +0530
Committer: Sateesh Chodapuneedi <sa...@apache.org>
Committed: Mon Jul 1 17:27:32 2013 +0530

----------------------------------------------------------------------
 .../hypervisor/vmware/resource/VmwareResource.java | 14 ++++++++++++--
 .../hypervisor/vmware/mo/VirtualMachineMO.java     | 17 +++++++++++++++++
 2 files changed, 29 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/f9803418/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
index 968e095..286eb48 100755
--- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
+++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
@@ -273,6 +273,7 @@ import com.vmware.vim25.ComputeResourceSummary;
 import com.vmware.vim25.DatastoreSummary;
 import com.vmware.vim25.DynamicProperty;
 import com.vmware.vim25.GuestInfo;
+import com.vmware.vim25.GuestOsDescriptor;
 import com.vmware.vim25.HostCapability;
 import com.vmware.vim25.HostFirewallInfo;
 import com.vmware.vim25.HostFirewallRuleset;
@@ -307,6 +308,7 @@ import com.vmware.vim25.VirtualDisk;
 import com.vmware.vim25.VirtualEthernetCard;
 import com.vmware.vim25.VirtualEthernetCardNetworkBackingInfo;
 import com.vmware.vim25.VirtualLsiLogicController;
+import com.vmware.vim25.VirtualMachineConfigOption;
 import com.vmware.vim25.VirtualMachineConfigSpec;
 import com.vmware.vim25.VirtualMachineFileInfo;
 import com.vmware.vim25.VirtualMachineGuestOsIdentifier;
@@ -2545,9 +2547,17 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
             VmwareHelper.setBasicVmConfig(vmConfigSpec, vmSpec.getCpus(), vmSpec.getMaxSpeed(),
             vmSpec.getMinSpeed(),(int) (vmSpec.getMaxRam()/(1024*1024)), ramMb,
             translateGuestOsIdentifier(vmSpec.getArch(), vmSpec.getOs()).value(), vmSpec.getLimitCpuUse());
+            String guestOsId = translateGuestOsIdentifier(vmSpec.getArch(), vmSpec.getOs()).value();
+            boolean guestSupportsCpuHotAdd = false;
+            boolean guestSupportsMemoryHotAdd = false;
+            GuestOsDescriptor vmGuestOsDescriptor = vmMo.getGuestOsDescriptor(guestOsId);
+            if (vmGuestOsDescriptor != null) {
+                guestSupportsCpuHotAdd = vmGuestOsDescriptor.isSupportsCpuHotAdd();
+                guestSupportsMemoryHotAdd = vmGuestOsDescriptor.isSupportsMemoryHotAdd();
+            }
 
-            vmConfigSpec.setMemoryHotAddEnabled(true);
-            vmConfigSpec.setCpuHotAddEnabled(true);
+            vmConfigSpec.setMemoryHotAddEnabled(guestSupportsMemoryHotAdd);
+            vmConfigSpec.setCpuHotAddEnabled(guestSupportsCpuHotAdd);
 
             if ("true".equals(vmSpec.getDetails().get(VmDetailConstants.NESTED_VIRTUALIZATION_FLAG))) {
                 s_logger.debug("Nested Virtualization enabled in configuration, checking hypervisor capability");

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/f9803418/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java
----------------------------------------------------------------------
diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java
index cf5ffde..3e51047 100644
--- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java
+++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java
@@ -45,6 +45,7 @@ import com.vmware.vim25.CustomFieldStringValue;
 import com.vmware.vim25.DistributedVirtualSwitchPortConnection;
 import com.vmware.vim25.DynamicProperty;
 import com.vmware.vim25.GuestInfo;
+import com.vmware.vim25.GuestOsDescriptor;
 import com.vmware.vim25.HttpNfcLeaseDeviceUrl;
 import com.vmware.vim25.HttpNfcLeaseInfo;
 import com.vmware.vim25.HttpNfcLeaseState;
@@ -84,6 +85,7 @@ import com.vmware.vim25.VirtualIDEController;
 import com.vmware.vim25.VirtualLsiLogicController;
 import com.vmware.vim25.VirtualMachineCloneSpec;
 import com.vmware.vim25.VirtualMachineConfigInfo;
+import com.vmware.vim25.VirtualMachineConfigOption;
 import com.vmware.vim25.VirtualMachineConfigSpec;
 import com.vmware.vim25.VirtualMachineConfigSummary;
 import com.vmware.vim25.VirtualMachineFileInfo;
@@ -1593,6 +1595,21 @@ public class VirtualMachineMO extends BaseMO {
 		}
 	}
 
+    public GuestOsDescriptor getGuestOsDescriptor(String guestOsId) throws Exception {
+        GuestOsDescriptor guestOsDescriptor = null;
+        ManagedObjectReference vmEnvironmentBrowser =
+                _context.getVimClient().getMoRefProp(_mor, "environmentBrowser");
+        VirtualMachineConfigOption  vmConfigOption = _context.getService().queryConfigOption(vmEnvironmentBrowser, null, null);
+        List<GuestOsDescriptor> guestDescriptors = vmConfigOption.getGuestOSDescriptor();
+        for (GuestOsDescriptor descriptor : guestDescriptors) {
+            if (guestOsId != null && guestOsId.equalsIgnoreCase(descriptor.getId())) {
+                guestOsDescriptor = descriptor;
+                break;
+            }
+        }
+        return guestOsDescriptor;
+    }
+
 	public void plugDevice(VirtualDevice device) throws Exception {
         VirtualMachineConfigSpec vmConfigSpec = new VirtualMachineConfigSpec();
         //VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[1];


[12/50] [abbrv] git commit: updated refs/heads/vmsync to f737019

Posted by ah...@apache.org.
CLOUDSTACK-3145: StorageManager-Scavenger NPEs when cleaning up
templates.


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/2c31f38c
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/2c31f38c
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/2c31f38c

Branch: refs/heads/vmsync
Commit: 2c31f38c05c16d661812cb89317eb2d4e6c8faf3
Parents: 1659ee2
Author: Min Chen <mi...@citrix.com>
Authored: Fri Jun 28 15:21:57 2013 -0700
Committer: Min Chen <mi...@citrix.com>
Committed: Fri Jun 28 17:54:48 2013 -0700

----------------------------------------------------------------------
 .../com/cloud/storage/StorageManagerImpl.java   | 156 ++++++-------------
 1 file changed, 45 insertions(+), 111 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/2c31f38c/server/src/com/cloud/storage/StorageManagerImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/storage/StorageManagerImpl.java b/server/src/com/cloud/storage/StorageManagerImpl.java
index 241f6e6..ff323cb 100755
--- a/server/src/com/cloud/storage/StorageManagerImpl.java
+++ b/server/src/com/cloud/storage/StorageManagerImpl.java
@@ -42,7 +42,6 @@ import javax.inject.Inject;
 import javax.naming.ConfigurationException;
 
 import org.apache.cloudstack.api.command.admin.storage.AddImageStoreCmd;
-import com.cloud.server.ConfigurationServer;
 import org.apache.cloudstack.api.command.admin.storage.CancelPrimaryStorageMaintenanceCmd;
 import org.apache.cloudstack.api.command.admin.storage.CreateCacheStoreCmd;
 import org.apache.cloudstack.api.command.admin.storage.CreateStoragePoolCmd;
@@ -55,25 +54,21 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager;
-import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
 import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector;
 import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
 import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
-import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
-import org.apache.cloudstack.engine.subsystem.api.storage.TemplateDataFactory;
 import org.apache.cloudstack.engine.subsystem.api.storage.ImageStoreProvider;
 import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
 import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory;
+import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
 import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
-import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
+import org.apache.cloudstack.engine.subsystem.api.storage.TemplateDataFactory;
 import org.apache.cloudstack.engine.subsystem.api.storage.TemplateService;
 import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
-import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
 import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService;
 import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService.VolumeApiResult;
 import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
 import org.apache.cloudstack.framework.async.AsyncCallFuture;
-import org.apache.cloudstack.storage.command.DeleteCommand;
 import org.apache.cloudstack.storage.datastore.db.ImageStoreDao;
 import org.apache.cloudstack.storage.datastore.db.ImageStoreDetailsDao;
 import org.apache.cloudstack.storage.datastore.db.ImageStoreVO;
@@ -132,9 +127,9 @@ import com.cloud.hypervisor.HypervisorGuruManager;
 import com.cloud.org.Grouping;
 import com.cloud.org.Grouping.AllocationState;
 import com.cloud.resource.ResourceState;
+import com.cloud.server.ConfigurationServer;
 import com.cloud.server.ManagementServer;
 import com.cloud.server.StatsCollector;
-import com.cloud.service.dao.ServiceOfferingDao;
 import com.cloud.service.ServiceOfferingVO;
 import com.cloud.storage.Storage.ImageFormat;
 import com.cloud.storage.Storage.StoragePoolType;
@@ -146,9 +141,6 @@ import com.cloud.storage.dao.VMTemplateDao;
 import com.cloud.storage.dao.VMTemplatePoolDao;
 import com.cloud.storage.dao.VMTemplateZoneDao;
 import com.cloud.storage.dao.VolumeDao;
-import com.cloud.storage.dao.VolumeHostDao;
-import com.cloud.storage.DiskOfferingVO;
-import com.cloud.storage.download.DownloadMonitor;
 import com.cloud.storage.listener.StoragePoolMonitor;
 import com.cloud.storage.listener.VolumeStateListener;
 import com.cloud.template.TemplateManager;
@@ -631,7 +623,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
 
                 store = lifeCycle.initialize(params);
             } else {
-                store = (DataStore) dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);
+                store = dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);
             }
 
             HostScope scope = new HostScope(host.getId(), host.getDataCenterId());
@@ -641,13 +633,13 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
             throw new ConnectionException(true, "Unable to setup the local storage pool for " + host, e);
         }
 
-        return (DataStore) dataStoreMgr.getDataStore(store.getId(), DataStoreRole.Primary);
+        return dataStoreMgr.getDataStore(store.getId(), DataStoreRole.Primary);
     }
 
     @Override
     @SuppressWarnings("rawtypes")
     public PrimaryDataStoreInfo createPool(CreateStoragePoolCmd cmd) throws ResourceInUseException, IllegalArgumentException, UnknownHostException,
-            ResourceUnavailableException {
+    ResourceUnavailableException {
         String providerName = cmd.getStorageProviderName();
         DataStoreProvider storeProvider = dataStoreProviderMgr.getDataStoreProvider(providerName);
 
@@ -695,7 +687,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
                         "Missing parameter hypervisor. Hypervisor type is required to create zone wide primary storage.");
             }
             if (hypervisorType != HypervisorType.KVM && hypervisorType != HypervisorType.VMware &&
-                hypervisorType != HypervisorType.Any) {
+                    hypervisorType != HypervisorType.Any) {
                 throw new InvalidParameterValueException(
                         "zone wide storage pool is not supported for hypervisor type " + hypervisor);
             }
@@ -883,16 +875,16 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
         if (storagePool.getPoolType() == StoragePoolType.NetworkFilesystem) {
             BigDecimal overProvFactor = getStorageOverProvisioningFactor(storagePool.getDataCenterId());
             totalOverProvCapacity = overProvFactor.multiply(new BigDecimal(storagePool.getCapacityBytes())).longValue();// All
-                                                                                                                        // this
-                                                                                                                        // for
-                                                                                                                        // the
-                                                                                                                        // inaccuracy
-                                                                                                                        // of
-                                                                                                                        // floats
-                                                                                                                        // for
-                                                                                                                        // big
-                                                                                                                        // number
-                                                                                                                        // multiplication.
+            // this
+            // for
+            // the
+            // inaccuracy
+            // of
+            // floats
+            // for
+            // big
+            // number
+            // multiplication.
         } else {
             totalOverProvCapacity = storagePool.getCapacityBytes();
         }
@@ -1103,63 +1095,34 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
     @Override
     @DB
     public void cleanupSecondaryStorage(boolean recurring) {
+        // NOTE that object_store refactor will immediately delete the object from secondary storage when deleteTemplate etc api is issued.
+        // so here we don't need to issue DeleteCommand to resource anymore, only need to remove db entry.
         try {
-            // Cleanup templates in secondary storage hosts
+            // Cleanup templates in template_store_ref
             List<DataStore> imageStores = this.dataStoreMgr.getImageStoresByScope(new ZoneScope(null));
             for (DataStore store : imageStores) {
                 try {
                     long storeId = store.getId();
                     List<TemplateDataStoreVO> destroyedTemplateStoreVOs = this._templateStoreDao.listDestroyed(storeId);
                     s_logger.debug("Secondary storage garbage collector found " + destroyedTemplateStoreVOs.size()
-                            + " templates to cleanup on secondary storage host: " + store.getName());
+                            + " templates to cleanup on template_store_ref for store: " + store.getName());
                     for (TemplateDataStoreVO destroyedTemplateStoreVO : destroyedTemplateStoreVOs) {
-                        if (!_tmpltMgr.templateIsDeleteable(destroyedTemplateStoreVO.getTemplateId())) {
-                            if (s_logger.isDebugEnabled()) {
-                                s_logger.debug("Not deleting template at: " + destroyedTemplateStoreVO);
-                            }
-                            continue;
-                        }
-
                         if (s_logger.isDebugEnabled()) {
-                            s_logger.debug("Deleting template store: " + destroyedTemplateStoreVO);
-                        }
-
-                        VMTemplateVO destroyedTemplate = this._vmTemplateDao.findById(destroyedTemplateStoreVO.getTemplateId());
-                        if (destroyedTemplate == null) {
-                            s_logger.error("Cannot find template : " + destroyedTemplateStoreVO.getTemplateId() + " from template table");
-                            throw new CloudRuntimeException("Template " + destroyedTemplateStoreVO.getTemplateId()
-                                    + " is found in secondary storage, but not found in template table");
-                        }
-                        String installPath = destroyedTemplateStoreVO.getInstallPath();
-
-                        TemplateInfo tmpl = tmplFactory.getTemplate(destroyedTemplateStoreVO.getTemplateId(), store);
-                        if (installPath != null) {
-                            EndPoint ep = _epSelector.select(store);
-                            Command cmd = new DeleteCommand(tmpl.getTO());
-                            Answer answer = ep.sendMessage(cmd);
-
-                            if (answer == null || !answer.getResult()) {
-                                s_logger.debug("Failed to delete " + destroyedTemplateStoreVO + " due to "
-                                        + ((answer == null) ? "answer is null" : answer.getDetails()));
-                            } else {
-                                _templateStoreDao.remove(destroyedTemplateStoreVO.getId());
-                                s_logger.debug("Deleted template at: " + destroyedTemplateStoreVO.getInstallPath());
-                            }
-                        } else {
-                            _templateStoreDao.remove(destroyedTemplateStoreVO.getId());
+                            s_logger.debug("Deleting template store DB entry: " + destroyedTemplateStoreVO);
                         }
+                        _templateStoreDao.remove(destroyedTemplateStoreVO.getId());
                     }
                 } catch (Exception e) {
-                    s_logger.warn("problem cleaning up templates in secondary storage store " + store.getName(), e);
+                    s_logger.warn("problem cleaning up templates in template_store_ref for store: " + store.getName(), e);
                 }
             }
 
-            // CleanUp snapshots on Secondary Storage.
+            // CleanUp snapshots on snapshot_store_ref
             for (DataStore store : imageStores) {
                 try {
                     List<SnapshotDataStoreVO> destroyedSnapshotStoreVOs = _snapshotStoreDao.listDestroyed(store.getId());
                     s_logger.debug("Secondary storage garbage collector found " + destroyedSnapshotStoreVOs.size()
-                            + " snapshots to cleanup on secondary storage host: " + store.getName());
+                            + " snapshots to cleanup on snapshot_store_ref for store: " + store.getName());
                     for (SnapshotDataStoreVO destroyedSnapshotStoreVO : destroyedSnapshotStoreVOs) {
                         // check if this snapshot has child
                         SnapshotInfo snap = snapshotFactory.getSnapshot(destroyedSnapshotStoreVO.getSnapshotId(), store);
@@ -1169,70 +1132,37 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
                         }
 
                         if (s_logger.isDebugEnabled()) {
-                            s_logger.debug("Deleting snapshot on store: " + destroyedSnapshotStoreVO);
+                            s_logger.debug("Deleting snapshot store DB entry: " + destroyedSnapshotStoreVO);
                         }
 
-                        String installPath = destroyedSnapshotStoreVO.getInstallPath();
-
-                        if (installPath != null) {
-                            EndPoint ep = _epSelector.select(store);
-                            DeleteCommand cmd = new DeleteCommand(snap.getTO());
-                            Answer answer = ep.sendMessage(cmd);
-                            if (answer == null || !answer.getResult()) {
-                                s_logger.debug("Failed to delete " + destroyedSnapshotStoreVO + " due to "
-                                        + ((answer == null) ? "answer is null" : answer.getDetails()));
-                            } else {
-                                _volumeStoreDao.remove(destroyedSnapshotStoreVO.getId());
-                                s_logger.debug("Deleted snapshot at: " + destroyedSnapshotStoreVO.getInstallPath());
-                            }
-                        } else {
-                            _snapshotStoreDao.remove(destroyedSnapshotStoreVO.getId());
-                        }
+                        _snapshotStoreDao.remove(destroyedSnapshotStoreVO.getId());
                     }
 
                 } catch (Exception e2) {
-                    s_logger.warn("problem cleaning up snapshots in secondary storage store " + store.getName(), e2);
+                    s_logger.warn("problem cleaning up snapshots in snapshot_store_ref for store: " + store.getName(), e2);
                 }
 
             }
 
-            // CleanUp volumes on Secondary Storage.
+            // CleanUp volumes on volume_store_ref
             for (DataStore store : imageStores) {
                 try {
                     List<VolumeDataStoreVO> destroyedStoreVOs = _volumeStoreDao.listDestroyed(store.getId());
                     s_logger.debug("Secondary storage garbage collector found " + destroyedStoreVOs.size()
-                            + " volumes to cleanup on secondary storage host: " + store.getName());
+                            + " volumes to cleanup on volume_store_ref for store: " + store.getName());
                     for (VolumeDataStoreVO destroyedStoreVO : destroyedStoreVOs) {
                         if (s_logger.isDebugEnabled()) {
-                            s_logger.debug("Deleting volume on store: " + destroyedStoreVO);
-                        }
-
-                        String installPath = destroyedStoreVO.getInstallPath();
-
-                        VolumeInfo vol = this.volFactory.getVolume(destroyedStoreVO.getVolumeId(), store);
-
-                        if (installPath != null) {
-                            EndPoint ep = _epSelector.select(store);
-                            DeleteCommand cmd = new DeleteCommand(vol.getTO());
-                            Answer answer = ep.sendMessage(cmd);
-                            if (answer == null || !answer.getResult()) {
-                                s_logger.debug("Failed to delete " + destroyedStoreVO + " due to "
-                                        + ((answer == null) ? "answer is null" : answer.getDetails()));
-                            } else {
-                                _volumeStoreDao.remove(destroyedStoreVO.getId());
-                                s_logger.debug("Deleted volume at: " + destroyedStoreVO.getInstallPath());
-                            }
-                        } else {
-                            _volumeStoreDao.remove(destroyedStoreVO.getId());
+                            s_logger.debug("Deleting volume store DB entry: " + destroyedStoreVO);
                         }
+                        _volumeStoreDao.remove(destroyedStoreVO.getId());
                     }
 
                 } catch (Exception e2) {
-                    s_logger.warn("problem cleaning up volumes in secondary storage store " + store.getName(), e2);
+                    s_logger.warn("problem cleaning up volumes in volume_store_ref for store: " + store.getName(), e2);
                 }
             }
         } catch (Exception e3) {
-            s_logger.warn("problem cleaning up secondary storage ", e3);
+            s_logger.warn("problem cleaning up secondary storage DB entries. ", e3);
         }
     }
 
@@ -1251,7 +1181,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
     @Override
     @DB
     public PrimaryDataStoreInfo preparePrimaryStorageForMaintenance(Long primaryStorageId) throws ResourceUnavailableException,
-            InsufficientCapacityException {
+    InsufficientCapacityException {
         Long userId = UserContext.current().getCallerUserId();
         User user = _userDao.findById(userId);
         Account account = UserContext.current().getCaller();
@@ -1356,7 +1286,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
                         if (pool != null
                                 && (pool.getStatus().equals(StoragePoolStatus.ErrorInMaintenance)
                                         || pool.getStatus().equals(StoragePoolStatus.PrepareForMaintenance) || pool.getStatus().equals(
-                                        StoragePoolStatus.CancelMaintenance))) {
+                                                StoragePoolStatus.CancelMaintenance))) {
                             _storagePoolWorkDao.removePendingJobsOnMsRestart(vo.getMsid(), poolId);
                             pool.setStatus(StoragePoolStatus.ErrorInMaintenance);
                             _storagePoolDao.update(poolId, pool);
@@ -1567,8 +1497,9 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
     @Override
     public boolean storagePoolHasEnoughIops(List<Volume> requestedVolumes,
             StoragePool pool) {
-        if (requestedVolumes == null || requestedVolumes.isEmpty() || pool == null)
+        if (requestedVolumes == null || requestedVolumes.isEmpty() || pool == null) {
             return false;
+        }
 
         long currentIops = 0;
 
@@ -1600,11 +1531,13 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
     @Override
     public boolean storagePoolHasEnoughSpace(List<Volume> volumes,
             StoragePool pool) {
-        if (volumes == null || volumes.isEmpty())
+        if (volumes == null || volumes.isEmpty()){
             return false;
+        }
 
-        if (!checkUsagedSpace(pool))
+        if (!checkUsagedSpace(pool)) {
             return false;
+        }
 
         // allocated space includes template of specified volume
         StoragePoolVO poolVO = _storagePoolDao.findById(pool.getId());
@@ -1617,8 +1550,9 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
                     allocatedSizeWithtemplate = _capacityMgr.getAllocatedPoolCapacity(poolVO, tmpl);
                 }
             }
-            if (volume.getState() != Volume.State.Ready)
+            if (volume.getState() != Volume.State.Ready) {
                 totalAskingSize = totalAskingSize + volume.getSize();
+            }
         }
 
         long totalOverProvCapacity;


[18/50] [abbrv] git commit: updated refs/heads/vmsync to f737019

Posted by ah...@apache.org.
DataStore - provider, lifecycle, driver implementations for simulator

Mock implementation to manage secondary-storage-related operations for
the simulator.

Signed-off-by: Prasanna Santhanam <ts...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/fd867d5a
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/fd867d5a
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/fd867d5a

Branch: refs/heads/vmsync
Commit: fd867d5afa819d89c22238fcc0a45dacb84a9c70
Parents: 085e883
Author: Prasanna Santhanam <ts...@apache.org>
Authored: Tue Jun 25 19:37:04 2013 +0530
Committer: Prasanna Santhanam <ts...@apache.org>
Committed: Sat Jun 29 13:52:16 2013 +0530

----------------------------------------------------------------------
 client/tomcatconf/applicationContext.xml.in     |   3 +-
 .../tomcatconf/simulatorComponentContext.xml.in |  11 +-
 .../agent/manager/MockStorageManagerImpl.java   |  11 +-
 plugins/pom.xml                                 |   7 +-
 plugins/storage/image/simulator/pom.xml         |  68 +++++++++
 .../driver/SimulatorImageStoreDriverImpl.java   | 149 +++++++++++++++++++
 .../SimulatorImageStoreLifeCycleImpl.java       | 132 ++++++++++++++++
 .../SimulatorImageStoreProviderImpl.java        |  98 ++++++++++++
 8 files changed, 463 insertions(+), 16 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/fd867d5a/client/tomcatconf/applicationContext.xml.in
----------------------------------------------------------------------
diff --git a/client/tomcatconf/applicationContext.xml.in b/client/tomcatconf/applicationContext.xml.in
index 7052fd7..5c61b4e2 100644
--- a/client/tomcatconf/applicationContext.xml.in
+++ b/client/tomcatconf/applicationContext.xml.in
@@ -805,8 +805,9 @@
   <bean id="ClassicalPrimaryDataStoreProvider" class="org.apache.cloudstack.storage.datastore.provider.CloudStackPrimaryDataStoreProviderImpl" />
   <bean id="cloudStackImageStoreProviderImpl" class="org.apache.cloudstack.storage.datastore.provider.CloudStackImageStoreProviderImpl" />
   <bean id="s3ImageStoreProviderImpl" class="org.apache.cloudstack.storage.datastore.provider.S3ImageStoreProviderImpl" />
-  <bean id="swiftImageStoreProviderImpl" class="org.apache.cloudstack.storage.datastore.provider.SwiftImageStoreProviderImpl" />  
+  <bean id="swiftImageStoreProviderImpl" class="org.apache.cloudstack.storage.datastore.provider.SwiftImageStoreProviderImpl" />
   <bean id="solidFireDataStoreProvider" class="org.apache.cloudstack.storage.datastore.provider.SolidfirePrimaryDataStoreProvider" />
+  <bean id="simulatorImageStoreProvider" class="org.apache.cloudstack.storage.datastore.provider.SimulatorImageStoreProviderImpl" />
   <bean id="ApplicationLoadBalancerService" class="org.apache.cloudstack.network.lb.ApplicationLoadBalancerManagerImpl" />
   <bean id="InternalLoadBalancerVMManager" class="org.apache.cloudstack.network.lb.InternalLoadBalancerVMManagerImpl" />
   <bean id="StorageCacheReplacementAlgorithm" class="org.apache.cloudstack.storage.cache.manager.StorageCacheReplacementAlgorithmLRU" />

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/fd867d5a/client/tomcatconf/simulatorComponentContext.xml.in
----------------------------------------------------------------------
diff --git a/client/tomcatconf/simulatorComponentContext.xml.in b/client/tomcatconf/simulatorComponentContext.xml.in
index d71cf16..cfe0a9a 100644
--- a/client/tomcatconf/simulatorComponentContext.xml.in
+++ b/client/tomcatconf/simulatorComponentContext.xml.in
@@ -37,9 +37,9 @@
   <bean id="configurationDaoImpl" class="com.cloud.configuration.dao.ConfigurationDaoImpl"/>
 
   <!-- simulator components -->
-  <bean id="SimulatorSecondaryDiscoverer" class="com.cloud.resource.SimulatorSecondaryDiscoverer">
-    <property name="name" value="SecondaryStorage"/>
-  </bean>
+  <!--<bean id="SimulatorSecondaryDiscoverer" class="com.cloud.resource.SimulatorSecondaryDiscoverer">-->
+    <!--<property name="name" value="SecondaryStorage"/>-->
+  <!--</bean>-->
   <bean id="SimulatorDiscoverer" class="com.cloud.resource.SimulatorDiscoverer">
     <property name="name" value="Simulator Agent"/>
   </bean>
@@ -215,6 +215,11 @@
     </property>
   </bean>
 
+  <!--
+    Image Store
+  -->
+  <!--<bean id="simulatorImageStoreProvider" class="org.apache.cloudstack.storage.datastore.provider.SimulatorImageStoreProviderImpl" />-->
+
   <bean id="GlobalLoadBalancingRulesServiceImpl"
         class="org.apache.cloudstack.region.gslb.GlobalLoadBalancingRulesServiceImpl"/>
 

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/fd867d5a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockStorageManagerImpl.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockStorageManagerImpl.java b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockStorageManagerImpl.java
index a59949f..bc5aa58 100644
--- a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockStorageManagerImpl.java
+++ b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockStorageManagerImpl.java
@@ -476,7 +476,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa
         }
         Transaction txn = Transaction.open(Transaction.SIMULATOR_DB);
         MockSecStorageVO storage = null;
-        String nfsUrl = ((NfsTO)cmd.getDataStore()).getUrl();
+        String nfsUrl = ((NfsTO) store).getUrl();
         try {
             txn.start();
             storage = _mockSecStorageDao.findByUrl(nfsUrl);
@@ -868,19 +868,16 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa
 
     @Override
     public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
-        // TODO Auto-generated method stub
         return true;
     }
 
     @Override
     public boolean start() {
-        // TODO Auto-generated method stub
         return true;
     }
 
     @Override
     public boolean stop() {
-        // TODO Auto-generated method stub
         return true;
     }
 
@@ -891,17 +888,13 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa
 
     @Override
     public void preinstallTemplates(String url, long zoneId) {
-        Transaction txn = Transaction.open(Transaction.SIMULATOR_DB);
         MockSecStorageVO storage = null;
+        Transaction txn = Transaction.open(Transaction.SIMULATOR_DB);
         try {
-            txn.start();
             storage = _mockSecStorageDao.findByUrl(url);
-            txn.commit();
         } catch (Exception ex) {
-            txn.rollback();
             throw new CloudRuntimeException("Unable to find sec storage at " + url, ex);
         } finally {
-            txn.close();
             txn = Transaction.open(Transaction.CLOUD_DB);
             txn.close();
         }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/fd867d5a/plugins/pom.xml
----------------------------------------------------------------------
diff --git a/plugins/pom.xml b/plugins/pom.xml
index 9ad56c6..04eb55c 100755
--- a/plugins/pom.xml
+++ b/plugins/pom.xml
@@ -35,7 +35,7 @@
     <module>api/rate-limit</module>
     <module>api/discovery</module>
     <module>acl/static-role-based</module>
-	<module>affinity-group-processors/host-anti-affinity</module>
+	  <module>affinity-group-processors/host-anti-affinity</module>
     <module>affinity-group-processors/explicit-dedication</module>
     <module>deployment-planners/user-concentrated-pod</module>
     <module>deployment-planners/user-dispersing</module>
@@ -64,7 +64,8 @@
     <module>storage/image/s3</module>
     <module>storage/image/swift</module>
     <module>storage/image/default</module>
-    <module>storage/image/sample</module>    
+    <module>storage/image/sample</module>
+    <module>storage/image/simulator</module>
     <module>storage/volume/solidfire</module>
     <module>storage/volume/default</module>
     <module>storage/volume/sample</module>
@@ -158,8 +159,8 @@
       </activation>
       <modules>
         <module>hypervisors/simulator</module>
+        <module>storage/image/simulator</module>
       </modules>
      </profile>
   </profiles>
-
 </project>

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/fd867d5a/plugins/storage/image/simulator/pom.xml
----------------------------------------------------------------------
diff --git a/plugins/storage/image/simulator/pom.xml b/plugins/storage/image/simulator/pom.xml
new file mode 100644
index 0000000..d4b6838
--- /dev/null
+++ b/plugins/storage/image/simulator/pom.xml
@@ -0,0 +1,68 @@
+<!-- Licensed to the Apache Software Foundation (ASF) under one or more contributor
+  license agreements. See the NOTICE file distributed with this work for additional
+  information regarding copyright ownership. The ASF licenses this file to
+  you under the Apache License, Version 2.0 (the "License"); you may not use
+  this file except in compliance with the License. You may obtain a copy of
+  the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required
+  by applicable law or agreed to in writing, software distributed under the
+  License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
+  OF ANY KIND, either express or implied. See the License for the specific
+  language governing permissions and limitations under the License. -->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <artifactId>cloud-plugin-storage-image-simulator</artifactId>
+  <name>Apache CloudStack Plugin - Storage Image Simulator provider</name>
+  <parent>
+    <groupId>org.apache.cloudstack</groupId>
+    <artifactId>cloudstack-plugins</artifactId>
+    <version>4.2.0-SNAPSHOT</version>
+    <relativePath>../../../pom.xml</relativePath>
+  </parent>
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.cloudstack</groupId>
+      <artifactId>cloud-engine-storage</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.cloudstack</groupId>
+      <artifactId>cloud-engine-storage-image</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.cloudstack</groupId>
+      <artifactId>cloud-engine-storage-volume</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.cloudstack</groupId>
+      <artifactId>cloud-engine-storage-snapshot</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>mysql</groupId>
+      <artifactId>mysql-connector-java</artifactId>
+      <version>${cs.mysql.version}</version>
+      <scope>provided</scope>
+    </dependency>
+  </dependencies>
+  <build>
+    <defaultGoal>install</defaultGoal>
+    <sourceDirectory>src</sourceDirectory>
+    <testSourceDirectory>test</testSourceDirectory>
+    <plugins>
+      <plugin>
+        <artifactId>maven-surefire-plugin</artifactId>
+        <executions>
+          <execution>
+            <phase>integration-test</phase>
+            <goals>
+              <goal>test</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+</project>

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/fd867d5a/plugins/storage/image/simulator/src/org/apache/cloudstack/storage/datastore/driver/SimulatorImageStoreDriverImpl.java
----------------------------------------------------------------------
diff --git a/plugins/storage/image/simulator/src/org/apache/cloudstack/storage/datastore/driver/SimulatorImageStoreDriverImpl.java b/plugins/storage/image/simulator/src/org/apache/cloudstack/storage/datastore/driver/SimulatorImageStoreDriverImpl.java
new file mode 100644
index 0000000..74177ed
--- /dev/null
+++ b/plugins/storage/image/simulator/src/org/apache/cloudstack/storage/datastore/driver/SimulatorImageStoreDriverImpl.java
@@ -0,0 +1,149 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.datastore.driver;
+
+
+import com.cloud.agent.api.storage.DownloadAnswer;
+import com.cloud.agent.api.to.DataObjectType;
+import com.cloud.agent.api.to.DataStoreTO;
+import com.cloud.agent.api.to.NfsTO;
+import com.cloud.storage.Storage;
+import com.cloud.storage.VMTemplateVO;
+import com.cloud.storage.VolumeVO;
+import com.cloud.storage.dao.VMTemplateDao;
+import com.cloud.storage.dao.VolumeDao;
+import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher;
+import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
+import org.apache.cloudstack.framework.async.AsyncRpcContext;
+import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO;
+import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO;
+import org.apache.cloudstack.storage.image.BaseImageStoreDriverImpl;
+import org.apache.cloudstack.storage.image.store.ImageStoreImpl;
+
+import javax.inject.Inject;
+import java.util.Date;
+
+public class SimulatorImageStoreDriverImpl extends BaseImageStoreDriverImpl {
+
+    @Inject
+    TemplateDataStoreDao _templateStoreDao;
+    @Inject
+    VMTemplateDao _templateDao;
+    @Inject
+    VolumeDao _volumeDao;
+    @Inject
+    VolumeDataStoreDao _volumeStoreDao;
+
+    @Override
+    public DataStoreTO getStoreTO(DataStore store) {
+        ImageStoreImpl nfsStore = (ImageStoreImpl) store;
+        NfsTO nfsTO = new NfsTO();
+        nfsTO.setRole(store.getRole());
+        nfsTO.setUrl(nfsStore.getUri());
+        return nfsTO;
+    }
+
+    class CreateContext<T> extends AsyncRpcContext<T> {
+        final DataObject data;
+
+        public CreateContext(AsyncCompletionCallback<T> callback, DataObject data) {
+            super(callback);
+            this.data = data;
+        }
+    }
+
+    public String createEntityExtractUrl(DataStore store, String installPath, Storage.ImageFormat format) {
+        return null;
+    }
+
+    @Override
+    public void createAsync(DataObject data, AsyncCompletionCallback<CreateCmdResult> callback) {
+        CreateContext<CreateCmdResult> context = new CreateContext<CreateCmdResult>(callback, data);
+        AsyncCallbackDispatcher<SimulatorImageStoreDriverImpl, DownloadAnswer> caller = AsyncCallbackDispatcher
+                .create(this);
+        caller.setContext(context);
+        if (data.getType() == DataObjectType.TEMPLATE) {
+            this.createTemplateAsyncCallback(caller, context);
+        } else if (data.getType() == DataObjectType.VOLUME) {
+            this.createVolumeAsyncCallback(caller, context);
+        }
+    }
+
+    protected Void createTemplateAsyncCallback(AsyncCallbackDispatcher<SimulatorImageStoreDriverImpl, DownloadAnswer> callback,
+                                               CreateContext<CreateCmdResult> context) {
+        DownloadAnswer answer = callback.getResult();
+        DataObject obj = context.data;
+        DataStore store = obj.getDataStore();
+
+        TemplateDataStoreVO tmpltStoreVO = _templateStoreDao.findByStoreTemplate(store.getId(), obj.getId());
+        if (tmpltStoreVO != null) {
+            TemplateDataStoreVO updateBuilder = _templateStoreDao.createForUpdate();
+            updateBuilder.setDownloadPercent(answer.getDownloadPct());
+            updateBuilder.setDownloadState(answer.getDownloadStatus());
+            updateBuilder.setLastUpdated(new Date());
+            updateBuilder.setErrorString(answer.getErrorString());
+            updateBuilder.setJobId(answer.getJobId());
+            updateBuilder.setLocalDownloadPath(answer.getDownloadPath());
+            updateBuilder.setInstallPath(answer.getInstallPath());
+            updateBuilder.setSize(answer.getTemplateSize());
+            updateBuilder.setPhysicalSize(answer.getTemplatePhySicalSize());
+            _templateStoreDao.update(tmpltStoreVO.getId(), updateBuilder);
+            // update size in vm_template table
+            VMTemplateVO tmlptUpdater = _templateDao.createForUpdate();
+            tmlptUpdater.setSize(answer.getTemplateSize());
+            _templateDao.update(obj.getId(), tmlptUpdater);
+        }
+
+        return null;
+    }
+
+    protected Void createVolumeAsyncCallback(AsyncCallbackDispatcher<SimulatorImageStoreDriverImpl, DownloadAnswer> callback,
+                                             CreateContext<CreateCmdResult> context) {
+        DownloadAnswer answer = callback.getResult();
+        DataObject obj = context.data;
+        DataStore store = obj.getDataStore();
+
+        VolumeDataStoreVO volStoreVO = _volumeStoreDao.findByStoreVolume(store.getId(), obj.getId());
+        if (volStoreVO != null) {
+            VolumeDataStoreVO updateBuilder = _volumeStoreDao.createForUpdate();
+            updateBuilder.setDownloadPercent(answer.getDownloadPct());
+            updateBuilder.setDownloadState(answer.getDownloadStatus());
+            updateBuilder.setLastUpdated(new Date());
+            updateBuilder.setErrorString(answer.getErrorString());
+            updateBuilder.setJobId(answer.getJobId());
+            updateBuilder.setLocalDownloadPath(answer.getDownloadPath());
+            updateBuilder.setInstallPath(answer.getInstallPath());
+            updateBuilder.setSize(answer.getTemplateSize());
+            updateBuilder.setPhysicalSize(answer.getTemplatePhySicalSize());
+            _volumeStoreDao.update(volStoreVO.getId(), updateBuilder);
+            // update size in volume table
+            VolumeVO volUpdater = _volumeDao.createForUpdate();
+            volUpdater.setSize(answer.getTemplateSize());
+            _volumeDao.update(obj.getId(), volUpdater);
+        }
+
+        return null;
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/fd867d5a/plugins/storage/image/simulator/src/org/apache/cloudstack/storage/datastore/lifecycle/SimulatorImageStoreLifeCycleImpl.java
----------------------------------------------------------------------
diff --git a/plugins/storage/image/simulator/src/org/apache/cloudstack/storage/datastore/lifecycle/SimulatorImageStoreLifeCycleImpl.java b/plugins/storage/image/simulator/src/org/apache/cloudstack/storage/datastore/lifecycle/SimulatorImageStoreLifeCycleImpl.java
new file mode 100644
index 0000000..beaa7a5
--- /dev/null
+++ b/plugins/storage/image/simulator/src/org/apache/cloudstack/storage/datastore/lifecycle/SimulatorImageStoreLifeCycleImpl.java
@@ -0,0 +1,132 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.datastore.lifecycle;
+
+
+import com.cloud.agent.api.StoragePoolInfo;
+import com.cloud.exception.InvalidParameterValueException;
+import com.cloud.hypervisor.Hypervisor;
+import com.cloud.storage.DataStoreRole;
+import com.cloud.storage.ScopeType;
+import com.cloud.utils.UriUtils;
+import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
+import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
+import org.apache.cloudstack.storage.datastore.db.ImageStoreVO;
+import org.apache.cloudstack.storage.image.datastore.ImageStoreHelper;
+import org.apache.cloudstack.storage.image.datastore.ImageStoreProviderManager;
+import org.apache.cloudstack.storage.image.store.lifecycle.ImageStoreLifeCycle;
+import org.apache.log4j.Logger;
+
+import javax.inject.Inject;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.HashMap;
+import java.util.Map;
+
+public class SimulatorImageStoreLifeCycleImpl implements ImageStoreLifeCycle {
+    private static final Logger s_logger = Logger.getLogger(SimulatorImageStoreLifeCycleImpl.class);
+
+    @Inject
+    ImageStoreHelper imageStoreHelper;
+
+    @Inject
+    ImageStoreProviderManager imageStoreMgr;
+
+
+    @Override
+    public DataStore initialize(Map<String, Object> dsInfos) {
+        Long dcId = (Long) dsInfos.get("zoneId");
+        String url = (String) dsInfos.get("url");
+        String name = (String) dsInfos.get("name");
+        if (name == null) {
+            name = url;
+        }
+        String providerName = (String) dsInfos.get("providerName");
+        DataStoreRole role = (DataStoreRole) dsInfos.get("role");
+        Map<String, String> details = (Map<String, String>) dsInfos.get("details");
+
+        s_logger.info("Trying to add a new data store at " + url + " to data center " + dcId);
+
+        URI uri;
+        try {
+            uri = new URI(UriUtils.encodeURIComponent(url));
+            if (uri.getScheme() == null) {
+                throw new InvalidParameterValueException("uri.scheme is null " + url + ", add nfs:// as a prefix");
+            } else if (uri.getScheme().equalsIgnoreCase("nfs")) {
+                if (uri.getHost() == null || uri.getHost().equalsIgnoreCase("") || uri.getPath() == null
+                        || uri.getPath().equalsIgnoreCase("")) {
+                    throw new InvalidParameterValueException(
+                            "Your host and/or path is wrong.  Make sure it is of the format nfs://hostname/path");
+                }
+            }
+        } catch (URISyntaxException e) {
+            throw new InvalidParameterValueException(url + " is not a valid uri");
+        }
+
+        if (dcId == null) {
+            throw new InvalidParameterValueException(
+                    "DataCenter id is null, and simulator image store has to be associated with a data center");
+        }
+
+        Map<String, Object> imageStoreParameters = new HashMap<String, Object>();
+        imageStoreParameters.put("name", name);
+        imageStoreParameters.put("zoneId", dcId);
+        imageStoreParameters.put("url", url);
+        imageStoreParameters.put("protocol", uri.getScheme().toLowerCase());
+        imageStoreParameters.put("scope", ScopeType.ZONE);
+        imageStoreParameters.put("providerName", providerName);
+        imageStoreParameters.put("role", role);
+
+        ImageStoreVO ids = imageStoreHelper.createImageStore(imageStoreParameters, details);
+        return imageStoreMgr.getImageStore(ids.getId());
+    }
+
+    @Override
+    public boolean attachCluster(DataStore store, ClusterScope scope) {
+        return false;
+    }
+
+    @Override
+    public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo existingInfo) {
+        return false;
+    }
+
+    @Override
+    public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.HypervisorType hypervisorType) {
+        return false;
+    }
+
+    @Override
+    public boolean maintain(DataStore store) {
+        return false;
+    }
+
+    @Override
+    public boolean cancelMaintain(DataStore store) {
+        return false;
+    }
+
+    @Override
+    public boolean deleteDataStore(DataStore store) {
+        return false;
+    }
+}

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/fd867d5a/plugins/storage/image/simulator/src/org/apache/cloudstack/storage/datastore/provider/SimulatorImageStoreProviderImpl.java
----------------------------------------------------------------------
diff --git a/plugins/storage/image/simulator/src/org/apache/cloudstack/storage/datastore/provider/SimulatorImageStoreProviderImpl.java b/plugins/storage/image/simulator/src/org/apache/cloudstack/storage/datastore/provider/SimulatorImageStoreProviderImpl.java
new file mode 100644
index 0000000..775d743
--- /dev/null
+++ b/plugins/storage/image/simulator/src/org/apache/cloudstack/storage/datastore/provider/SimulatorImageStoreProviderImpl.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.datastore.provider;
+
+import com.cloud.storage.ScopeType;
+import com.cloud.utils.component.ComponentContext;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider;
+import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
+import org.apache.cloudstack.engine.subsystem.api.storage.ImageStoreProvider;
+import org.apache.cloudstack.storage.datastore.driver.SimulatorImageStoreDriverImpl;
+import org.apache.cloudstack.storage.datastore.lifecycle.SimulatorImageStoreLifeCycleImpl;
+import org.apache.cloudstack.storage.image.ImageStoreDriver;
+import org.apache.cloudstack.storage.image.datastore.ImageStoreHelper;
+import org.apache.cloudstack.storage.image.datastore.ImageStoreProviderManager;
+import org.apache.cloudstack.storage.image.store.lifecycle.ImageStoreLifeCycle;
+import org.springframework.stereotype.Component;
+
+import javax.inject.Inject;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+@Component
+public class SimulatorImageStoreProviderImpl implements ImageStoreProvider {
+
+    private final String _providerName = DataStoreProvider.NFS_IMAGE;
+    protected ImageStoreLifeCycle lifeCycle;
+    protected ImageStoreDriver driver;
+
+    @Inject
+    ImageStoreProviderManager storeMgr;
+    @Inject
+    ImageStoreHelper helper;
+
+    @Override
+    public boolean isScopeSupported(ScopeType scope) {
+        return true;
+    }
+
+    @Override
+    public boolean needDownloadSysTemplate() {
+        return false;
+    }
+
+    @Override
+    public DataStoreLifeCycle getDataStoreLifeCycle() {
+        return this.lifeCycle;
+    }
+
+    @Override
+    public DataStoreDriver getDataStoreDriver() {
+        return this.driver;
+    }
+
+    @Override
+    public HypervisorHostListener getHostListener() {
+        return null;
+    }
+
+    @Override
+    public String getName() {
+        return this._providerName;
+    }
+
+    @Override
+    public boolean configure(Map<String, Object> params) {
+        lifeCycle = ComponentContext.inject(SimulatorImageStoreLifeCycleImpl.class);
+        driver = ComponentContext.inject(SimulatorImageStoreDriverImpl.class);
+        storeMgr.registerDriver(this.getName(), driver);
+        return true;
+    }
+
+    @Override
+    public Set<DataStoreProviderType> getTypes() {
+        Set<DataStoreProviderType> types = new HashSet<DataStoreProviderType>();
+        types.add(DataStoreProviderType.IMAGE);
+        return types;
+    }
+}


[11/50] [abbrv] git commit: updated refs/heads/vmsync to f737019

Posted by ah...@apache.org.
CLOUDSTACK-3082: System VMs fail to start with Xen 6.2.0 (failing to create VIFs)


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/1659ee22
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/1659ee22
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/1659ee22

Branch: refs/heads/vmsync
Commit: 1659ee225c936b074bae1ede955194f60c546c02
Parents: 58f9202
Author: Sanjay Tripathi <sa...@citrix.com>
Authored: Wed Jun 26 12:34:31 2013 +0530
Committer: Devdeep Singh <de...@gmail.com>
Committed: Sat Jun 29 05:39:33 2013 +0530

----------------------------------------------------------------------
 .../src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java | 3 +++
 1 file changed, 3 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/1659ee22/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java
index 4abf127..6f2e842 100644
--- a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java
+++ b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java
@@ -897,6 +897,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
             vifr.MAC = "FE:FF:FF:FF:FF:FF";
             vifr.network = nw;
 
+            vifr.lockingMode = Types.VifLockingMode.NETWORK_DEFAULT;
             dom0vif = VIF.create(conn, vifr);
         }
         // At this stage we surely have a VIF
@@ -1105,6 +1106,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
             vifr.qosAlgorithmParams.put("kbps", Integer.toString(nic.getNetworkRateMbps() * 128));
         }
 
+        vifr.lockingMode = Types.VifLockingMode.NETWORK_DEFAULT;
         VIF vif = VIF.create(conn, vifr);
         if (s_logger.isDebugEnabled()) {
             vifr = vif.getRecord(conn);
@@ -4965,6 +4967,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
                 vifr.otherConfig = config;
                 vifr.MAC = "FE:FF:FF:FF:FF:FF";
                 vifr.network = linkLocal;
+                vifr.lockingMode = Types.VifLockingMode.NETWORK_DEFAULT;
                 dom0vif = VIF.create(conn, vifr);
                 dom0vif.plug(conn);
             } else {


[41/50] [abbrv] git commit: updated refs/heads/vmsync to f737019

Posted by ah...@apache.org.
CLOUDSTACK-3300: little fix for doc build


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/a12a13cc
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/a12a13cc
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/a12a13cc

Branch: refs/heads/vmsync
Commit: a12a13ccc9f5969d21774bcfa10a6c60c65a3bde
Parents: b68cc33
Author: Sebastien Goasguen <ru...@gmail.com>
Authored: Mon Jul 1 06:33:16 2013 -0400
Committer: Sebastien Goasguen <ru...@gmail.com>
Committed: Mon Jul 1 06:33:16 2013 -0400

----------------------------------------------------------------------
 docs/en-US/upload-existing-volume-to-vm.xml | 53 ------------------------
 1 file changed, 53 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/a12a13cc/docs/en-US/upload-existing-volume-to-vm.xml
----------------------------------------------------------------------
diff --git a/docs/en-US/upload-existing-volume-to-vm.xml b/docs/en-US/upload-existing-volume-to-vm.xml
index 6be43f8..4681374 100644
--- a/docs/en-US/upload-existing-volume-to-vm.xml
+++ b/docs/en-US/upload-existing-volume-to-vm.xml
@@ -22,58 +22,6 @@
  under the License.
 -->
 <section id="upload-existing-volume-to-vm">
-<<<<<<< HEAD
-    <title>Uploading an Existing Volume to a Virtual Machine</title>
-        <para>Existing data can be made accessible to a virtual machine. This is called uploading a volume to the VM. For example, this is useful to upload data from a local file system and attach it to a VM. Root administrators, domain administrators, and end users can all upload existing volumes to VMs.</para>
-        <para>The upload is performed using HTTP. The uploaded volume is placed in the zone's secondary storage</para>
-        <para>You cannot upload a volume if the preconfigured volume limit has already been reached. The default limit for the cloud is set in the global configuration parameter max.account.volumes, but administrators can also set per-domain limits that are different from the global default. See Setting Usage Limits </para>
-        <para>To upload a volume:</para>    
-    <orderedlist>
-        <listitem id="optional-hash"><para>(Optional) Create an MD5 hash (checksum) of the disk image file that you are going to upload. After uploading the data disk, &PRODUCT; will use this value to verify that no data corruption has occurred.</para></listitem>
-        <listitem><para>Log in to the &PRODUCT; UI as an administrator or user</para></listitem>
-        <listitem><para>In the left navigation bar, click Storage.</para></listitem>
-        <listitem><para>Click Upload Volume.</para></listitem>
-        <listitem><para>Provide the following:</para>
-        <itemizedlist>
-            <listitem><para>Name and Description. Any desired name and a brief description that can be shown in the UI.</para></listitem>
-            <listitem><para>Availability Zone. Choose the zone where you want to store the volume. VMs running on hosts in this zone can attach the volume.</para></listitem>
-            <listitem><para>Format. Choose one of the following to indicate the disk image format of the volume.</para>
-            <informaltable>
-                <tgroup cols="2" align="left" colsep="1" rowsep="1">
-                    <thead>
-                        <row>
-                            <entry><para>Hypervisor</para></entry>
-                            <entry><para>Disk Image Format</para></entry>
-                        </row>                                    
-                    </thead>
-                    <tbody>
-                        <row>
-                            <entry><para>XenServer</para></entry>
-                            <entry><para>VHD</para></entry>
-                        </row>
-                        <row>
-                            <entry><para>VMware</para></entry>
-                            <entry><para>OVA</para></entry>
-                        </row>
-                        <row>
-                            <entry><para>KVM</para></entry>
-                            <entry><para>QCOW2</para></entry>
-                        </row>
-                        <!--                        <row>
-                            <entry><para>OVM</para></entry>
-                            <entry><para>RAW</para></entry>
-                        </row> -->
-                    </tbody>
-                </tgroup>
-            </informaltable></listitem>                                
-            <listitem><para>URL. The secure HTTP or HTTPS URL that &PRODUCT; can use to access your disk. The type of file at the URL must match the value chosen in Format. For example, if Format is VHD, the URL might look like the following:</para>
-                <para>http://yourFileServerIP/userdata/myDataDisk.vhd</para></listitem>
-            <listitem><para>MD5 checksum. (Optional) Use the hash that you created in step <xref linkend="optional-hash"/>.</para></listitem>
-        </itemizedlist>
-        </listitem>
-        <listitem><para>Wait until the status of the volume shows that the upload is complete. Click Instances - Volumes, find the name you specified in step <xref linkend="optional-hash"/>, and make sure the status is Uploaded.</para></listitem>
-    </orderedlist>
-=======
   <title>Uploading an Existing Volume to a Virtual Machine</title>
   <para>Existing data can be made accessible to a virtual machine. This is called uploading a volume
     to the VM. For example, this is useful to upload data from a local file system and attach it to
@@ -160,5 +108,4 @@
         Volumes, find the name you specified in step 5, and make sure the status is Uploaded.</para>
     </listitem>
   </orderedlist>
->>>>>>> 9cb9f45... CLOUDSTACK-1313
 </section>


[24/50] [abbrv] git commit: updated refs/heads/vmsync to f737019

Posted by ah...@apache.org.
Include the SolidFire plugin in componentContext

Signed-off-by: Prasanna Santhanam <ts...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/a24b8d8d
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/a24b8d8d
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/a24b8d8d

Branch: refs/heads/vmsync
Commit: a24b8d8def76d1a9f1d55448c810cdf140f3a675
Parents: 1e0f833
Author: Prasanna Santhanam <ts...@apache.org>
Authored: Sat Jun 29 16:51:27 2013 +0530
Committer: Prasanna Santhanam <ts...@apache.org>
Committed: Sat Jun 29 16:51:27 2013 +0530

----------------------------------------------------------------------
 client/tomcatconf/componentContext.xml.in | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/a24b8d8d/client/tomcatconf/componentContext.xml.in
----------------------------------------------------------------------
diff --git a/client/tomcatconf/componentContext.xml.in b/client/tomcatconf/componentContext.xml.in
index a149327..966faf5 100644
--- a/client/tomcatconf/componentContext.xml.in
+++ b/client/tomcatconf/componentContext.xml.in
@@ -98,12 +98,14 @@
   It determines whether or not a adapter is activated or how it is loaded in order in its managing provider,
 
 -->
-  <bean id="CloudStackImageStoreProviderImpl"
+  <bean id="cloudStackImageStoreProviderImpl"
         class="org.apache.cloudstack.storage.datastore.provider.CloudStackImageStoreProviderImpl"/>
-  <bean id="S3ImageStoreProviderImpl"
+  <bean id="s3ImageStoreProviderImpl"
         class="org.apache.cloudstack.storage.datastore.provider.S3ImageStoreProviderImpl"/>
-  <bean id="SwiftImageStoreProviderImpl"
+  <bean id="swiftImageStoreProviderImpl"
         class="org.apache.cloudstack.storage.datastore.provider.SwiftImageStoreProviderImpl"/>
+  <bean id="solidFireDataStoreProvider"
+        class="org.apache.cloudstack.storage.datastore.provider.SolidfirePrimaryDataStoreProvider"/>
 
   <!--Storage Providers-->
   <bean id="dataStoreProviderManager"
@@ -111,9 +113,10 @@
     <property name="providers">
       <list merge="true">
         <ref bean="CloudStackPrimaryDataStoreProviderImpl"/>
-        <ref local="CloudStackImageStoreProviderImpl"/>
-        <ref local="S3ImageStoreProviderImpl"/>
-        <ref local="SwiftImageStoreProviderImpl"/>
+        <ref local="cloudStackImageStoreProviderImpl"/>
+        <ref local="s3ImageStoreProviderImpl"/>
+        <ref local="swiftImageStoreProviderImpl"/>
+        <ref local="solidFireDataStoreProvider"/>
       </list>
     </property>
   </bean>


[28/50] [abbrv] git commit: updated refs/heads/vmsync to f737019

Posted by ah...@apache.org.
CLOUDSTACK-3260
Fixing NPE.


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/e8ea6b1a
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/e8ea6b1a
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/e8ea6b1a

Branch: refs/heads/vmsync
Commit: e8ea6b1abdb78e8f789571d562cd2d37f03f336a
Parents: 15a6844
Author: Sateesh Chodapuneedi <sa...@apache.org>
Authored: Sun Jun 30 08:59:41 2013 +0530
Committer: Sateesh Chodapuneedi <sa...@apache.org>
Committed: Sun Jun 30 08:59:41 2013 +0530

----------------------------------------------------------------------
 .../hypervisor/vmware/manager/VmwareStorageManagerImpl.java      | 4 ++++
 .../src/com/cloud/hypervisor/vmware/resource/VmwareResource.java | 4 +++-
 vmware-base/src/com/cloud/hypervisor/vmware/mo/DatastoreMO.java  | 2 +-
 3 files changed, 8 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/e8ea6b1a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java
index 7b01d06..fee3e0a 100644
--- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java
+++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java
@@ -74,6 +74,7 @@ import com.cloud.utils.Pair;
 import com.cloud.utils.StringUtils;
 import com.cloud.utils.Ternary;
 import com.cloud.utils.script.Script;
+import com.cloud.utils.exception.CloudRuntimeException;
 import com.cloud.vm.VirtualMachine;
 import com.cloud.vm.snapshot.VMSnapshot;
 import com.vmware.vim25.ManagedObjectReference;
@@ -1090,6 +1091,9 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
     private String getVolumePathInDatastore(DatastoreMO dsMo, String volumeFileName) throws Exception {
         String datastoreVolumePath = dsMo.searchFileInSubFolders(volumeFileName, true);
         assert (datastoreVolumePath != null) : "Virtual disk file missing from datastore.";
+        if (datastoreVolumePath == null) {
+            throw new CloudRuntimeException("Unable to find file " + volumeFileName + " in datastore " + dsMo.getName());
+        }
         return datastoreVolumePath;
     }
 

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/e8ea6b1a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
index 52f4190..968e095 100755
--- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
+++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
@@ -3984,7 +3984,9 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
             DatastoreMO dsMo = new DatastoreMO(getServiceContext(), morDs);
             String datastoreVolumePath = dsMo.searchFileInSubFolders(cmd.getVolumePath() + ".vmdk", true);
             assert (datastoreVolumePath != null) : "Virtual disk file must exist in specified datastore for attach/detach operations.";
-
+            if (datastoreVolumePath == null) {
+                throw new CloudRuntimeException("Unable to find file " + cmd.getVolumePath() + ".vmdk in datastore " + dsMo.getName());
+            }
             AttachVolumeAnswer answer = new AttachVolumeAnswer(cmd, cmd.getDeviceId(), datastoreVolumePath);
             if (cmd.getAttach()) {
                 vmMo.attachDisk(new String[] { datastoreVolumePath }, morDs);

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/e8ea6b1a/vmware-base/src/com/cloud/hypervisor/vmware/mo/DatastoreMO.java
----------------------------------------------------------------------
diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/DatastoreMO.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/DatastoreMO.java
index 75553ae..a1f2506 100755
--- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/DatastoreMO.java
+++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/DatastoreMO.java
@@ -335,7 +335,7 @@ public class DatastoreMO extends BaseMO {
 
         HostDatastoreBrowserMO browserMo = getHostDatastoreBrowserMO();
         ArrayList<HostDatastoreBrowserSearchResults> results = browserMo.searchDatastoreSubFolders("[" + getName() + "]", fileName, caseInsensitive);
-        if (results.size() > 1) {
+        if (results != null && results.size() > 1) {
             s_logger.warn("Multiple files with name " + fileName + " exists in datastore " + datastorePath + ". Trying to choose first file found in search attempt.");
         }
         for (HostDatastoreBrowserSearchResults result : results) {


[14/50] [abbrv] git commit: updated refs/heads/vmsync to f737019

Posted by ah...@apache.org.
CLOUDSTACK-3023: added tags for all the test cases

Signed-off-by: Prasanna Santhanam <ts...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/56a001d1
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/56a001d1
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/56a001d1

Branch: refs/heads/vmsync
Commit: 56a001d1c49221397621759d784bd6000b38f5e4
Parents: f3e3048
Author: suresh sadhu <su...@citrix.com>
Authored: Thu Jun 27 12:45:06 2013 +0530
Committer: Prasanna Santhanam <ts...@apache.org>
Committed: Sat Jun 29 10:03:51 2013 +0530

----------------------------------------------------------------------
 test/integration/component/test_ldap.py | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/56a001d1/test/integration/component/test_ldap.py
----------------------------------------------------------------------
diff --git a/test/integration/component/test_ldap.py b/test/integration/component/test_ldap.py
index 1b933db..fc3bd48 100644
--- a/test/integration/component/test_ldap.py
+++ b/test/integration/component/test_ldap.py
@@ -185,7 +185,7 @@ class TestLdap(cloudstackTestCase):
             raise Exception("Warning: Exception during cleanup : %s" % e)
         return
 
-
+    @attr(tags=["advanced", "basic"])
     def test_01_configLDAP(self):
         '''
         This test is to verify ldapConfig API  with valid  values.(i.e query fileter as email)
@@ -215,7 +215,7 @@ class TestLdap(cloudstackTestCase):
 
         self.debug("end test")
 
-
+    @attr(tags=["advanced", "basic"])
     def test_02_configLDAP(self):
         '''
         This test is to verify ldapConfig API  with valid  values.(i.e query fileter as displayName)
@@ -236,7 +236,7 @@ class TestLdap(cloudstackTestCase):
             self.debug("LDAP Configuration failed with exception")
         self.debug("end test")
 
-
+    @attr(tags=["advanced", "basic"])
     def test_03_configLDAP(self):
 
         '''
@@ -250,7 +250,7 @@ class TestLdap(cloudstackTestCase):
         self.ldapconfRes=self._testldapConfig(self.services["ldapCon_3"])
         self.assertEquals(self.ldapconfRes,0,"LDAP configuration successful with invalid value.API failed")
         self.debug("end test")
-
+    @attr(tags=["advanced", "basic"])
     def test_04_configLDAP(self):
         '''
         This test is to verify ldapConfig API with invalid configuration values(by passing wrong query filter)
@@ -263,7 +263,7 @@ class TestLdap(cloudstackTestCase):
         self.assertEquals(self.ldapconfRes,0,"API failed")
 
 
-
+    @attr(tags=["advanced", "basic"])
     def test_05_configLDAP(self):
 
         '''
@@ -281,6 +281,7 @@ class TestLdap(cloudstackTestCase):
         self.assertNotEqual(loginRes,1,"login API failed")
         self.debug("end test")
 
+    @attr(tags=["advanced", "basic"])
     def test_06_removeLDAP(self):
         '''
         This test is to verify ldapRemove API functionality