Posted to commits@cloudstack.apache.org by sa...@apache.org on 2013/07/08 20:20:08 UTC

git commit: updated refs/heads/master-6-17-stable to 7054db4

Updated Branches:
  refs/heads/master-6-17-stable 3b8c3e7bf -> 7054db447


CLOUDSTACK-2571 Zone Wide Primary Storage blocker issues while Enabling in Maintenance State

Conflicts:

	engine/api/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java
	engine/api/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java
	server/src/com/cloud/storage/StorageManagerImpl.java
	server/src/com/cloud/storage/StoragePoolAutomationImpl.java

Signed-off-by: Sateesh Chodapuneedi <sa...@apache.org>

CLOUDSTACK-2571
Fixing white spaces.
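
For readers skimming the patch, the shape of the fix is roughly this: when the pool being put into maintenance is zone-wide primary storage (ZWPS), its podId and clusterId are null, so the peer-pool lookup has to query by ZONE scope with null pod/cluster, and the ModifyStoragePool command has to go to every up-and-enabled host of the pool's hypervisor in the zone rather than to the hosts of a single cluster (the PrepareForMaintenance check also moves from StorageManagerImpl into StoragePoolAutomationImpl, as the removed hunk there shows). Below is a minimal, self-contained Java sketch of that branching, under the assumption that it helps to see the two paths side by side. Pool, HostRef, listBy, and the two host-listing helpers are illustrative stand-ins, not the actual CloudStack classes; the real logic lives in PrimaryDataStoreDaoImpl and StoragePoolAutomationImpl as shown in the diff below.

    import java.util.Collections;
    import java.util.List;

    // Illustrative sketch only -- stand-ins for the CloudStack types touched by this patch.
    enum ScopeType { ZONE, CLUSTER }

    public class MaintenanceScopeSketch {

        // Hypothetical pool record reduced to the fields the scope check needs.
        static class Pool {
            ScopeType scope;
            long dataCenterId;
            Long podId;       // null for zone-wide primary storage
            Long clusterId;   // null for zone-wide primary storage
            String hypervisor;
        }

        static class HostRef { long id; }

        // Stand-in for PrimaryDataStoreDao.listBy(dcId, podId, clusterId, scope).
        static List<Pool> listBy(long dcId, Long podId, Long clusterId, ScopeType scope) {
            return Collections.emptyList(); // the real DAO runs a DB search here
        }

        // Stand-ins for the two ResourceManager lookups the patch chooses between.
        static List<HostRef> listAllUpAndEnabledHostsInZoneByHypervisor(String hv, long dcId) {
            return Collections.emptyList();
        }
        static List<HostRef> listHostsInClusterByStatusUp(Long clusterId) {
            return Collections.emptyList();
        }

        // Peer pools that must not already be in PrepareForMaintenance:
        // zone-wide pools have no pod/cluster, so pass nulls and query by ZONE scope.
        static List<Pool> peerPools(Pool pool) {
            if (pool.scope == ScopeType.ZONE) {
                return listBy(pool.dataCenterId, null, null, ScopeType.ZONE);
            }
            return listBy(pool.dataCenterId, pool.podId, pool.clusterId, ScopeType.CLUSTER);
        }

        // Hosts that must receive ModifyStoragePoolCommand when maintenance starts:
        // every up-and-enabled host of the pool's hypervisor for ZWPS, one cluster otherwise.
        static List<HostRef> hostsToNotify(Pool pool) {
            if (pool.scope == ScopeType.ZONE) {
                return listAllUpAndEnabledHostsInZoneByHypervisor(pool.hypervisor, pool.dataCenterId);
            }
            return listHostsInClusterByStatusUp(pool.clusterId);
        }

        public static void main(String[] args) {
            Pool zwps = new Pool();
            zwps.scope = ScopeType.ZONE;
            zwps.dataCenterId = 1L;
            System.out.println(peerPools(zwps).size() + " peer pools, "
                    + hostsToNotify(zwps).size() + " hosts to notify");
        }
    }

This is why the listBy signature widens from long podId to Long podId in PrimaryDataStoreDao: a zone-wide pool has no pod, so the parameter must be nullable.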


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/7054db44
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/7054db44
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/7054db44

Branch: refs/heads/master-6-17-stable
Commit: 7054db44700ba813d513bf8e16b0266d308a8dc6
Parents: 3b8c3e7
Author: Rajesh Battala <ra...@citrix.com>
Authored: Thu Jun 20 16:43:23 2013 +0530
Committer: Sateesh Chodapuneedi <sa...@apache.org>
Committed: Mon Jul 8 23:49:12 2013 +0530

----------------------------------------------------------------------
 .../datastore/db/PrimaryDataStoreDao.java       |  5 +-
 .../datastore/db/PrimaryDataStoreDaoImpl.java   | 37 +++++++------
 .../com/cloud/storage/StorageManagerImpl.java   | 56 +++++---------------
 .../storage/StoragePoolAutomationImpl.java      | 55 ++++++++++++++-----
 4 files changed, 75 insertions(+), 78 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/7054db44/engine/api/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java
----------------------------------------------------------------------
diff --git a/engine/api/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java b/engine/api/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java
index d436762..699ef3b 100644
--- a/engine/api/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java
+++ b/engine/api/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java
@@ -16,7 +16,6 @@
 // under the License.
 package org.apache.cloudstack.storage.datastore.db;
 
-import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 
@@ -38,8 +37,8 @@ public interface PrimaryDataStoreDao extends GenericDao<StoragePoolVO, Long> {
 	/**
 	 * @param datacenterId -- the id of the datacenter (availability zone)
 	 */
-	List<StoragePoolVO> listBy(long datacenterId, long podId, Long clusterId, ScopeType scope);
-    
+	List<StoragePoolVO> listBy(long datacenterId, Long podId, Long clusterId, ScopeType scope);
+
 	/**
 	 * Set capacity of storage pool in bytes
 	 * @param id pool id.

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/7054db44/engine/api/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java
----------------------------------------------------------------------
diff --git a/engine/api/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java b/engine/api/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java
index d461d58..e95041d 100644
--- a/engine/api/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java
+++ b/engine/api/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java
@@ -28,14 +28,13 @@ import javax.ejb.Local;
 import javax.inject.Inject;
 import javax.naming.ConfigurationException;
 
-import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType;
 import org.springframework.stereotype.Component;
 
+import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType;
+
 import com.cloud.host.Status;
 import com.cloud.hypervisor.Hypervisor.HypervisorType;
-
 import com.cloud.storage.StoragePoolStatus;
-
 import com.cloud.utils.db.DB;
 import com.cloud.utils.db.GenericDaoBase;
 import com.cloud.utils.db.GenericSearchBuilder;
@@ -76,7 +75,7 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
         AllFieldSearch.and("path", AllFieldSearch.entity().getPath(), SearchCriteria.Op.EQ);
         AllFieldSearch.and("podId", AllFieldSearch.entity().getPodId(), Op.EQ);
         AllFieldSearch.and("clusterId", AllFieldSearch.entity().getClusterId(), Op.EQ);
-        AllFieldSearch.done();  
+        AllFieldSearch.done();
         
     	DcPodSearch = createSearchBuilder();
     	DcPodSearch.and("datacenterId", DcPodSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ);
@@ -104,7 +103,7 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
         DeleteLvmSearch.and().op("LVM", DeleteLvmSearch.entity().getPoolType(), SearchCriteria.Op.EQ);
         DeleteLvmSearch.or("Filesystem", DeleteLvmSearch.entity().getPoolType(), SearchCriteria.Op.EQ);
         DeleteLvmSearch.cp();
-        DeleteLvmSearch.done();        
+        DeleteLvmSearch.done();
 
         
         
@@ -198,26 +197,26 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
         return findOneBy(sc);
     }
 
-	@Override
-	public List<StoragePoolVO> listBy(long datacenterId, long podId, Long clusterId, ScopeType scope) {
-	    if (clusterId != null) {
-    		SearchCriteria<StoragePoolVO> sc = DcPodSearch.create();
+    @Override
+    public List<StoragePoolVO> listBy(long datacenterId, Long podId, Long clusterId, ScopeType scope) {
+        if (clusterId != null) {
+            SearchCriteria<StoragePoolVO> sc = DcPodSearch.create();
             sc.setParameters("datacenterId", datacenterId);
             sc.setParameters("podId", podId);
             sc.setParameters("status", Status.Up);
             sc.setParameters("scope", scope);
-           
+
             sc.setParameters("cluster", clusterId);
             return listBy(sc);
-	    } else {
-	        SearchCriteria<StoragePoolVO> sc = DcPodAnyClusterSearch.create();
-	        sc.setParameters("datacenterId", datacenterId);
-	        sc.setParameters("podId", podId);
-	        sc.setParameters("status", Status.Up);
-	        sc.setParameters("scope", scope);
-	        return listBy(sc);
-	    }
-	}
+        } else {
+            SearchCriteria<StoragePoolVO> sc = DcPodAnyClusterSearch.create();
+            sc.setParameters("datacenterId", datacenterId);
+            sc.setParameters("podId", podId);
+            sc.setParameters("status", Status.Up);
+            sc.setParameters("scope", scope);
+            return listBy(sc);
+        }
+    }
 
 	@Override
 	public List<StoragePoolVO> listPoolByHostPath(String host, String path) {

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/7054db44/server/src/com/cloud/storage/StorageManagerImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/storage/StorageManagerImpl.java b/server/src/com/cloud/storage/StorageManagerImpl.java
index 1b5fcc5..a6a68c4 100755
--- a/server/src/com/cloud/storage/StorageManagerImpl.java
+++ b/server/src/com/cloud/storage/StorageManagerImpl.java
@@ -40,7 +40,9 @@ import javax.ejb.Local;
 import javax.inject.Inject;
 import javax.naming.ConfigurationException;
 
-import com.cloud.server.ConfigurationServer;
+import org.apache.log4j.Logger;
+import org.springframework.stereotype.Component;
+
 import org.apache.cloudstack.api.command.admin.storage.CancelPrimaryStorageMaintenanceCmd;
 import org.apache.cloudstack.api.command.admin.storage.CreateStoragePoolCmd;
 import org.apache.cloudstack.api.command.admin.storage.DeletePoolCmd;
@@ -66,22 +68,17 @@ import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
 import org.apache.cloudstack.framework.async.AsyncCallFuture;
 import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
 import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
-import org.apache.log4j.Logger;
-import org.springframework.stereotype.Component;
 
 import com.cloud.agent.AgentManager;
 import com.cloud.agent.api.Answer;
-import com.cloud.agent.api.BackupSnapshotCommand;
 import com.cloud.agent.api.CleanupSnapshotBackupCommand;
 import com.cloud.agent.api.Command;
-import com.cloud.agent.api.ManageSnapshotCommand;
 import com.cloud.agent.api.StoragePoolInfo;
 import com.cloud.agent.api.storage.DeleteTemplateCommand;
 import com.cloud.agent.api.storage.DeleteVolumeCommand;
 import com.cloud.agent.manager.Commands;
 import com.cloud.alert.AlertManager;
 import com.cloud.api.ApiDBUtils;
-import com.cloud.async.AsyncJobManager;
 import com.cloud.capacity.Capacity;
 import com.cloud.capacity.CapacityManager;
 import com.cloud.capacity.CapacityState;
@@ -92,7 +89,6 @@ import com.cloud.cluster.ManagementServerHostVO;
 import com.cloud.configuration.Config;
 import com.cloud.configuration.ConfigurationManager;
 import com.cloud.configuration.dao.ConfigurationDao;
-import com.cloud.consoleproxy.ConsoleProxyManager;
 import com.cloud.dc.ClusterVO;
 import com.cloud.dc.DataCenterVO;
 import com.cloud.dc.HostPodVO;
@@ -119,11 +115,10 @@ import com.cloud.host.dao.HostDao;
 import com.cloud.hypervisor.Hypervisor.HypervisorType;
 import com.cloud.hypervisor.HypervisorGuruManager;
 import com.cloud.hypervisor.dao.HypervisorCapabilitiesDao;
-import com.cloud.network.NetworkModel;
 import com.cloud.org.Grouping;
 import com.cloud.org.Grouping.AllocationState;
-import com.cloud.resource.ResourceManager;
 import com.cloud.resource.ResourceState;
+import com.cloud.server.ConfigurationServer;
 import com.cloud.server.ManagementServer;
 import com.cloud.server.StatsCollector;
 import com.cloud.service.dao.ServiceOfferingDao;
@@ -142,18 +137,15 @@ import com.cloud.storage.dao.VMTemplateS3Dao;
 import com.cloud.storage.dao.VMTemplateSwiftDao;
 import com.cloud.storage.dao.VolumeDao;
 import com.cloud.storage.dao.VolumeHostDao;
-import com.cloud.storage.download.DownloadMonitor;
 import com.cloud.storage.listener.StoragePoolMonitor;
 import com.cloud.storage.listener.VolumeStateListener;
 import com.cloud.storage.s3.S3Manager;
 import com.cloud.storage.secondary.SecondaryStorageVmManager;
 import com.cloud.storage.snapshot.SnapshotManager;
-import com.cloud.storage.snapshot.SnapshotScheduler;
 import com.cloud.tags.dao.ResourceTagDao;
 import com.cloud.template.TemplateManager;
 import com.cloud.user.Account;
 import com.cloud.user.AccountManager;
-import com.cloud.user.ResourceLimitService;
 import com.cloud.user.User;
 import com.cloud.user.UserContext;
 import com.cloud.user.dao.AccountDao;
@@ -178,12 +170,9 @@ import com.cloud.vm.DiskProfile;
 import com.cloud.vm.UserVmManager;
 import com.cloud.vm.VMInstanceVO;
 import com.cloud.vm.VirtualMachine.State;
-import com.cloud.vm.VirtualMachineManager;
 import com.cloud.vm.VirtualMachineProfile;
 import com.cloud.vm.VirtualMachineProfileImpl;
 import com.cloud.vm.dao.ConsoleProxyDao;
-import com.cloud.vm.dao.DomainRouterDao;
-import com.cloud.vm.dao.SecondaryStorageVmDao;
 import com.cloud.vm.dao.UserVmDao;
 import com.cloud.vm.dao.VMInstanceDao;
 
@@ -338,7 +327,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
 
     private int _customDiskOfferingMinSize = 1;
     private int _customDiskOfferingMaxSize = 1024;
-    private Map<String, HypervisorHostListener> hostListeners = new HashMap<String, HypervisorHostListener>();
+    private final Map<String, HypervisorHostListener> hostListeners = new HashMap<String, HypervisorHostListener>();
 
     private boolean _recreateSystemVmEnabled;
 
@@ -473,7 +462,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
 
         	final List<StoragePool> poolList = allocator.allocateToPool(dskCh, profile, plan, avoidList, 1);
         	if (poolList != null && !poolList.isEmpty()) {
-        		return (StoragePool)this.dataStoreMgr.getDataStore(poolList.get(0).getId(), DataStoreRole.Primary);
+                return (StoragePool)dataStoreMgr.getDataStore(poolList.get(0).getId(), DataStoreRole.Primary);
         	}
         }
         return null;
@@ -709,7 +698,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
                     pool = _storagePoolDao.findPoolByHostPath(host.getDataCenterId(), host.getPodId(), pInfo.getHost(), "", pInfo.getUuid());
                 }
             }
-            DataStoreProvider provider = this.dataStoreProviderMgr.getDefaultPrimaryDataStoreProvider();
+            DataStoreProvider provider = dataStoreProviderMgr.getDefaultPrimaryDataStoreProvider();
             DataStoreLifeCycle lifeCycle = provider.getDataStoreLifeCycle();
             if (pool == null) {
                 Map<String, Object> params = new HashMap<String, Object>();
@@ -726,7 +715,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
 
                 store = lifeCycle.initialize(params);
             } else {
-                store = (DataStore) dataStoreMgr.getDataStore(pool.getId(),
+                store = dataStoreMgr.getDataStore(pool.getId(),
                         DataStoreRole.Primary);
             }
 
@@ -737,7 +726,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
             throw new ConnectionException(true, "Unable to setup the local storage pool for " + host, e);
         }
 
-        return (DataStore) dataStoreMgr.getDataStore(store.getId(),
+        return dataStoreMgr.getDataStore(store.getId(),
                 DataStoreRole.Primary);
     }
 
@@ -932,7 +921,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
                 // force expunge non-destroyed volumes
                 List<VolumeVO> vols = _volsDao.listVolumesToBeDestroyed();
                 for (VolumeVO vol : vols) {
-                    AsyncCallFuture<VolumeApiResult> future = this.volService.expungeVolumeAsync(this.volFactory.getVolume(vol.getId()));
+                    AsyncCallFuture<VolumeApiResult> future = volService.expungeVolumeAsync(volFactory.getVolume(vol.getId()));
                     try {
                         future.get();
                     } catch (InterruptedException e) {
@@ -978,7 +967,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
     @Override
     public void connectHostToSharedPool(long hostId, long poolId)
             throws StorageUnavailableException {
-        StoragePool pool = (StoragePool)this.dataStoreMgr.getDataStore(poolId, DataStoreRole.Primary);
+        StoragePool pool = (StoragePool)dataStoreMgr.getDataStore(poolId, DataStoreRole.Primary);
         assert (pool.isShared()) : "Now, did you actually read the name of this method?";
         s_logger.debug("Adding pool " + pool.getName() + " to  host " + hostId);
 
@@ -1140,7 +1129,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
                     for (VolumeVO vol : vols) {
                         try {
 
-                            this.volService.expungeVolumeAsync(this.volFactory.getVolume(vol.getId()));
+                            volService.expungeVolumeAsync(volFactory.getVolume(vol.getId()));
 
                         } catch (Exception e) {
                             s_logger.warn("Unable to destroy " + vol.getId(), e);
@@ -1418,7 +1407,6 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
 
         boolean restart = true;
         StoragePoolVO primaryStorage = null;
-
         primaryStorage = _storagePoolDao.findById(primaryStorageId);
 
         if (primaryStorage == null) {
@@ -1427,24 +1415,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
             throw new InvalidParameterValueException(msg);
         }
 
-        List<StoragePoolVO> spes = _storagePoolDao.listBy(
-                primaryStorage.getDataCenterId(), primaryStorage.getPodId(),
-                primaryStorage.getClusterId(), ScopeType.CLUSTER);
-        for (StoragePoolVO sp : spes) {
-            if (sp.getStatus() == StoragePoolStatus.PrepareForMaintenance) {
-                throw new CloudRuntimeException(
-                        "Only one storage pool in a cluster can be in PrepareForMaintenance mode, "
-                                + sp.getId()
-                                + " is already in  PrepareForMaintenance mode ");
-            }
-        }
-
-        if (!primaryStorage.getStatus().equals(StoragePoolStatus.Up)
-                && !primaryStorage.getStatus().equals(
-                        StoragePoolStatus.ErrorInMaintenance)) {
-            throw new InvalidParameterValueException("Primary storage with id "
-                    + primaryStorageId
-                    + " is not ready for migration, as the status is:"
+        if (!primaryStorage.getStatus().equals(StoragePoolStatus.Up) && !primaryStorage.getStatus().equals(StoragePoolStatus.ErrorInMaintenance)) {
+            throw new InvalidParameterValueException("Primary storage with id " + primaryStorageId + " is not ready for migration, as the status is:"
                     + primaryStorage.getStatus().toString());
         }
 

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/7054db44/server/src/com/cloud/storage/StoragePoolAutomationImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/storage/StoragePoolAutomationImpl.java b/server/src/com/cloud/storage/StoragePoolAutomationImpl.java
index 9bba979..b656f30 100644
--- a/server/src/com/cloud/storage/StoragePoolAutomationImpl.java
+++ b/server/src/com/cloud/storage/StoragePoolAutomationImpl.java
@@ -18,20 +18,20 @@
  */
 package com.cloud.storage;
 
+import java.util.ArrayList;
 import java.util.List;
 
 import javax.inject.Inject;
 
+import org.apache.log4j.Logger;
+import org.springframework.stereotype.Component;
+
 import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
-import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
-import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager;
-import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole;
+import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType;
 import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
 import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
-import org.apache.log4j.Logger;
-import org.springframework.stereotype.Component;
 
 import com.cloud.agent.AgentManager;
 import com.cloud.agent.api.Answer;
@@ -108,11 +108,39 @@ public class StoragePoolAutomationImpl implements StoragePoolAutomation {
         Long userId = UserContext.current().getCallerUserId();
         User user = _userDao.findById(userId);
         Account account = UserContext.current().getCaller();
-        StoragePoolVO pool = this.primaryDataStoreDao.findById(store.getId());
+        StoragePoolVO pool = primaryDataStoreDao.findById(store.getId());
         try {
+            List<StoragePoolVO> spes = null;
+            // Handling Zone and Cluster wide storage scopes.
+            // if the storage is ZONE wide then we pass podid and cluster id as null as they will be empty for ZWPS
+            if (pool.getScope() == ScopeType.ZONE) {
+                spes = primaryDataStoreDao.listBy(
+                        pool.getDataCenterId(), null,
+                        null, ScopeType.ZONE);
+            }
+            else {
+                spes = primaryDataStoreDao.listBy(
+                        pool.getDataCenterId(), pool.getPodId(),
+                        pool.getClusterId(), ScopeType.CLUSTER);
+            }
+            for (StoragePoolVO sp : spes) {
+                if (sp.getStatus() == StoragePoolStatus.PrepareForMaintenance) {
+                    throw new CloudRuntimeException("Only one storage pool in a cluster can be in PrepareForMaintenance mode, " + sp.getId()
+                            + " is already in  PrepareForMaintenance mode ");
+                }
+            }
             StoragePool storagePool = (StoragePool) store;
-            List<HostVO> hosts = _resourceMgr.listHostsInClusterByStatus(
+
+            //Handeling the Zone wide and cluster wide primay storage
+            List<HostVO> hosts = new ArrayList<HostVO>();
+            // if the storage scope is ZONE wide, then get all the hosts for which hypervisor ZWSP created to send Modifystoragepoolcommand
+            if (pool.getScope().equals(ScopeType.ZONE)) {
+                hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(pool.getHypervisor() , pool.getDataCenterId());
+            } else {
+                hosts = _resourceMgr.listHostsInClusterByStatus(
                     pool.getClusterId(), Status.Up);
+            }
+
             if (hosts == null || hosts.size() == 0) {
                 pool.setStatus(StoragePoolStatus.Maintenance);
                 primaryDataStoreDao.update(pool.getId(), pool);
@@ -135,7 +163,7 @@ public class StoragePoolAutomationImpl implements StoragePoolAutomation {
                     }
                 } else {
                     if (s_logger.isDebugEnabled()) {
-                        s_logger.debug("ModifyStoragePool false secceeded");
+                        s_logger.debug("ModifyStoragePool false succeeded");
                     }
                 }
             }
@@ -151,7 +179,7 @@ public class StoragePoolAutomationImpl implements StoragePoolAutomation {
             }
 
             // 2. Get a list of all the ROOT volumes within this storage pool
-            List<VolumeVO> allVolumes = this.volumeDao.findByPoolId(pool
+            List<VolumeVO> allVolumes = volumeDao.findByPoolId(pool
                     .getId());
 
             // 3. Enqueue to the work queue
@@ -222,7 +250,7 @@ public class StoragePoolAutomationImpl implements StoragePoolAutomation {
 
                     if (restart) {
 
-                        if (this.vmMgr.advanceStart(consoleProxy, null, user,
+                        if (vmMgr.advanceStart(consoleProxy, null, user,
                                 account) == null) {
                             String errorMsg = "There was an error starting the console proxy id: "
                                     + vmInstance.getId()
@@ -315,12 +343,11 @@ public class StoragePoolAutomationImpl implements StoragePoolAutomation {
                     }
                 }
             }
-            
         } catch(Exception e) {
             s_logger.error(
                     "Exception in enabling primary storage maintenance:", e);
             pool.setStatus(StoragePoolStatus.ErrorInMaintenance);
-            this.primaryDataStoreDao.update(pool.getId(), pool);
+            primaryDataStoreDao.update(pool.getId(), pool);
             throw new CloudRuntimeException(e.getMessage());
         }
         return true;
@@ -332,10 +359,10 @@ public class StoragePoolAutomationImpl implements StoragePoolAutomation {
         Long userId = UserContext.current().getCallerUserId();
         User user = _userDao.findById(userId);
         Account account = UserContext.current().getCaller();
-        StoragePoolVO poolVO = this.primaryDataStoreDao
+        StoragePoolVO poolVO = primaryDataStoreDao
                 .findById(store.getId());
         StoragePool pool = (StoragePool)store;
-       
+
         List<HostVO> hosts = _resourceMgr.listHostsInClusterByStatus(
                 pool.getClusterId(), Status.Up);
         if (hosts == null || hosts.size() == 0) {