Posted to commits@cloudstack.apache.org by ed...@apache.org on 2013/01/15 03:04:54 UTC

[18/44] Revert "Merge remote-tracking branch 'origin/javelin' into javelin"

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/110465b5/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidfirePrimaryDataStoreDriver.java
----------------------------------------------------------------------
diff --git a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidfirePrimaryDataStoreDriver.java b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidfirePrimaryDataStoreDriver.java
index d0413e3..e55cce0 100644
--- a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidfirePrimaryDataStoreDriver.java
+++ b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidfirePrimaryDataStoreDriver.java
@@ -1,82 +1,97 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
 package org.apache.cloudstack.storage.datastore.driver;
 
-import java.util.Set;
+import java.util.Map;
 
-import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
-import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult;
-import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
-import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
 import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
-import org.apache.cloudstack.storage.snapshot.SnapshotInfo;
-import org.apache.cloudstack.storage.volume.PrimaryDataStoreDriver;
+import org.apache.cloudstack.storage.EndPoint;
+import org.apache.cloudstack.storage.command.CommandResult;
+import org.apache.cloudstack.storage.datastore.PrimaryDataStore;
+import org.apache.cloudstack.storage.volume.TemplateOnPrimaryDataStoreInfo;
+import org.apache.cloudstack.storage.volume.VolumeObject;
 
 public class SolidfirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
 
+
+	@Override
+	public String grantAccess(VolumeObject vol, EndPoint ep) {
+		// TODO Auto-generated method stub
+		return null;
+	}
+
+	@Override
+	public boolean revokeAccess(VolumeObject vol, EndPoint ep) {
+		// TODO Auto-generated method stub
+		return false;
+	}
+
     @Override
-    public String grantAccess(DataObject data,
-            org.apache.cloudstack.engine.subsystem.api.storage.EndPoint ep) {
+    public long getCapacity() {
         // TODO Auto-generated method stub
-        return null;
+        return 0;
     }
 
     @Override
-    public boolean revokeAccess(DataObject data,
-            org.apache.cloudstack.engine.subsystem.api.storage.EndPoint ep) {
+    public long getAvailableCapacity() {
         // TODO Auto-generated method stub
-        return false;
+        return 0;
     }
 
     @Override
-    public Set<DataObject> listObjects(DataStore store) {
+    public boolean initialize(Map<String, String> params) {
         // TODO Auto-generated method stub
-        return null;
+        return false;
     }
 
     @Override
-    public void createAsync(DataObject data,
-            AsyncCompletionCallback<CreateCmdResult> callback) {
+    public boolean grantAccess(EndPoint ep) {
         // TODO Auto-generated method stub
-        
+        return false;
     }
 
     @Override
-    public void deleteAsync(
-            DataObject data,
-            AsyncCompletionCallback<org.apache.cloudstack.engine.subsystem.api.storage.CommandResult> callback) {
+    public boolean revokeAccess(EndPoint ep) {
         // TODO Auto-generated method stub
-        
+        return false;
     }
 
     @Override
-    public void copyAsync(DataObject srcdata, DataObject destData,
-            AsyncCompletionCallback<CopyCommandResult> callback) {
+    public void setDataStore(PrimaryDataStore dataStore) {
         // TODO Auto-generated method stub
         
     }
 
     @Override
-    public boolean canCopy(DataObject srcData, DataObject destData) {
+    public void createVolumeFromBaseImageAsync(VolumeObject volume, TemplateOnPrimaryDataStoreInfo template, AsyncCompletionCallback<CommandResult> callback) {
         // TODO Auto-generated method stub
-        return false;
+        
     }
 
     @Override
-    public void takeSnapshot(
-            SnapshotInfo snapshot,
-            AsyncCompletionCallback<org.apache.cloudstack.engine.subsystem.api.storage.CommandResult> callback) {
+    public void createVolumeAsync(VolumeObject vol, AsyncCompletionCallback<CommandResult> callback) {
         // TODO Auto-generated method stub
         
     }
 
     @Override
-    public void revertSnapshot(
-            SnapshotInfo snapshot,
-            AsyncCompletionCallback<org.apache.cloudstack.engine.subsystem.api.storage.CommandResult> callback) {
+    public void deleteVolumeAsync(VolumeObject vo, AsyncCompletionCallback<CommandResult> callback) {
         // TODO Auto-generated method stub
         
     }
 
-
-	
-
 }
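
Read purely from the methods the reverted driver now overrides, the PrimaryDataStoreDriver contract it codes against would look roughly like the sketch below. This is inferred from this hunk alone (the reverted file needs no import for the interface, so it is assumed to live in the same package); the real interface in the javelin branch may declare additional members.

    package org.apache.cloudstack.storage.datastore.driver;

    import java.util.Map;

    import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
    import org.apache.cloudstack.storage.EndPoint;
    import org.apache.cloudstack.storage.command.CommandResult;
    import org.apache.cloudstack.storage.datastore.PrimaryDataStore;
    import org.apache.cloudstack.storage.volume.TemplateOnPrimaryDataStoreInfo;
    import org.apache.cloudstack.storage.volume.VolumeObject;

    // Sketch of the contract implied by the driver's overrides above; not the actual source.
    public interface PrimaryDataStoreDriver {
        String grantAccess(VolumeObject vol, EndPoint ep);
        boolean revokeAccess(VolumeObject vol, EndPoint ep);
        boolean grantAccess(EndPoint ep);
        boolean revokeAccess(EndPoint ep);
        long getCapacity();
        long getAvailableCapacity();
        boolean initialize(Map<String, String> params);
        void setDataStore(PrimaryDataStore dataStore);
        void createVolumeAsync(VolumeObject vol, AsyncCompletionCallback<CommandResult> callback);
        void createVolumeFromBaseImageAsync(VolumeObject volume, TemplateOnPrimaryDataStoreInfo template,
                AsyncCompletionCallback<CommandResult> callback);
        void deleteVolumeAsync(VolumeObject vo, AsyncCompletionCallback<CommandResult> callback);
    }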

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/110465b5/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/provider/SolidfirePrimaryDataStoreProvider.java
----------------------------------------------------------------------
diff --git a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/provider/SolidfirePrimaryDataStoreProvider.java b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/provider/SolidfirePrimaryDataStoreProvider.java
index 9bf8fa7..bcffbd3 100644
--- a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/provider/SolidfirePrimaryDataStoreProvider.java
+++ b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/provider/SolidfirePrimaryDataStoreProvider.java
@@ -1,5 +1,30 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
 package org.apache.cloudstack.storage.datastore.provider;
 
+import java.util.List;
+
+import org.apache.cloudstack.storage.datastore.DefaultPrimaryDataStore;
+import org.apache.cloudstack.storage.datastore.PrimaryDataStore;
+import org.apache.cloudstack.storage.datastore.configurator.PrimaryDataStoreConfigurator;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreVO;
+import org.apache.cloudstack.storage.datastore.driver.SolidfirePrimaryDataStoreDriver;
+import org.apache.cloudstack.storage.datastore.lifecycle.DefaultPrimaryDataStoreLifeCycleImpl;
+import org.springframework.beans.factory.annotation.Qualifier;
 import org.springframework.stereotype.Component;
 
 @Component
@@ -8,8 +33,8 @@ public class SolidfirePrimaryDataStoreProvider extends
 	private final String name = "Solidfre Primary Data Store Provider";
 
 
-	public SolidfirePrimaryDataStoreProvider() {
-	    
+	public SolidfirePrimaryDataStoreProvider(@Qualifier("solidfire") List<PrimaryDataStoreConfigurator> configurators) {
+	    super(configurators);
 		
 		// TODO Auto-generated constructor stub
 	}
@@ -19,5 +44,21 @@ public class SolidfirePrimaryDataStoreProvider extends
 		return name;
 	}
 	
-	
+	@Override
+	public PrimaryDataStore getDataStore(long dataStoreId) {
+		PrimaryDataStoreVO dsv = dataStoreDao.findById(dataStoreId);
+        if (dsv == null) {
+            return null;
+        }
+
+        DefaultPrimaryDataStore pds = DefaultPrimaryDataStore.createDataStore(dsv);
+        SolidfirePrimaryDataStoreDriver driver = new SolidfirePrimaryDataStoreDriver();
+        pds.setDriver(driver);
+
+        
+        DefaultPrimaryDataStoreLifeCycleImpl lifeCycle = new DefaultPrimaryDataStoreLifeCycleImpl(dataStoreDao);
+
+        pds.setLifeCycle(lifeCycle);
+        return pds;
+    }
 }

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/110465b5/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/AopTestAdvice.java
----------------------------------------------------------------------
diff --git a/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/AopTestAdvice.java b/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/AopTestAdvice.java
index ba356e3..63669c4 100644
--- a/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/AopTestAdvice.java
+++ b/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/AopTestAdvice.java
@@ -1,3 +1,19 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
 package org.apache.cloudstack.storage.test;
 
 import org.aspectj.lang.ProceedingJoinPoint;

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/110465b5/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/ChildTestConfiguration.java
----------------------------------------------------------------------
diff --git a/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/ChildTestConfiguration.java b/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/ChildTestConfiguration.java
index 6a7b5ad..eb6fe45 100644
--- a/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/ChildTestConfiguration.java
+++ b/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/ChildTestConfiguration.java
@@ -1,3 +1,19 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
 package org.apache.cloudstack.storage.test;
 
 import org.apache.cloudstack.storage.image.motion.ImageMotionService;

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/110465b5/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/TestConfiguration.java
----------------------------------------------------------------------
diff --git a/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/TestConfiguration.java b/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/TestConfiguration.java
index 42cd8fb..2c6092d 100644
--- a/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/TestConfiguration.java
+++ b/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/TestConfiguration.java
@@ -1,3 +1,19 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
 package org.apache.cloudstack.storage.test;
 
 import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/110465b5/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/VolumeTest.java
----------------------------------------------------------------------
diff --git a/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/VolumeTest.java b/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/VolumeTest.java
index 0b73694..f5035bf 100644
--- a/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/VolumeTest.java
+++ b/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/VolumeTest.java
@@ -1,5 +1,23 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
 package org.apache.cloudstack.storage.test;
 
+import static org.junit.Assert.*;
+
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
@@ -10,10 +28,12 @@ import javax.inject.Inject;
 import javax.naming.ConfigurationException;
 
 import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider;
 import org.apache.cloudstack.storage.command.CreateVolumeAnswer;
 import org.apache.cloudstack.storage.command.CreateVolumeFromBaseImageCommand;
 import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
-import org.apache.cloudstack.storage.datastore.provider.PrimaryDataStoreProvider;
+import org.apache.cloudstack.storage.datastore.provider.PrimaryDataStoreProviderManager;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.runner.RunWith;
@@ -23,9 +43,9 @@ import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
 
 import com.cloud.agent.AgentManager;
 import com.cloud.dc.ClusterVO;
-import com.cloud.dc.DataCenter.NetworkType;
 import com.cloud.dc.DataCenterVO;
 import com.cloud.dc.HostPodVO;
+import com.cloud.dc.DataCenter.NetworkType;
 import com.cloud.dc.dao.ClusterDao;
 import com.cloud.dc.dao.DataCenterDao;
 import com.cloud.dc.dao.HostPodDao;
@@ -53,8 +73,8 @@ public class VolumeTest {
 	DataCenterDao dcDao;
 	@Inject
 	PrimaryDataStoreDao primaryStoreDao;
-	//@Inject
-	//PrimaryDataStoreProviderManager primaryDataStoreProviderMgr;
+	@Inject
+	PrimaryDataStoreProviderManager primaryDataStoreProviderMgr;
 	@Inject
 	AgentManager agentMgr;
 	Long dcId;
@@ -114,16 +134,16 @@ public class VolumeTest {
 
 	private PrimaryDataStoreInfo createPrimaryDataStore() {
 		try {
-			//primaryDataStoreProviderMgr.configure("primary data store mgr", new HashMap<String, Object>());
-			//PrimaryDataStoreProvider provider = primaryDataStoreProviderMgr.getDataStoreProvider("Solidfre Primary Data Store Provider");
+			primaryDataStoreProviderMgr.configure("primary data store mgr", new HashMap<String, Object>());
+			PrimaryDataStoreProvider provider = primaryDataStoreProviderMgr.getDataStoreProvider("Solidfre Primary Data Store Provider");
 			Map<String, String> params = new HashMap<String, String>();
 			params.put("url", "nfs://test/test");
 			params.put("dcId", dcId.toString());
 			params.put("clusterId", clusterId.toString());
 			params.put("name", "my primary data store");
-			//PrimaryDataStoreInfo primaryDataStoreInfo = provider.registerDataStore(params);
-			return null;
-		} catch (Exception e) {
+			PrimaryDataStoreInfo primaryDataStoreInfo = provider.registerDataStore(params);
+			return primaryDataStoreInfo;
+		} catch (ConfigurationException e) {
 			return null;
 		}
 	}

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/110465b5/plugins/storage/volume/solidfire/test/resource/storageContext.xml
----------------------------------------------------------------------
diff --git a/plugins/storage/volume/solidfire/test/resource/storageContext.xml b/plugins/storage/volume/solidfire/test/resource/storageContext.xml
index 6800d8f..e4ba986 100644
--- a/plugins/storage/volume/solidfire/test/resource/storageContext.xml
+++ b/plugins/storage/volume/solidfire/test/resource/storageContext.xml
@@ -1,3 +1,21 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements. See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership. The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License. You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied. See the License for the
+  specific language governing permissions and limitations
+  under the License.
+-->
 <?xml version="1.0" encoding="UTF-8"?>
 <beans xmlns="http://www.springframework.org/schema/beans"
   xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:context="http://www.springframework.org/schema/context"

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/110465b5/plugins/user-authenticators/md5/src/com/cloud/server/auth/MD5UserAuthenticator.java
----------------------------------------------------------------------
diff --git a/plugins/user-authenticators/md5/src/com/cloud/server/auth/MD5UserAuthenticator.java b/plugins/user-authenticators/md5/src/com/cloud/server/auth/MD5UserAuthenticator.java
index 9d217e6..43d3566 100644
--- a/plugins/user-authenticators/md5/src/com/cloud/server/auth/MD5UserAuthenticator.java
+++ b/plugins/user-authenticators/md5/src/com/cloud/server/auth/MD5UserAuthenticator.java
@@ -30,7 +30,7 @@ import org.springframework.stereotype.Component;
 import com.cloud.server.ManagementServer;
 import com.cloud.user.UserAccount;
 import com.cloud.user.dao.UserAccountDao;
-import com.cloud.utils.component.ComponentLocator;
+
 import com.cloud.utils.exception.CloudRuntimeException;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/110465b5/plugins/user-authenticators/plain-text/src/com/cloud/server/auth/PlainTextUserAuthenticator.java
----------------------------------------------------------------------
diff --git a/plugins/user-authenticators/plain-text/src/com/cloud/server/auth/PlainTextUserAuthenticator.java b/plugins/user-authenticators/plain-text/src/com/cloud/server/auth/PlainTextUserAuthenticator.java
index 0bf650b..d2f4347 100644
--- a/plugins/user-authenticators/plain-text/src/com/cloud/server/auth/PlainTextUserAuthenticator.java
+++ b/plugins/user-authenticators/plain-text/src/com/cloud/server/auth/PlainTextUserAuthenticator.java
@@ -30,7 +30,7 @@ import org.springframework.stereotype.Component;
 import com.cloud.server.ManagementServer;
 import com.cloud.user.UserAccount;
 import com.cloud.user.dao.UserAccountDao;
-import com.cloud.utils.component.ComponentLocator;
+
 import com.cloud.utils.exception.CloudRuntimeException;
 
 

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/110465b5/plugins/user-authenticators/sha256salted/src/com/cloud/server/auth/SHA256SaltedUserAuthenticator.java
----------------------------------------------------------------------
diff --git a/plugins/user-authenticators/sha256salted/src/com/cloud/server/auth/SHA256SaltedUserAuthenticator.java b/plugins/user-authenticators/sha256salted/src/com/cloud/server/auth/SHA256SaltedUserAuthenticator.java
index 26c33a5..1b29f69 100644
--- a/plugins/user-authenticators/sha256salted/src/com/cloud/server/auth/SHA256SaltedUserAuthenticator.java
+++ b/plugins/user-authenticators/sha256salted/src/com/cloud/server/auth/SHA256SaltedUserAuthenticator.java
@@ -23,40 +23,38 @@ import java.security.SecureRandom;
 import java.util.Map;
 
 import javax.ejb.Local;
+import javax.inject.Inject;
 import javax.naming.ConfigurationException;
 
 import org.apache.log4j.Logger;
 import org.bouncycastle.util.encoders.Base64;
 
-import com.cloud.server.ManagementServer;
-import com.cloud.servlet.CloudStartupServlet;
 import com.cloud.user.UserAccount;
 import com.cloud.user.dao.UserAccountDao;
-import com.cloud.utils.component.ComponentLocator;
-import com.cloud.utils.component.Inject;
 import com.cloud.utils.exception.CloudRuntimeException;
 
 @Local(value={UserAuthenticator.class})
 public class SHA256SaltedUserAuthenticator extends DefaultUserAuthenticator {
-	public static final Logger s_logger = Logger.getLogger(SHA256SaltedUserAuthenticator.class);
-	
-	@Inject
-	private UserAccountDao _userAccountDao;
-	private static int s_saltlen = 20;
-
-	public boolean configure(String name, Map<String, Object> params)
-			throws ConfigurationException {
-		super.configure(name, params);
-		return true;
-	}
-	
-	/* (non-Javadoc)
-	 * @see com.cloud.server.auth.UserAuthenticator#authenticate(java.lang.String, java.lang.String, java.lang.Long, java.util.Map)
-	 */
-	@Override
-	public boolean authenticate(String username, String password,
-			Long domainId, Map<String, Object[]> requestParameters) {
-		if (s_logger.isDebugEnabled()) {
+    public static final Logger s_logger = Logger.getLogger(SHA256SaltedUserAuthenticator.class);
+
+    @Inject
+    private UserAccountDao _userAccountDao;
+    private static int s_saltlen = 20;
+
+    @Override
+    public boolean configure(String name, Map<String, Object> params)
+            throws ConfigurationException {
+        super.configure(name, params);
+        return true;
+    }
+
+    /* (non-Javadoc)
+     * @see com.cloud.server.auth.UserAuthenticator#authenticate(java.lang.String, java.lang.String, java.lang.Long, java.util.Map)
+     */
+    @Override
+    public boolean authenticate(String username, String password,
+            Long domainId, Map<String, Object[]> requestParameters) {
+        if (s_logger.isDebugEnabled()) {
             s_logger.debug("Retrieving user: " + username);
         }
         UserAccount user = _userAccountDao.getUserAccount(username, domainId);
@@ -64,59 +62,59 @@ public class SHA256SaltedUserAuthenticator extends DefaultUserAuthenticator {
             s_logger.debug("Unable to find user with " + username + " in domain " + domainId);
             return false;
         }
-        
+
+        try {
+            String storedPassword[] = user.getPassword().split(":");
+            if (storedPassword.length != 2) {
+                s_logger.warn("The stored password for " + username + " isn't in the right format for this authenticator");
+                return false;
+            }
+            byte salt[] = Base64.decode(storedPassword[0]);
+            String hashedPassword = encode(password, salt);
+            return storedPassword[1].equals(hashedPassword);
+        } catch (NoSuchAlgorithmException e) {
+            throw new CloudRuntimeException("Unable to hash password", e);
+        } catch (UnsupportedEncodingException e) {
+            throw new CloudRuntimeException("Unable to hash password", e);
+        }
+    }
+
+    /* (non-Javadoc)
+     * @see com.cloud.server.auth.UserAuthenticator#encode(java.lang.String)
+     */
+    @Override
+    public String encode(String password) {
+        // 1. Generate the salt
+        SecureRandom randomGen;
         try {
-	        String storedPassword[] = user.getPassword().split(":");
-	        if (storedPassword.length != 2) {
-	        	s_logger.warn("The stored password for " + username + " isn't in the right format for this authenticator");
-	        	return false;
-	        }
-	        byte salt[] = Base64.decode(storedPassword[0]);
-	        String hashedPassword = encode(password, salt);
-	        return storedPassword[1].equals(hashedPassword);
-		} catch (NoSuchAlgorithmException e) {
-			throw new CloudRuntimeException("Unable to hash password", e);
-		} catch (UnsupportedEncodingException e) {
-			throw new CloudRuntimeException("Unable to hash password", e);
-		}
-	}
-
-	/* (non-Javadoc)
-	 * @see com.cloud.server.auth.UserAuthenticator#encode(java.lang.String)
-	 */
-	@Override
-	public String encode(String password) {
-		// 1. Generate the salt
-		SecureRandom randomGen;
-		try {
-			randomGen = SecureRandom.getInstance("SHA1PRNG");
-		
-			byte salt[] = new byte[s_saltlen];
-			randomGen.nextBytes(salt);
-			
-			String saltString = new String(Base64.encode(salt));
-			String hashString = encode(password, salt);
-			
-			// 3. concatenate the two and return
-			return saltString + ":" + hashString;
-		} catch (NoSuchAlgorithmException e) {
-			throw new CloudRuntimeException("Unable to hash password", e);
-		} catch (UnsupportedEncodingException e) {
-			throw new CloudRuntimeException("Unable to hash password", e);
-		}
-	}
-
-	public String encode(String password, byte[] salt) throws UnsupportedEncodingException, NoSuchAlgorithmException {
-		byte[] passwordBytes = password.getBytes("UTF-8");
-		byte[] hashSource = new byte[passwordBytes.length + s_saltlen];
-		System.arraycopy(passwordBytes, 0, hashSource, 0, passwordBytes.length);
-		System.arraycopy(salt, 0, hashSource, passwordBytes.length, s_saltlen);
-		
-		// 2. Hash the password with the salt
-		MessageDigest md = MessageDigest.getInstance("SHA-256");
-		md.update(hashSource);
-		byte[] digest = md.digest();
-		
-		return new String(Base64.encode(digest));
-	}
+            randomGen = SecureRandom.getInstance("SHA1PRNG");
+
+            byte salt[] = new byte[s_saltlen];
+            randomGen.nextBytes(salt);
+
+            String saltString = new String(Base64.encode(salt));
+            String hashString = encode(password, salt);
+
+            // 3. concatenate the two and return
+            return saltString + ":" + hashString;
+        } catch (NoSuchAlgorithmException e) {
+            throw new CloudRuntimeException("Unable to hash password", e);
+        } catch (UnsupportedEncodingException e) {
+            throw new CloudRuntimeException("Unable to hash password", e);
+        }
+    }
+
+    public String encode(String password, byte[] salt) throws UnsupportedEncodingException, NoSuchAlgorithmException {
+        byte[] passwordBytes = password.getBytes("UTF-8");
+        byte[] hashSource = new byte[passwordBytes.length + s_saltlen];
+        System.arraycopy(passwordBytes, 0, hashSource, 0, passwordBytes.length);
+        System.arraycopy(salt, 0, hashSource, passwordBytes.length, s_saltlen);
+
+        // 2. Hash the password with the salt
+        MessageDigest md = MessageDigest.getInstance("SHA-256");
+        md.update(hashSource);
+        byte[] digest = md.digest();
+
+        return new String(Base64.encode(digest));
+    }
 }
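
For reference, the scheme the reindented authenticator implements is: generate a 20-byte salt with a SHA1PRNG SecureRandom, store base64(salt) + ":" + base64(SHA-256(password bytes + salt)), and verify by re-hashing the candidate password with the stored salt. Below is a minimal standalone sketch of that same scheme; it uses java.util.Base64 instead of the Bouncy Castle encoder the authenticator uses, and the class and method names are illustrative, not CloudStack API.

    import java.nio.charset.StandardCharsets;
    import java.security.MessageDigest;
    import java.security.NoSuchAlgorithmException;
    import java.security.SecureRandom;
    import java.util.Base64;

    public class SaltedSha256Example {
        private static final int SALT_LEN = 20; // matches s_saltlen in the authenticator

        // Returns "base64(salt):base64(sha256(password bytes + salt))".
        static String encode(String password) throws NoSuchAlgorithmException {
            byte[] salt = new byte[SALT_LEN];
            SecureRandom.getInstance("SHA1PRNG").nextBytes(salt);
            return Base64.getEncoder().encodeToString(salt) + ":" + hash(password, salt);
        }

        // SHA-256 over the UTF-8 password bytes with the salt appended.
        static String hash(String password, byte[] salt) throws NoSuchAlgorithmException {
            byte[] pw = password.getBytes(StandardCharsets.UTF_8);
            byte[] source = new byte[pw.length + salt.length];
            System.arraycopy(pw, 0, source, 0, pw.length);
            System.arraycopy(salt, 0, source, pw.length, salt.length);
            MessageDigest md = MessageDigest.getInstance("SHA-256");
            return Base64.getEncoder().encodeToString(md.digest(source));
        }

        // Splits the stored "salt:hash" value and re-hashes the candidate password.
        static boolean verify(String password, String stored) throws NoSuchAlgorithmException {
            String[] parts = stored.split(":");
            if (parts.length != 2) {
                return false;
            }
            byte[] salt = Base64.getDecoder().decode(parts[0]);
            return parts[1].equals(hash(password, salt));
        }

        public static void main(String[] args) throws NoSuchAlgorithmException {
            String stored = encode("s3cret");
            System.out.println(stored);
            System.out.println(verify("s3cret", stored)); // true
            System.out.println(verify("wrong", stored));  // false
        }
    }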

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/110465b5/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 3abf731..4b5e3cf 100644
--- a/pom.xml
+++ b/pom.xml
@@ -43,6 +43,7 @@
   </issueManagement>
 
   <properties>
+    <skipTests>true</skipTests>
     <cs.jdk.version>1.6</cs.jdk.version>
     <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
 
@@ -88,7 +89,6 @@
     <cs.lang.version>2.6</cs.lang.version>
     <cs.commons-io.version>1.4</cs.commons-io.version>
     <cs.reflections.version>0.9.8</cs.reflections.version>
-    <skipTests>true</skipTests>
   </properties>
 
   <distributionManagement>
@@ -296,6 +296,36 @@
     <defaultGoal>install</defaultGoal>
     <pluginManagement>
       <plugins>
+        <!--This plugin's configuration is used to store Eclipse m2e settings
+          only. It has no influence on the Maven build itself. -->
+        <plugin>
+          <groupId>org.eclipse.m2e</groupId>
+          <artifactId>lifecycle-mapping</artifactId>
+          <version>1.0.0</version>
+          <configuration>
+            <lifecycleMappingMetadata>
+              <pluginExecutions>
+                <pluginExecution>
+                  <pluginExecutionFilter>
+                    <groupId>
+                      org.apache.maven.plugins
+                    </groupId>
+                    <artifactId>
+                      maven-antrun-plugin
+                    </artifactId>
+                    <versionRange>[1.7,)</versionRange>
+                    <goals>
+                      <goal>run</goal>
+                    </goals>
+                  </pluginExecutionFilter>
+                  <action>
+                    <ignore />
+                  </action>
+                </pluginExecution>
+              </pluginExecutions>
+            </lifecycleMappingMetadata>
+          </configuration>
+        </plugin>
         <plugin>
           <groupId>org.apache.tomcat.maven</groupId>
           <artifactId>tomcat7-maven-plugin</artifactId>
@@ -346,6 +376,7 @@
               <exclude>scripts/vm/systemvm/id_rsa.cloud</exclude>
               <exclude>tools/devcloud/basebuild/puppet-devcloudinitial/files/network.conf</exclude>
               <exclude>tools/devcloud/devcloud.cfg</exclude>
+              <exclude>tools/devcloud-kvm/devcloud-kvm.cfg</exclude>
               <exclude>ui/lib/flot/jquery.colorhelpers.js</exclude>
               <exclude>ui/lib/flot/jquery.flot.crosshair.js</exclude>
               <exclude>ui/lib/flot/jquery.flot.fillbetween.js</exclude>
@@ -472,6 +503,7 @@
         <module>developer</module>
         <module>tools/apidoc</module>
         <module>tools/devcloud</module>
+        <module>tools/devcloud-kvm</module>
         <module>tools/marvin</module>
         <module>tools/cli</module>
       </modules>
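
Note that the first pom.xml hunk above only moves the <skipTests> property to the top of the <properties> block; its value is still true, so unit tests stay skipped by default. Like any POM property it can be overridden for a single build from the command line, for example:

    mvn -DskipTests=false clean install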

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/110465b5/server/pom.xml
----------------------------------------------------------------------
diff --git a/server/pom.xml b/server/pom.xml
index b6d86e1..8592f27 100644
--- a/server/pom.xml
+++ b/server/pom.xml
@@ -90,6 +90,11 @@
       <artifactId>cloud-engine-api</artifactId>
       <version>${project.version}</version>
     </dependency>
+    <dependency>
+      <groupId>org.apache.cloudstack</groupId>
+      <artifactId>cloud-api</artifactId>
+      <version>${project.version}</version>
+    </dependency>
   </dependencies>
   <build>
     <defaultGoal>install</defaultGoal>

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/110465b5/server/src/com/cloud/agent/manager/AgentManagerImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/agent/manager/AgentManagerImpl.java b/server/src/com/cloud/agent/manager/AgentManagerImpl.java
index ee5971f..77f131a 100755
--- a/server/src/com/cloud/agent/manager/AgentManagerImpl.java
+++ b/server/src/com/cloud/agent/manager/AgentManagerImpl.java
@@ -66,7 +66,6 @@ import com.cloud.agent.transport.Response;
 import com.cloud.alert.AlertManager;
 import com.cloud.capacity.dao.CapacityDao;
 import com.cloud.cluster.ManagementServerNode;
-import com.cloud.cluster.StackMaid;
 import com.cloud.configuration.Config;
 import com.cloud.configuration.dao.ConfigurationDao;
 import com.cloud.dc.ClusterDetailsDao;
@@ -107,7 +106,6 @@ import com.cloud.user.AccountManager;
 import com.cloud.utils.ActionDelegate;
 import com.cloud.utils.NumbersUtil;
 import com.cloud.utils.Pair;
-import com.cloud.utils.component.ComponentLocator;
 import com.cloud.utils.component.Manager;
 import com.cloud.utils.concurrency.NamedThreadFactory;
 import com.cloud.utils.db.DB;
@@ -151,7 +149,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
     protected List<Pair<Integer, StartupCommandProcessor>> _creationMonitors = new ArrayList<Pair<Integer, StartupCommandProcessor>>(17);
     protected List<Long> _loadingAgents = new ArrayList<Long>();
     protected int _monitorId = 0;
-    private Lock _agentStatusLock = new ReentrantLock();
+    private final Lock _agentStatusLock = new ReentrantLock();
 
     protected NioServer _connection;
     @Inject
@@ -195,10 +193,10 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
 
     @Inject
     protected VirtualMachineManager _vmMgr = null;
-    
+
     @Inject StorageService _storageSvr = null;
     @Inject StorageManager _storageMgr = null;
-    
+
     @Inject
     protected HypervisorGuruManager _hvGuruMgr;
 
@@ -222,11 +220,11 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
 
     protected ExecutorService _executor;
     protected ThreadPoolExecutor _connectExecutor;
-    
+
     protected StateMachine2<Status, Status.Event, Host> _statusStateMachine = Status.getStateMachine();
-    
+
     @Inject ResourceManager _resourceMgr;
-    
+
     @Override
     public boolean configure(final String name, final Map<String, Object> params) throws ConfigurationException {
         _name = name;
@@ -263,7 +261,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
 
         _nodeId = ManagementServerNode.getManagementServerId();
         s_logger.info("Configuring AgentManagerImpl. management server node id(msid): " + _nodeId);
-        
+
         long lastPing = (System.currentTimeMillis() >> 10) - _pingTimeout;
         _hostDao.markHostsAsDisconnected(_nodeId, lastPing);
 
@@ -276,7 +274,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
                 new LinkedBlockingQueue<Runnable>(), new NamedThreadFactory("AgentConnectTaskPool"));
         //allow core threads to time out even when there are no items in the queue
         _connectExecutor.allowCoreThreadTimeOut(true);
- 
+
         _connection = new NioServer("AgentManager", _port, workers + 10, this);
 
         s_logger.info("Listening on " + _port + " with " + workers + " workers");
@@ -395,7 +393,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
         } else if ( ssHost.getType() == Host.Type.SecondaryStorage) {
             sendToSSVM(ssHost.getDataCenterId(), cmd, listener);
         } else {
-        	String err = "do not support Secondary Storage type " + ssHost.getType();
+            String err = "do not support Secondary Storage type " + ssHost.getType();
             s_logger.warn(err);
             throw new CloudRuntimeException(err);
         }
@@ -435,7 +433,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
                 }
                 Answer answer = null;
                 try {
-                	
+
                     long targetHostId = _hvGuruMgr.getGuruProcessedCommandTargetHost(host.getId(), cmd);
                     answer = easySend(targetHostId, cmd);
                 } catch (Exception e) {
@@ -552,7 +550,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
 
         assert cmds.length > 0 : "Why are you sending zero length commands?";
         if (cmds.length == 0) {
-        	throw new AgentUnavailableException("Empty command set for agent " + agent.getId(), agent.getId());
+            throw new AgentUnavailableException("Empty command set for agent " + agent.getId(), agent.getId());
         }
         Request req = new Request(hostId, _nodeId, cmds, commands.stopOnError(), true);
         req.setSequence(agent.getNextSequence());
@@ -585,7 +583,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
         if (removed != null) {
             removed.disconnect(nextState);
         }
-        
+
         for (Pair<Integer, Listener> monitor : _hostMonitors) {
             if (s_logger.isDebugEnabled()) {
                 s_logger.debug("Sending Disconnect to listener: " + monitor.second().getClass().getName());
@@ -593,7 +591,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
             monitor.second().processDisconnect(hostId, nextState);
         }
     }
-    
+
     protected AgentAttache notifyMonitorsOfConnection(AgentAttache attache, final StartupCommand[] cmd, boolean forRebalance) throws ConnectionException {
         long hostId = attache.getId();
         HostVO host = _hostDao.findById(hostId);
@@ -678,7 +676,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
             loadDirectlyConnectedHost(host, false);
         }
     }
-    
+
     private ServerResource loadResourcesWithoutHypervisor(HostVO host){
         String resourceName = host.getResource();
         ServerResource resource = null;
@@ -704,10 +702,10 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
 
         if(resource != null){
             _hostDao.loadDetails(host);
-    
+
             HashMap<String, Object> params = new HashMap<String, Object>(host.getDetails().size() + 5);
             params.putAll(host.getDetails());
-    
+
             params.put("guid", host.getGuid());
             params.put("zone", Long.toString(host.getDataCenterId()));
             if (host.getPodId() != null) {
@@ -726,19 +724,19 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
                     params.put("pool", guid);
                 }
             }
-    
+
             params.put("ipaddress", host.getPrivateIpAddress());
             params.put("secondary.storage.vm", "false");
             params.put("max.template.iso.size", _configDao.getValue(Config.MaxTemplateAndIsoSize.toString()));
             params.put("migratewait", _configDao.getValue(Config.MigrateWait.toString()));
-    
+
             try {
                 resource.configure(host.getName(), params);
             } catch (ConfigurationException e) {
                 s_logger.warn("Unable to configure resource due to " + e.getMessage());
                 return null;
             }
-    
+
             if (!resource.start()) {
                 s_logger.warn("Unable to start the resource");
                 return null;
@@ -746,13 +744,13 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
         }
         return resource;
     }
-    
+
 
     @SuppressWarnings("rawtypes")
     protected boolean loadDirectlyConnectedHost(HostVO host, boolean forRebalance) {
-    	boolean initialized = false;
+        boolean initialized = false;
         ServerResource resource = null;
-    	try {
+        try {
             //load the respective discoverer
             Discoverer discoverer = _resourceMgr.getMatchingDiscover(host.getHypervisorType());
             if(discoverer == null){
@@ -761,20 +759,20 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
             }else{
                 resource = discoverer.reloadResource(host);
             }
-            
+
             if(resource == null){
                 s_logger.warn("Unable to load the resource: "+ host.getId());
                 return false;
             }
-        
-	        initialized = true;
-    	} finally {
-    		if(!initialized) {
+
+            initialized = true;
+        } finally {
+            if(!initialized) {
                 if (host != null) {
                     agentStatusTransitTo(host, Event.AgentDisconnected, _nodeId);
                 }
-    		}	
-    	}
+            }	
+        }
 
         if (forRebalance) {
             Host h = _resourceMgr.createHostAndAgent(host.getId(), resource, host.getDetails(), false, null, true);
@@ -790,10 +788,10 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
         if (resource instanceof DummySecondaryStorageResource || resource instanceof KvmDummyResourceBase) {
             return new DummyAttache(this, host.getId(), false);
         }
-        
+
         s_logger.debug("create DirectAgentAttache for " + host.getId());
         DirectAgentAttache attache = new DirectAgentAttache(this, host.getId(), resource, host.isInMaintenanceStates(), this);
-        
+
         AgentAttache old = null;
         synchronized (_agents) {
             old = _agents.put(host.getId(), attache);
@@ -804,7 +802,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
 
         return attache;
     }
-    
+
     @Override
     public boolean stop() {
         if (_monitor != null) {
@@ -823,13 +821,13 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
                         s_logger.debug("Cant not find host " + agent.getId());
                     }
                 } else {
-                	if (!agent.forForward()) {
-                		agentStatusTransitTo(host, Event.ManagementServerDown, _nodeId);
-                	}
+                    if (!agent.forForward()) {
+                        agentStatusTransitTo(host, Event.ManagementServerDown, _nodeId);
+                    }
                 }
             }
         }
-        
+
         _connectExecutor.shutdownNow();
         return true;
     }
@@ -838,7 +836,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
     public String getName() {
         return _name;
     }
-   
+
     protected boolean handleDisconnectWithoutInvestigation(AgentAttache attache, Status.Event event, boolean transitState) {
         long hostId = attache.getId();
 
@@ -863,7 +861,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
                     s_logger.debug(err);
                     throw new CloudRuntimeException(err);
                 }
-                
+
                 if (s_logger.isDebugEnabled()) {
                     s_logger.debug("The next status of agent " + hostId + "is " + nextStatus + ", current status is " + currentStatus);
                 }
@@ -876,15 +874,15 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
 
         //remove the attache
         removeAgent(attache, nextStatus);
-        
+
         //update the DB
         if (host != null && transitState) {
-        	disconnectAgent(host, event, _nodeId);
+            disconnectAgent(host, event, _nodeId);
         }
 
         return true;
     }
-    
+
     protected boolean handleDisconnectWithInvestigation(AgentAttache attache, Status.Event event) {
         long hostId = attache.getId();
         HostVO host = _hostDao.findById(hostId);
@@ -898,7 +896,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
                  * God knew what race condition the code dealt with! 
                  */
             }
-            
+
             if (nextStatus == Status.Alert) {
                 /* OK, we are going to the bad status, let's see what happened */
                 s_logger.info("Investigating why host " + hostId + " has disconnected with event " + event);
@@ -947,7 +945,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
                 s_logger.debug("The next status of Agent " + host.getId() + " is not Alert, no need to investigate what happened");
             }
         }
-        
+
         handleDisconnectWithoutInvestigation(attache, event, true);
         host = _hostDao.findById(host.getId());
         if (host.getStatus() == Status.Alert || host.getStatus() == Status.Down) {
@@ -970,16 +968,14 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
 
         @Override
         public void run() {
-        	try {
+            try {
                 if (_investigate == true) {
                     handleDisconnectWithInvestigation(_attache, _event);
                 } else {
-                	handleDisconnectWithoutInvestigation(_attache, _event, true);
+                    handleDisconnectWithoutInvestigation(_attache, _event, true);
                 }
             } catch (final Exception e) {
                 s_logger.error("Exception caught while handling disconnect: ", e);
-            } finally {
-                StackMaid.current().exitCleanup();
             }
         }
     }
@@ -1059,14 +1055,14 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
 
     @Override
     public boolean executeUserRequest(long hostId, Event event) throws AgentUnavailableException {
-         if (event == Event.AgentDisconnected) {
+        if (event == Event.AgentDisconnected) {
             if (s_logger.isDebugEnabled()) {
                 s_logger.debug("Received agent disconnect event for host " + hostId);
             }
             AgentAttache attache = null;
             attache = findAttache(hostId);
             if (attache != null) {
-            	handleDisconnectWithoutInvestigation(attache, Event.AgentDisconnected, true);
+                handleDisconnectWithoutInvestigation(attache, Event.AgentDisconnected, true);
             }
             return true;
         } else if (event == Event.ShutdownRequested) {
@@ -1079,7 +1075,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
         s_logger.debug("create ConnectedAgentAttache for " + host.getId());
         AgentAttache attache = new ConnectedAgentAttache(this, host.getId(), link, host.isInMaintenanceStates());
         link.attach(attache);
-        
+
         AgentAttache old = null;
         synchronized (_agents) {
             old = _agents.put(host.getId(), attache);
@@ -1090,36 +1086,36 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
 
         return attache;
     }
-    
+
     private AgentAttache handleConnectedAgent(final Link link, final StartupCommand[] startup, Request request) {
-    	AgentAttache attache = null;
-    	ReadyCommand ready = null;
-    	try {
-    		HostVO host = _resourceMgr.createHostVOForConnectedAgent(startup);
-    		if (host != null) {
-    		    ready = new ReadyCommand(host.getDataCenterId(), host.getId());
-    			attache = createAttacheForConnect(host, link);
-    			attache = notifyMonitorsOfConnection(attache, startup, false);
-    		}
+        AgentAttache attache = null;
+        ReadyCommand ready = null;
+        try {
+            HostVO host = _resourceMgr.createHostVOForConnectedAgent(startup);
+            if (host != null) {
+                ready = new ReadyCommand(host.getDataCenterId(), host.getId());
+                attache = createAttacheForConnect(host, link);
+                attache = notifyMonitorsOfConnection(attache, startup, false);
+            }
         } catch (Exception e) {
-        	s_logger.debug("Failed to handle host connection: " + e.toString());
-        	ready = new ReadyCommand(null);
-        	ready.setDetails(e.toString());
+            s_logger.debug("Failed to handle host connection: " + e.toString());
+            ready = new ReadyCommand(null);
+            ready.setDetails(e.toString());
         } finally {
             if (ready == null) {
                 ready = new ReadyCommand(null);
-    	}
+            }
         }
-        
+
         try {
-        if (attache == null) {
-        		final Request readyRequest = new Request(-1, -1, ready, false);
-        		link.send(readyRequest.getBytes());
-        	} else {
-        		easySend(attache.getId(), ready);
-        	}
+            if (attache == null) {
+                final Request readyRequest = new Request(-1, -1, ready, false);
+                link.send(readyRequest.getBytes());
+            } else {
+                easySend(attache.getId(), ready);
+            }
         } catch (Exception e) {
-        	s_logger.debug("Failed to send ready command:" + e.toString());
+            s_logger.debug("Failed to send ready command:" + e.toString());
         }
         return attache;
     }
@@ -1143,7 +1139,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
                 if (s_logger.isDebugEnabled()) {
                     s_logger.debug("Simulating start for resource " + resource.getName() + " id " + id);
                 }
-                
+
                 _resourceMgr.createHostAndAgent(id, resource, details, false, null, false);
             } catch (Exception e) {
                 s_logger.warn("Unable to simulate start on resource " + id + " name " + resource.getName(), e);
@@ -1151,7 +1147,6 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
                 if (actionDelegate != null) {
                     actionDelegate.action(new Long(id));
                 }
-                StackMaid.current().exitCleanup();
             }
         }
     }
@@ -1174,32 +1169,32 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
             for (int i = 0; i < _cmds.length; i++) {
                 startups[i] = (StartupCommand) _cmds[i];
             }
-        
+
             AgentAttache attache = handleConnectedAgent(_link, startups, _request);
             if (attache == null) {
                 s_logger.warn("Unable to create attache for agent: " + _request);
             }
         }
     }
-    
+
     protected void connectAgent(Link link, final Command[] cmds, final Request request) {
-    	//send startupanswer to agent in the very beginning, so agent can move on without waiting for the answer for an undetermined time, if we put this logic into another thread pool.
-    	StartupAnswer[] answers = new StartupAnswer[cmds.length];
-    	Command cmd;
-    	for (int i = 0; i < cmds.length; i++) {
-			cmd = cmds[i];
-			if ((cmd instanceof StartupRoutingCommand) || (cmd instanceof StartupProxyCommand) || (cmd instanceof StartupSecondaryStorageCommand) || (cmd instanceof StartupStorageCommand)) {
-				answers[i] = new StartupAnswer((StartupCommand)cmds[i], 0, getPingInterval());
-				break;
-			}
-		}
-    	Response response = null;
-    	response = new Response(request, answers[0], _nodeId, -1); 
-    	 try {
-         	link.send(response.toBytes());
-         } catch (ClosedChannelException e) {
-         	s_logger.debug("Failed to send startupanswer: " + e.toString());
-         }        
+        //send startupanswer to agent in the very beginning, so agent can move on without waiting for the answer for an undetermined time, if we put this logic into another thread pool.
+        StartupAnswer[] answers = new StartupAnswer[cmds.length];
+        Command cmd;
+        for (int i = 0; i < cmds.length; i++) {
+            cmd = cmds[i];
+            if ((cmd instanceof StartupRoutingCommand) || (cmd instanceof StartupProxyCommand) || (cmd instanceof StartupSecondaryStorageCommand) || (cmd instanceof StartupStorageCommand)) {
+                answers[i] = new StartupAnswer((StartupCommand)cmds[i], 0, getPingInterval());
+                break;
+            }
+        }
+        Response response = null;
+        response = new Response(request, answers[0], _nodeId, -1); 
+        try {
+            link.send(response.toBytes());
+        } catch (ClosedChannelException e) {
+            s_logger.debug("Failed to send startupanswer: " + e.toString());
+        }        
         _connectExecutor.execute(new HandleAgentConnectTask(link, cmds, request));
     }
 
@@ -1215,14 +1210,14 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
             boolean logD = true;
 
             if (attache == null) {
-            	if (!(cmd instanceof StartupCommand)) {
-            		s_logger.warn("Throwing away a request because it came through as the first command on a connect: " + request);
-            	} else {
-            	    //submit the task for execution
-            	    request.logD("Scheduling the first command ");
-            	    connectAgent(link, cmds, request);
-            	}
-            	return;
+                if (!(cmd instanceof StartupCommand)) {
+                    s_logger.warn("Throwing away a request because it came through as the first command on a connect: " + request);
+                } else {
+                    //submit the task for execution
+                    request.logD("Scheduling the first command ");
+                    connectAgent(link, cmds, request);
+                }
+                return;
             }
 
             final long hostId = attache.getId();
@@ -1286,20 +1281,20 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
                             if (cmd instanceof PingRoutingCommand) {
                                 boolean gatewayAccessible = ((PingRoutingCommand) cmd).isGatewayAccessible();
                                 HostVO host = _hostDao.findById(Long.valueOf(cmdHostId));
-                                
+
                                 if (host != null) {
-                                if (!gatewayAccessible) {
-                                    // alert that host lost connection to
-                                    // gateway (cannot ping the default route)
-                                    DataCenterVO dcVO = _dcDao.findById(host.getDataCenterId());
-                                    HostPodVO podVO = _podDao.findById(host.getPodId());
-                                    String hostDesc = "name: " + host.getName() + " (id:" + host.getId() + "), availability zone: " + dcVO.getName() + ", pod: " + podVO.getName();
-
-                                    _alertMgr.sendAlert(AlertManager.ALERT_TYPE_ROUTING, host.getDataCenterId(), host.getPodId(), "Host lost connection to gateway, " + hostDesc, "Host [" + hostDesc
-                                            + "] lost connection to gateway (default route) and is possibly having network connection issues.");
-                                } else {
-                                    _alertMgr.clearAlert(AlertManager.ALERT_TYPE_ROUTING, host.getDataCenterId(), host.getPodId());
-                                }
+                                    if (!gatewayAccessible) {
+                                        // alert that host lost connection to
+                                        // gateway (cannot ping the default route)
+                                        DataCenterVO dcVO = _dcDao.findById(host.getDataCenterId());
+                                        HostPodVO podVO = _podDao.findById(host.getPodId());
+                                        String hostDesc = "name: " + host.getName() + " (id:" + host.getId() + "), availability zone: " + dcVO.getName() + ", pod: " + podVO.getName();
+
+                                        _alertMgr.sendAlert(AlertManager.ALERT_TYPE_ROUTING, host.getDataCenterId(), host.getPodId(), "Host lost connection to gateway, " + hostDesc, "Host [" + hostDesc
+                                                + "] lost connection to gateway (default route) and is possibly having network connection issues.");
+                                    } else {
+                                        _alertMgr.clearAlert(AlertManager.ALERT_TYPE_ROUTING, host.getDataCenterId(), host.getPodId());
+                                    }
                                 } else {
                                     s_logger.debug("Not processing " + PingRoutingCommand.class.getSimpleName() +
                                             " for agent id=" + cmdHostId + "; can't find the host in the DB");
@@ -1382,7 +1377,6 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
                     }
                 }
             } finally {
-                StackMaid.current().exitCleanup();
                 txn.close();
             }
         }
@@ -1391,7 +1385,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
     protected AgentManagerImpl() {
     }
 
-	@Override
+    @Override
     public boolean tapLoadingAgents(Long hostId, TapAgentsAction action) {
         synchronized (_loadingAgents) {
             if (action == TapAgentsAction.Add) {
@@ -1406,58 +1400,58 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
         }  
         return true;
     }
-    
+
     @Override
     public boolean agentStatusTransitTo(HostVO host, Status.Event e, long msId) {
-		try {
-			_agentStatusLock.lock();
-			if (status_logger.isDebugEnabled()) {
-				ResourceState state = host.getResourceState();
-				StringBuilder msg = new StringBuilder("Transition:");
-				msg.append("[Resource state = ").append(state);
-				msg.append(", Agent event = ").append(e.toString());
-				msg.append(", Host id = ").append(host.getId()).append(", name = " + host.getName()).append("]");
-				status_logger.debug(msg);
-			}
-
-			host.setManagementServerId(msId);
-			try {
-				return _statusStateMachine.transitTo(host, e, host.getId(), _hostDao);
-			} catch (NoTransitionException e1) {
-				status_logger.debug("Cannot transit agent status with event " + e + " for host " + host.getId() + ", name=" + host.getName()
-				        + ", mangement server id is " + msId);
-				throw new CloudRuntimeException("Cannot transit agent status with event " + e + " for host " + host.getId() + ", mangement server id is "
-				        + msId + "," + e1.getMessage());
-			}
-		} finally {
-			_agentStatusLock.unlock();
-		}
+        try {
+            _agentStatusLock.lock();
+            if (status_logger.isDebugEnabled()) {
+                ResourceState state = host.getResourceState();
+                StringBuilder msg = new StringBuilder("Transition:");
+                msg.append("[Resource state = ").append(state);
+                msg.append(", Agent event = ").append(e.toString());
+                msg.append(", Host id = ").append(host.getId()).append(", name = " + host.getName()).append("]");
+                status_logger.debug(msg);
+            }
+
+            host.setManagementServerId(msId);
+            try {
+                return _statusStateMachine.transitTo(host, e, host.getId(), _hostDao);
+            } catch (NoTransitionException e1) {
+                status_logger.debug("Cannot transit agent status with event " + e + " for host " + host.getId() + ", name=" + host.getName()
+                        + ", mangement server id is " + msId);
+                throw new CloudRuntimeException("Cannot transit agent status with event " + e + " for host " + host.getId() + ", mangement server id is "
+                        + msId + "," + e1.getMessage());
+            }
+        } finally {
+            _agentStatusLock.unlock();
+        }
     }
-    
+
     public boolean disconnectAgent(HostVO host, Status.Event e, long msId) {
         host.setDisconnectedOn(new Date());
         if (e.equals(Status.Event.Remove)) {
             host.setGuid(null);
             host.setClusterId(null);
         }
-        
+
         return agentStatusTransitTo(host, e, msId);
     }
-    
+
     protected void disconnectWithoutInvestigation(AgentAttache attache, final Status.Event event) {
         _executor.submit(new DisconnectTask(attache, event, false));
     }
-    
+
     protected void disconnectWithInvestigation(AgentAttache attache, final Status.Event event) {
         _executor.submit(new DisconnectTask(attache, event, true));
     }
-    
+
     private void disconnectInternal(final long hostId, final Status.Event event, boolean invstigate) {
         AgentAttache attache = findAttache(hostId);
 
         if (attache != null) {
             if (!invstigate) {
-            	disconnectWithoutInvestigation(attache, event);
+                disconnectWithoutInvestigation(attache, event);
             } else {
                 disconnectWithInvestigation(attache, event);
             }
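
agentStatusTransitTo above follows a lock-then-transition pattern: take the status lock, log the resource state and event, record the owning management server, and drive the state machine, turning a NoTransitionException into a runtime failure. A rough, self-contained sketch of the same idea using a hand-rolled transition map (the enum values and map below are illustrative assumptions, not CloudStack's actual states):

    import java.util.Map;
    import java.util.concurrent.locks.ReentrantLock;

    class AgentStatusSketch {
        enum Status { Up, Disconnected, Down }
        enum Event { AgentDisconnected, Ping, Remove }

        private final ReentrantLock statusLock = new ReentrantLock();
        private final Map<Status, Map<Event, Status>> transitions = Map.of(
                Status.Up, Map.of(Event.AgentDisconnected, Status.Disconnected),
                Status.Disconnected, Map.of(Event.Ping, Status.Up, Event.Remove, Status.Down));

        private Status current = Status.Up;

        boolean transit(Event e) {
            statusLock.lock();
            try {
                Status next = transitions.getOrDefault(current, Map.of()).get(e);
                if (next == null) {
                    // Mirror the code above: an impossible transition is an error, not a silent no-op.
                    throw new IllegalStateException("Cannot transit from " + current + " on event " + e);
                }
                current = next;
                return true;
            } finally {
                statusLock.unlock();
            }
        }
    }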
@@ -1470,35 +1464,35 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
 
             HostVO host = _hostDao.findById(hostId);
             if (host != null && host.getRemoved() == null) {
-               disconnectAgent(host, event, _nodeId);
+                disconnectAgent(host, event, _nodeId);
             }
         }
     }
-    
+
     public void disconnectWithInvestigation(final long hostId, final Status.Event event) {
         disconnectInternal(hostId, event, true);
     }
-    
+
     @Override
     public void disconnectWithoutInvestigation(final long hostId, final Status.Event event) {
         disconnectInternal(hostId, event, false);
     }
 
-	@Override
+    @Override
     public AgentAttache handleDirectConnectAgent(HostVO host, StartupCommand[] cmds, ServerResource resource, boolean forRebalance) throws ConnectionException {
-		AgentAttache attache;
-		
-		attache = createAttacheForDirectConnect(host, resource);
+        AgentAttache attache;
+
+        attache = createAttacheForDirectConnect(host, resource);
         StartupAnswer[] answers = new StartupAnswer[cmds.length];
         for (int i = 0; i < answers.length; i++) {
             answers[i] = new StartupAnswer(cmds[i], attache.getId(), _pingInterval);
         }
         attache.process(answers);
-		attache = notifyMonitorsOfConnection(attache, cmds, forRebalance);
-		
-		return attache;
+        attache = notifyMonitorsOfConnection(attache, cmds, forRebalance);
+
+        return attache;
     }
-	
+
     @Override
     public void pullAgentToMaintenance(long hostId) {
         AgentAttache attache = findAttache(hostId);
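
handleDirectConnectAgent above differs from the socket path in one detail worth noting: every StartupCommand gets its own StartupAnswer (carrying the attache id and ping interval), and the answers are pushed back through the attache before monitors are notified. A simplified sketch of that step, with stand-in types:

    class DirectConnectSketch {
        static class StartupCommand {}
        static class StartupAnswer {
            final long hostId;
            final int pingInterval;
            StartupAnswer(long hostId, int pingInterval) {
                this.hostId = hostId;
                this.pingInterval = pingInterval;
            }
        }
        interface Attache {
            long getId();
            void process(StartupAnswer[] answers);
        }

        Attache connect(Attache attache, StartupCommand[] cmds, int pingInterval) {
            StartupAnswer[] answers = new StartupAnswer[cmds.length];
            for (int i = 0; i < answers.length; i++) {
                answers[i] = new StartupAnswer(attache.getId(), pingInterval);
            }
            attache.process(answers);   // unblocks whatever is waiting on the startup sequence
            return attache;             // the caller then notifies connection monitors
        }
    }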
@@ -1508,15 +1502,15 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
             attache.cancelAllCommands(Status.Disconnected, false);
         }        
     }
-    
+
     @Override
     public void pullAgentOutMaintenance(long hostId) {
         AgentAttache attache = findAttache(hostId);
         if (attache != null) {
-        	attache.setMaintenanceMode(false);
+            attache.setMaintenanceMode(false);
         }
     }
-    
-    
-    
+
+
+
 }

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/110465b5/server/src/com/cloud/agent/manager/ClusteredAgentManagerImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/agent/manager/ClusteredAgentManagerImpl.java b/server/src/com/cloud/agent/manager/ClusteredAgentManagerImpl.java
index 6753b28..25c7168 100755
--- a/server/src/com/cloud/agent/manager/ClusteredAgentManagerImpl.java
+++ b/server/src/com/cloud/agent/manager/ClusteredAgentManagerImpl.java
@@ -62,7 +62,6 @@ import com.cloud.cluster.ClusterManagerListener;
 import com.cloud.cluster.ClusteredAgentRebalanceService;
 import com.cloud.cluster.ManagementServerHost;
 import com.cloud.cluster.ManagementServerHostVO;
-import com.cloud.cluster.StackMaid;
 import com.cloud.cluster.agentlb.AgentLoadBalancerPlanner;
 import com.cloud.cluster.agentlb.HostTransferMapVO;
 import com.cloud.cluster.agentlb.HostTransferMapVO.HostTransferState;
@@ -80,8 +79,6 @@ import com.cloud.resource.ServerResource;
 import com.cloud.storage.resource.DummySecondaryStorageResource;
 import com.cloud.utils.DateUtil;
 import com.cloud.utils.NumbersUtil;
-import com.cloud.utils.component.Adapters;
-import com.cloud.utils.component.ComponentLocator;
 import com.cloud.utils.concurrency.NamedThreadFactory;
 import com.cloud.utils.db.SearchCriteria.Op;
 import com.cloud.utils.db.SearchCriteria2;
@@ -116,10 +113,10 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
     protected ManagementServerHostDao _mshostDao;
     @Inject
     protected HostTransferMapDao _hostTransferDao;
-    
+
     // @com.cloud.utils.component.Inject(adapter = AgentLoadBalancerPlanner.class)
     @Inject protected List<AgentLoadBalancerPlanner> _lbPlanners;
-    
+
     @Inject
     protected AgentManager _agentMgr;
     @Inject ConfigurationDao _configDao;
@@ -133,7 +130,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
         _peers = new HashMap<String, SocketChannel>(7);
         _sslEngines = new HashMap<String, SSLEngine>(7);
         _nodeId = _clusterMgr.getManagementNodeId();
-        
+
         s_logger.info("Configuring ClusterAgentManagerImpl. management server node id(msid): " + _nodeId);
 
         Map<String, String> params = _configDao.getConfiguration(xmlParams);
@@ -143,7 +140,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
         ClusteredAgentAttache.initialize(this);
 
         _clusterMgr.registerListener(this);
-        
+
         return super.configure(name, xmlParams);
     }
 
@@ -177,7 +174,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
         List<HostVO> hosts = _hostDao.findAndUpdateDirectAgentToLoad(cutSeconds, _loadSize, _nodeId);
         List<HostVO> appliances = _hostDao.findAndUpdateApplianceToLoad(cutSeconds, _nodeId);
         hosts.addAll(appliances);
-        
+
         if (hosts != null && hosts.size() > 0) {
             s_logger.debug("Found " + hosts.size() + " unmanaged direct hosts, processing connect for them...");
             for (HostVO host : hosts) {
@@ -278,12 +275,12 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
     protected boolean handleDisconnectWithoutInvestigation(AgentAttache attache, Status.Event event, boolean transitState) {
         return handleDisconnect(attache, event, false, true);
     }
-    
+
     @Override
     protected boolean handleDisconnectWithInvestigation(AgentAttache attache, Status.Event event) {
         return handleDisconnect(attache, event, true, true);
     }
-    
+
     protected boolean handleDisconnect(AgentAttache agent, Status.Event event, boolean investigate, boolean broadcast) {
         boolean res;
         if (!investigate) {
@@ -292,14 +289,14 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
             res = super.handleDisconnectWithInvestigation(agent, event);
         }
 
-		if (res) {
-			if (broadcast) {
-				notifyNodesInCluster(agent);
-			}
-			return true;
-		} else {
-			return false;
-		}
+        if (res) {
+            if (broadcast) {
+                notifyNodesInCluster(agent);
+            }
+            return true;
+        } else {
+            return false;
+        }
     }
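
The clustered handleDisconnect above adds one rule on top of the base class: broadcast the disconnect to peer management servers only if the local state transition succeeded, and only when the caller asked for a broadcast. A tiny sketch of that decision, with ClusterNotifier as a hypothetical stand-in for notifyNodesInCluster():

    class ClusteredDisconnectSketch {
        interface ClusterNotifier { void notifyPeers(long hostId); }

        boolean handleDisconnect(long hostId, boolean localTransitionSucceeded,
                                 boolean broadcast, ClusterNotifier notifier) {
            if (!localTransitionSucceeded) {
                return false;                     // nothing to announce if the local change failed
            }
            if (broadcast) {
                notifier.notifyPeers(hostId);     // let the other nodes drop their forwarders
            }
            return true;
        }
    }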
 
     @Override
@@ -343,15 +340,15 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
     public boolean reconnect(final long hostId) {
         Boolean result;
         try {
-	        result = _clusterMgr.propagateAgentEvent(hostId, Event.ShutdownRequested);
-	        if (result != null) {
-	            return result;
-	        }
+            result = _clusterMgr.propagateAgentEvent(hostId, Event.ShutdownRequested);
+            if (result != null) {
+                return result;
+            }
         } catch (AgentUnavailableException e) {
-	        s_logger.debug("cannot propagate agent reconnect because agent is not available", e);
-	        return false;
+            s_logger.debug("cannot propagate agent reconnect because agent is not available", e);
+            return false;
         }
-        
+
         return super.reconnect(hostId);
     }
 
@@ -413,7 +410,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
     public String findPeer(long hostId) {
         return _clusterMgr.getPeerName(hostId);
     }
-    
+
     public SSLEngine getSSLEngine(String peerName) {
         return _sslEngines.get(peerName);
     }
@@ -520,7 +517,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
             }
         }
         if (agent == null) {
-        	AgentUnavailableException ex = new AgentUnavailableException("Host with specified id is not in the right state: " + host.getStatus(), hostId);
+            AgentUnavailableException ex = new AgentUnavailableException("Host with specified id is not in the right state: " + host.getStatus(), hostId);
             ex.addProxyObject(ApiDBUtils.findHostById(hostId).getUuid());
             throw ex;
         }
@@ -540,11 +537,11 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
             }
         }
         _timer.cancel();
-        
+
         //cancel all transfer tasks
         s_transferExecutor.shutdownNow();
         cleanupTransferMap(_nodeId);
-        
+
         return super.stop();
     }
 
@@ -698,19 +695,19 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
 
     @Override
     public boolean executeRebalanceRequest(long agentId, long currentOwnerId, long futureOwnerId, Event event) throws AgentUnavailableException, OperationTimedoutException {
-    	boolean result = false;
+        boolean result = false;
         if (event == Event.RequestAgentRebalance) {
             return setToWaitForRebalance(agentId, currentOwnerId, futureOwnerId);
         } else if (event == Event.StartAgentRebalance) {
             try {
-            	result = rebalanceHost(agentId, currentOwnerId, futureOwnerId);
+                result = rebalanceHost(agentId, currentOwnerId, futureOwnerId);
             } catch (Exception e) {
                 s_logger.warn("Unable to rebalance host id=" + agentId, e);
             }
         }
         return result;
     }
-    
+
     @Override
     public void scheduleRebalanceAgents() {
         _timer.schedule(new AgentLoadBalancerTask(), 30000);
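
executeRebalanceRequest above dispatches on two events: RequestAgentRebalance merely queues the host for a later transfer scan, while StartAgentRebalance performs the move and turns any failure into a warning. A condensed sketch of that dispatch (the helper method names in the sketch are simplified assumptions):

    class RebalanceRequestSketch {
        enum Event { RequestAgentRebalance, StartAgentRebalance }

        boolean execute(long agentId, long currentOwnerId, long futureOwnerId, Event event) {
            switch (event) {
                case RequestAgentRebalance:
                    return queueForRebalance(agentId);        // just mark it; a scanner picks it up later
                case StartAgentRebalance:
                    try {
                        return rebalanceHost(agentId, currentOwnerId, futureOwnerId);
                    } catch (Exception e) {
                        System.err.println("Unable to rebalance host id=" + agentId + ": " + e);
                        return false;
                    }
                default:
                    return false;
            }
        }

        private boolean queueForRebalance(long agentId) { return true; }
        private boolean rebalanceHost(long agentId, long from, long to) { return true; }
    }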
@@ -735,20 +732,20 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
 
         @Override
         public synchronized void run() {
-        	try {
-	            if (!cancelled) {
-	                startRebalanceAgents();
-	                if (s_logger.isInfoEnabled()) {
-	                    s_logger.info("The agent load balancer task is now being cancelled");
-	                }
-	                cancelled = true;
-	            }
-        	} catch(Throwable e) {
-        		s_logger.error("Unexpected exception " + e.toString(), e);
-        	}
+            try {
+                if (!cancelled) {
+                    startRebalanceAgents();
+                    if (s_logger.isInfoEnabled()) {
+                        s_logger.info("The agent load balancer task is now being cancelled");
+                    }
+                    cancelled = true;
+                }
+            } catch(Throwable e) {
+                s_logger.error("Unexpected exception " + e.toString(), e);
+            }
         }
     }
-   
+
     public void startRebalanceAgents() {
         s_logger.debug("Management server " + _nodeId + " is asking other peers to rebalance their agents");
         List<ManagementServerHostVO> allMS = _mshostDao.listBy(ManagementServerHost.State.Up);
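
AgentLoadBalancerTask above is a run-once timer task: it guards its body with a cancelled flag so the heavy rebalance pass fires at most once, and it catches Throwable so an unexpected error cannot kill the timer thread. A small standalone sketch of that pattern using the JDK Timer (the logger and delay below are illustrative):

    import java.util.Timer;
    import java.util.TimerTask;
    import java.util.logging.Level;
    import java.util.logging.Logger;

    class RunOnceTaskSketch {
        private static final Logger LOG = Logger.getLogger(RunOnceTaskSketch.class.getName());

        static TimerTask runOnce(Runnable work) {
            return new TimerTask() {
                private boolean done = false;

                @Override
                public synchronized void run() {
                    try {
                        if (!done) {
                            work.run();
                            done = true;          // the heavy pass only ever runs once
                        }
                    } catch (Throwable t) {
                        // An escaping exception would kill the Timer thread, so log and swallow.
                        LOG.log(Level.SEVERE, "Unexpected exception " + t, t);
                    }
                }
            };
        }

        public static void main(String[] args) throws InterruptedException {
            Timer timer = new Timer();
            timer.schedule(runOnce(() -> System.out.println("rebalancing agents...")), 1000);
            Thread.sleep(1500);                   // let the task fire once, then shut the timer down
            timer.cancel();
        }
    }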
@@ -767,7 +764,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
             }
             return;
         }
-        
+
         if (avLoad == 0L) {
             if (s_logger.isDebugEnabled()) {
                 s_logger.debug("As calculated average load is less than 1, rounding it to 1");
@@ -777,7 +774,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
 
         for (ManagementServerHostVO node : allMS) {
             if (node.getMsid() != _nodeId) {
-                
+
                 List<HostVO> hostsToRebalance = new ArrayList<HostVO>();
                 for (AgentLoadBalancerPlanner lbPlanner : _lbPlanners) {
                     hostsToRebalance = lbPlanner.getHostsToRebalance(node.getMsid(), avLoad);
@@ -788,14 +785,14 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
                     }
                 }
 
-                
+
                 if (hostsToRebalance != null && !hostsToRebalance.isEmpty()) {
                     s_logger.debug("Found " + hostsToRebalance.size() + " hosts to rebalance from management server " + node.getMsid());
                     for (HostVO host : hostsToRebalance) {
                         long hostId = host.getId();
                         s_logger.debug("Asking management server " + node.getMsid() + " to give away host id=" + hostId);
                         boolean result = true;
-                        
+
                         if (_hostTransferDao.findById(hostId) != null) {
                             s_logger.warn("Somebody else is already rebalancing host id: " + hostId);
                             continue;
@@ -867,7 +864,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
                             for (Iterator<Long> iterator = _agentToTransferIds.iterator(); iterator.hasNext();) {
                                 Long hostId = iterator.next();
                                 AgentAttache attache = findAttache(hostId);
-                                
+
                                 // if the thread:
                                 // 1) timed out waiting for the host to reconnect
                                 // 2) recipient management server is not active any more
@@ -883,14 +880,14 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
                                     _hostTransferDao.completeAgentTransfer(hostId);
                                     continue;
                                 }
-                                
+
                                 if (transferMap.getInitialOwner() != _nodeId || attache == null || attache.forForward()) {
                                     s_logger.debug("Management server " + _nodeId + " doesn't own host id=" + hostId + " any more, skipping rebalance for the host");
                                     iterator.remove();
                                     _hostTransferDao.completeAgentTransfer(hostId);
                                     continue;
                                 }
-   
+
                                 ManagementServerHostVO ms = _mshostDao.findByMsid(transferMap.getFutureOwner());
                                 if (ms != null && ms.getState() != ManagementServerHost.State.Up) {
                                     s_logger.debug("Can't transfer host " + hostId + " as it's future owner is not in UP state: " + ms + ", skipping rebalance for the host");
@@ -898,7 +895,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
                                     _hostTransferDao.completeAgentTransfer(hostId);
                                     continue;
                                 } 
-                                
+
                                 if (attache.getQueueSize() == 0 && attache.getNonRecurringListenersSize() == 0) {
                                     iterator.remove();
                                     try {
@@ -907,7 +904,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
                                         s_logger.warn("Failed to submit rebalance task for host id=" + hostId + "; postponing the execution");
                                         continue;
                                     }
-                                    
+
                                 } else {
                                     s_logger.debug("Agent " + hostId + " can't be transfered yet as its request queue size is " + attache.getQueueSize() + " and listener queue size is " + attache.getNonRecurringListenersSize()); 
                                 }
@@ -925,16 +922,16 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
             }
         };
     }
-    
-    
+
+
     private boolean setToWaitForRebalance(final long hostId, long currentOwnerId, long futureOwnerId) {
         s_logger.debug("Adding agent " + hostId + " to the list of agents to transfer");
         synchronized (_agentToTransferIds) {
             return  _agentToTransferIds.add(hostId);
         }
     }
-    
-    
+
+
     protected boolean rebalanceHost(final long hostId, long currentOwnerId, long futureOwnerId) throws AgentUnavailableException{
 
         boolean result = true;
@@ -954,7 +951,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
                 s_logger.warn("Host " + hostId + " failed to connect to the management server " + futureOwnerId + " as a part of rebalance process", ex);
                 result = false;
             }
-            
+
             if (result) {
                 s_logger.debug("Successfully transfered host id=" + hostId + " to management server " + futureOwnerId);
                 finishRebalance(hostId, futureOwnerId, Event.RebalanceCompleted);
@@ -962,7 +959,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
                 s_logger.warn("Failed to transfer host id=" + hostId + " to management server " + futureOwnerId);
                 finishRebalance(hostId, futureOwnerId, Event.RebalanceFailed);
             }
-                
+
         } else if (futureOwnerId == _nodeId) {
             HostVO host = _hostDao.findById(hostId);
             try {
@@ -977,9 +974,9 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
 
                 if (result) {
                     if (s_logger.isDebugEnabled()) {
-                    s_logger.debug("Loading directly connected host " + host.getId() + "(" + host.getName() + ") to the management server " + _nodeId + " as a part of rebalance process");
-                }
-                result = loadDirectlyConnectedHost(host, true);
+                        s_logger.debug("Loading directly connected host " + host.getId() + "(" + host.getName() + ") to the management server " + _nodeId + " as a part of rebalance process");
+                    }
+                    result = loadDirectlyConnectedHost(host, true);
                 } else {
                     s_logger.warn("Failed to disconnect " + host.getId() + "(" + host.getName() +
                             " as a part of rebalance process without notification");
@@ -989,7 +986,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
                 s_logger.warn("Failed to load directly connected host " + host.getId() + "(" + host.getName() + ") to the management server " + _nodeId + " as a part of rebalance process due to:", ex);
                 result = false;
             }
-            
+
             if (result) {
                 s_logger.debug("Successfully loaded directly connected host " + host.getId() + "(" + host.getName() + ") to the management server " + _nodeId + " as a part of rebalance process");
             } else {
@@ -999,7 +996,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
 
         return result;
     }
-    
+
 
     protected void finishRebalance(final long hostId, long futureOwnerId, Event event){
 
@@ -1007,21 +1004,21 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
         if (s_logger.isDebugEnabled()) {
             s_logger.debug("Finishing rebalancing for the agent " + hostId + " with event " + event);
         }
-        
+
         AgentAttache attache = findAttache(hostId);
         if (attache == null || !(attache instanceof ClusteredAgentAttache)) {
             s_logger.debug("Unable to find forward attache for the host id=" + hostId + ", assuming that the agent disconnected already");
             _hostTransferDao.completeAgentTransfer(hostId);
             return;
         } 
-        
+
         ClusteredAgentAttache forwardAttache = (ClusteredAgentAttache)attache;
-        
+
         if (success) {
 
             //1) Set transfer mode to false - so the agent can start processing requests normally
             forwardAttache.setTransferMode(false);
-            
+
             //2) Get all transfer requests and route them to peer
             Request requestToTransfer = forwardAttache.getRequestToTransfer();
             while (requestToTransfer != null) {
@@ -1030,20 +1027,20 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
                 if (!routeResult) {
                     logD(requestToTransfer.getBytes(), "Failed to route request to peer");
                 }
-                
+
                 requestToTransfer = forwardAttache.getRequestToTransfer();
             }
-            
+
             s_logger.debug("Management server " + _nodeId + " completed agent " + hostId + " rebalance to " + futureOwnerId);
-           
+
         } else {
             failRebalance(hostId);
         }
-        
+
         s_logger.debug("Management server " + _nodeId + " completed agent " + hostId + " rebalance");
         _hostTransferDao.completeAgentTransfer(hostId);
     }
-    
+
     protected void failRebalance(final long hostId){
         try {
             s_logger.debug("Management server " + _nodeId + " failed to rebalance agent " + hostId);
@@ -1053,19 +1050,19 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
             s_logger.warn("Failed to reconnect host id=" + hostId + " as a part of failed rebalance task cleanup");
         }
     }
-    
+
     protected boolean startRebalance(final long hostId) {
         HostVO host = _hostDao.findById(hostId);
-        
+
         if (host == null || host.getRemoved() != null) {
             s_logger.warn("Unable to find host record, fail start rebalancing process");
             return false;
         } 
-        
+
         synchronized (_agents) {
             ClusteredDirectAgentAttache attache = (ClusteredDirectAgentAttache)_agents.get(hostId);
             if (attache != null && attache.getQueueSize() == 0 && attache.getNonRecurringListenersSize() == 0) {
-            	handleDisconnectWithoutInvestigation(attache, Event.StartAgentRebalance, true);
+                handleDisconnectWithoutInvestigation(attache, Event.StartAgentRebalance, true);
                 ClusteredAgentAttache forwardAttache = (ClusteredAgentAttache)createAttache(hostId);
                 if (forwardAttache == null) {
                     s_logger.warn("Unable to create a forward attache for the host " + hostId + " as a part of rebalance process");
@@ -1086,27 +1083,27 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
         _hostTransferDao.startAgentTransfer(hostId);
         return true;
     }
-    
+
     protected void cleanupTransferMap(long msId) {
         List<HostTransferMapVO> hostsJoingingCluster = _hostTransferDao.listHostsJoiningCluster(msId);
-        
+
         for (HostTransferMapVO hostJoingingCluster : hostsJoingingCluster) {
             _hostTransferDao.remove(hostJoingingCluster.getId());
         }
-        
+
         List<HostTransferMapVO> hostsLeavingCluster = _hostTransferDao.listHostsLeavingCluster(msId);
         for (HostTransferMapVO hostLeavingCluster : hostsLeavingCluster) {
             _hostTransferDao.remove(hostLeavingCluster.getId());
         }
     }
-    
-    
+
+
     protected class RebalanceTask implements Runnable {
         Long hostId = null;
         Long currentOwnerId = null;
         Long futureOwnerId = null;
-        
-        
+
+
         public RebalanceTask(long hostId, long currentOwnerId, long futureOwnerId) {
             this.hostId = hostId;
             this.currentOwnerId = currentOwnerId;
@@ -1122,10 +1119,8 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
                 rebalanceHost(hostId, currentOwnerId, futureOwnerId);
             } catch (Exception e) {
                 s_logger.warn("Unable to rebalance host id=" + hostId, e);
-            } finally {
-                StackMaid.current().exitCleanup();
             }
         }
     }
-    
+
 }

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/110465b5/server/src/com/cloud/agent/manager/allocator/impl/TestingAllocator.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/agent/manager/allocator/impl/TestingAllocator.java b/server/src/com/cloud/agent/manager/allocator/impl/TestingAllocator.java
index 9951896..c8bbe02 100755
--- a/server/src/com/cloud/agent/manager/allocator/impl/TestingAllocator.java
+++ b/server/src/com/cloud/agent/manager/allocator/impl/TestingAllocator.java
@@ -26,14 +26,12 @@ import javax.inject.Inject;
 import org.springframework.stereotype.Component;
 
 import com.cloud.agent.manager.allocator.HostAllocator;
-import com.cloud.configuration.dao.ConfigurationDao;
 import com.cloud.deploy.DeploymentPlan;
 import com.cloud.deploy.DeploymentPlanner.ExcludeList;
 import com.cloud.host.Host;
 import com.cloud.host.Host.Type;
 import com.cloud.host.dao.HostDao;
 import com.cloud.offering.ServiceOffering;
-import com.cloud.utils.component.ComponentLocator;
 import com.cloud.vm.VirtualMachine;
 import com.cloud.vm.VirtualMachineProfile;
 
@@ -51,19 +49,19 @@ public class TestingAllocator implements HostAllocator {
             ExcludeList avoid, int returnUpTo) {
         return allocateTo(vmProfile, plan, type, avoid, returnUpTo, true);
     }
-    
+
     @Override
     public List<Host> allocateTo(VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, Type type,
-			ExcludeList avoid, int returnUpTo, boolean considerReservedCapacity) {
-    	List<Host> availableHosts = new ArrayList<Host>();
-    	Host host = null;    	
+            ExcludeList avoid, int returnUpTo, boolean considerReservedCapacity) {
+        List<Host> availableHosts = new ArrayList<Host>();
+        Host host = null;    	
         if (type == Host.Type.Routing && _routingHost != null) {
-        	host = _hostDao.findById(_routingHost);
+            host = _hostDao.findById(_routingHost);
         } else if (type == Host.Type.Storage && _storageHost != null) {
-        	host = _hostDao.findById(_storageHost);
+            host = _hostDao.findById(_storageHost);
         }
         if(host != null){
-        	availableHosts.add(host);
+            availableHosts.add(host);
         }
         return availableHosts;
     }
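
The TestingAllocator logic above is deliberately trivial: it returns the single host id configured for the requested type (routing or storage), or an empty list when none was set. A minimal standalone sketch of the same behaviour:

    import java.util.ArrayList;
    import java.util.List;

    class TestingAllocatorSketch {
        enum HostType { Routing, Storage }

        private final Long routingHostId;
        private final Long storageHostId;

        TestingAllocatorSketch(Long routingHostId, Long storageHostId) {
            this.routingHostId = routingHostId;
            this.storageHostId = storageHostId;
        }

        List<Long> allocateTo(HostType type) {
            List<Long> available = new ArrayList<>();
            Long hostId = null;
            if (type == HostType.Routing && routingHostId != null) {
                hostId = routingHostId;
            } else if (type == HostType.Storage && storageHostId != null) {
                hostId = storageHostId;
            }
            if (hostId != null) {
                available.add(hostId);    // at most one candidate, mirroring the test allocator
            }
            return available;
        }
    }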
@@ -82,9 +80,9 @@ public class TestingAllocator implements HostAllocator {
 
         value = (String)params.get(Host.Type.Storage.toString());
         _storageHost = (value != null) ? Long.parseLong(value) : null;
-        
+
         _name = name;
-        
+
         return true;
     }