You are viewing a plain-text version of this content. The canonical link to the original message is available from the mailing-list archive.
Posted to commits@cloudstack.apache.org by ed...@apache.org on 2014/06/05 01:50:42 UTC
[30/50] git commit: updated refs/heads/4.3 to f304df0
CLOUDSTACK-6211: Xenserver - HA - SSVM fails to start due to running out of management Ip ranges when testing host down scenarios
Signed-off-by: Koushik Das <ko...@apache.org>
Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/15bf144f
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/15bf144f
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/15bf144f
Branch: refs/heads/4.3
Commit: 15bf144f1a89ee024f087427a3c72cf233e49356
Parents: edf97ac
Author: Harikrishna Patnala <ha...@citrix.com>
Authored: Fri Mar 7 15:36:26 2014 +0530
Committer: Koushik Das <ko...@apache.org>
Committed: Thu Mar 13 11:02:02 2014 +0530
----------------------------------------------------------------------
.../com/cloud/vm/VirtualMachineManagerImpl.java | 83 ++++++++++----------
1 file changed, 43 insertions(+), 40 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/15bf144f/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java
----------------------------------------------------------------------
diff --git a/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java
index be05cd9..b30fc16 100755
--- a/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java
+++ b/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java
@@ -1233,62 +1233,65 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
VirtualMachine vm = profile.getVirtualMachine();
State state = vm.getState();
s_logger.debug("Cleaning up resources for the vm " + vm + " in " + state + " state");
- if (state == State.Starting) {
- Step step = work.getStep();
- if (step == Step.Starting && !cleanUpEvenIfUnableToStop) {
- s_logger.warn("Unable to cleanup vm " + vm + "; work state is incorrect: " + step);
- return false;
- }
+ try {
+ if (state == State.Starting) {
+ Step step = work.getStep();
+ if (step == Step.Starting && !cleanUpEvenIfUnableToStop) {
+ s_logger.warn("Unable to cleanup vm " + vm + "; work state is incorrect: " + step);
+ return false;
+ }
- if (step == Step.Started || step == Step.Starting || step == Step.Release) {
+ if (step == Step.Started || step == Step.Starting || step == Step.Release) {
+ if (vm.getHostId() != null) {
+ if (!sendStop(guru, profile, cleanUpEvenIfUnableToStop)) {
+ s_logger.warn("Failed to stop vm " + vm + " in " + State.Starting + " state as a part of cleanup process");
+ return false;
+ }
+ }
+ }
+
+ if (step != Step.Release && step != Step.Prepare && step != Step.Started && step != Step.Starting) {
+ s_logger.debug("Cleanup is not needed for vm " + vm + "; work state is incorrect: " + step);
+ return true;
+ }
+ } else if (state == State.Stopping) {
if (vm.getHostId() != null) {
if (!sendStop(guru, profile, cleanUpEvenIfUnableToStop)) {
- s_logger.warn("Failed to stop vm " + vm + " in " + State.Starting + " state as a part of cleanup process");
+ s_logger.warn("Failed to stop vm " + vm + " in " + State.Stopping + " state as a part of cleanup process");
return false;
}
}
- }
-
- if (step != Step.Release && step != Step.Prepare && step != Step.Started && step != Step.Starting) {
- s_logger.debug("Cleanup is not needed for vm " + vm + "; work state is incorrect: " + step);
- return true;
- }
- } else if (state == State.Stopping) {
- if (vm.getHostId() != null) {
- if (!sendStop(guru, profile, cleanUpEvenIfUnableToStop)) {
- s_logger.warn("Failed to stop vm " + vm + " in " + State.Stopping + " state as a part of cleanup process");
- return false;
+ } else if (state == State.Migrating) {
+ if (vm.getHostId() != null) {
+ if (!sendStop(guru, profile, cleanUpEvenIfUnableToStop)) {
+ s_logger.warn("Failed to stop vm " + vm + " in " + State.Migrating + " state as a part of cleanup process");
+ return false;
+ }
}
- }
- } else if (state == State.Migrating) {
- if (vm.getHostId() != null) {
- if (!sendStop(guru, profile, cleanUpEvenIfUnableToStop)) {
- s_logger.warn("Failed to stop vm " + vm + " in " + State.Migrating + " state as a part of cleanup process");
- return false;
+ if (vm.getLastHostId() != null) {
+ if (!sendStop(guru, profile, cleanUpEvenIfUnableToStop)) {
+ s_logger.warn("Failed to stop vm " + vm + " in " + State.Migrating + " state as a part of cleanup process");
+ return false;
+ }
}
- }
- if (vm.getLastHostId() != null) {
+ } else if (state == State.Running) {
if (!sendStop(guru, profile, cleanUpEvenIfUnableToStop)) {
- s_logger.warn("Failed to stop vm " + vm + " in " + State.Migrating + " state as a part of cleanup process");
+ s_logger.warn("Failed to stop vm " + vm + " in " + State.Running + " state as a part of cleanup process");
return false;
}
}
- } else if (state == State.Running) {
- if (!sendStop(guru, profile, cleanUpEvenIfUnableToStop)) {
- s_logger.warn("Failed to stop vm " + vm + " in " + State.Running + " state as a part of cleanup process");
- return false;
+ } finally {
+ try {
+ _networkMgr.release(profile, cleanUpEvenIfUnableToStop);
+ s_logger.debug("Successfully released network resources for the vm " + vm);
+ } catch (Exception e) {
+ s_logger.warn("Unable to release some network resources.", e);
}
- }
- try {
- _networkMgr.release(profile, cleanUpEvenIfUnableToStop);
- s_logger.debug("Successfully released network resources for the vm " + vm);
- } catch (Exception e) {
- s_logger.warn("Unable to release some network resources.", e);
+ volumeMgr.release(profile);
+ s_logger.debug("Successfully cleanued up resources for the vm " + vm + " in " + state + " state");
}
- volumeMgr.release(profile);
- s_logger.debug("Successfully cleanued up resources for the vm " + vm + " in " + state + " state");
return true;
}