Posted to commits@cloudstack.apache.org by se...@apache.org on 2014/06/07 10:50:46 UTC

[06/16] Cleanup of Xen and XenServer terms. Cloned the xen plugin to create a xenserver plugin, then removed the xen plugin

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/a8212d9e/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java b/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java
new file mode 100644
index 0000000..1af4579
--- /dev/null
+++ b/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java
@@ -0,0 +1,7563 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.hypervisor.xenserver.resource;
+
+import com.cloud.agent.IAgentControl;
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.AttachIsoCommand;
+import com.cloud.agent.api.AttachVolumeAnswer;
+import com.cloud.agent.api.AttachVolumeCommand;
+import com.cloud.agent.api.CheckHealthAnswer;
+import com.cloud.agent.api.CheckHealthCommand;
+import com.cloud.agent.api.CheckNetworkAnswer;
+import com.cloud.agent.api.CheckNetworkCommand;
+import com.cloud.agent.api.CheckOnHostAnswer;
+import com.cloud.agent.api.CheckOnHostCommand;
+import com.cloud.agent.api.CheckVirtualMachineAnswer;
+import com.cloud.agent.api.CheckVirtualMachineCommand;
+import com.cloud.agent.api.CleanupNetworkRulesCmd;
+import com.cloud.agent.api.ClusterVMMetaDataSyncAnswer;
+import com.cloud.agent.api.ClusterVMMetaDataSyncCommand;
+import com.cloud.agent.api.Command;
+import com.cloud.agent.api.CreateStoragePoolCommand;
+import com.cloud.agent.api.CreateVMSnapshotAnswer;
+import com.cloud.agent.api.CreateVMSnapshotCommand;
+import com.cloud.agent.api.DeleteStoragePoolCommand;
+import com.cloud.agent.api.DeleteVMSnapshotAnswer;
+import com.cloud.agent.api.DeleteVMSnapshotCommand;
+import com.cloud.agent.api.GetHostStatsAnswer;
+import com.cloud.agent.api.GetHostStatsCommand;
+import com.cloud.agent.api.GetStorageStatsAnswer;
+import com.cloud.agent.api.GetStorageStatsCommand;
+import com.cloud.agent.api.GetVmDiskStatsAnswer;
+import com.cloud.agent.api.GetVmDiskStatsCommand;
+import com.cloud.agent.api.GetVmStatsAnswer;
+import com.cloud.agent.api.GetVmStatsCommand;
+import com.cloud.agent.api.GetVncPortAnswer;
+import com.cloud.agent.api.GetVncPortCommand;
+import com.cloud.agent.api.HostStatsEntry;
+import com.cloud.agent.api.HostVmStateReportEntry;
+import com.cloud.agent.api.MaintainAnswer;
+import com.cloud.agent.api.MaintainCommand;
+import com.cloud.agent.api.MigrateAnswer;
+import com.cloud.agent.api.MigrateCommand;
+import com.cloud.agent.api.ModifySshKeysCommand;
+import com.cloud.agent.api.ModifyStoragePoolAnswer;
+import com.cloud.agent.api.ModifyStoragePoolCommand;
+import com.cloud.agent.api.NetworkRulesSystemVmCommand;
+import com.cloud.agent.api.NetworkRulesVmSecondaryIpCommand;
+import com.cloud.agent.api.OvsCreateGreTunnelAnswer;
+import com.cloud.agent.api.OvsCreateGreTunnelCommand;
+import com.cloud.agent.api.OvsCreateTunnelAnswer;
+import com.cloud.agent.api.OvsCreateTunnelCommand;
+import com.cloud.agent.api.OvsDeleteFlowCommand;
+import com.cloud.agent.api.OvsDestroyBridgeCommand;
+import com.cloud.agent.api.OvsDestroyTunnelCommand;
+import com.cloud.agent.api.OvsFetchInterfaceAnswer;
+import com.cloud.agent.api.OvsFetchInterfaceCommand;
+import com.cloud.agent.api.OvsSetTagAndFlowAnswer;
+import com.cloud.agent.api.OvsSetTagAndFlowCommand;
+import com.cloud.agent.api.OvsSetupBridgeCommand;
+import com.cloud.agent.api.OvsVpcPhysicalTopologyConfigCommand;
+import com.cloud.agent.api.OvsVpcRoutingPolicyConfigCommand;
+import com.cloud.agent.api.PerformanceMonitorAnswer;
+import com.cloud.agent.api.PerformanceMonitorCommand;
+import com.cloud.agent.api.PingCommand;
+import com.cloud.agent.api.PingRoutingCommand;
+import com.cloud.agent.api.PingRoutingWithNwGroupsCommand;
+import com.cloud.agent.api.PingRoutingWithOvsCommand;
+import com.cloud.agent.api.PingTestCommand;
+import com.cloud.agent.api.PlugNicAnswer;
+import com.cloud.agent.api.PlugNicCommand;
+import com.cloud.agent.api.PrepareForMigrationAnswer;
+import com.cloud.agent.api.PrepareForMigrationCommand;
+import com.cloud.agent.api.PvlanSetupCommand;
+import com.cloud.agent.api.ReadyAnswer;
+import com.cloud.agent.api.ReadyCommand;
+import com.cloud.agent.api.RebootAnswer;
+import com.cloud.agent.api.RebootCommand;
+import com.cloud.agent.api.RebootRouterCommand;
+import com.cloud.agent.api.RevertToVMSnapshotAnswer;
+import com.cloud.agent.api.RevertToVMSnapshotCommand;
+import com.cloud.agent.api.ScaleVmAnswer;
+import com.cloud.agent.api.ScaleVmCommand;
+import com.cloud.agent.api.SecurityGroupRuleAnswer;
+import com.cloud.agent.api.SecurityGroupRulesCmd;
+import com.cloud.agent.api.SetupAnswer;
+import com.cloud.agent.api.SetupCommand;
+import com.cloud.agent.api.SetupGuestNetworkCommand;
+import com.cloud.agent.api.StartAnswer;
+import com.cloud.agent.api.StartCommand;
+import com.cloud.agent.api.StartupCommand;
+import com.cloud.agent.api.StartupRoutingCommand;
+import com.cloud.agent.api.StartupStorageCommand;
+import com.cloud.agent.api.StopAnswer;
+import com.cloud.agent.api.StopCommand;
+import com.cloud.agent.api.StoragePoolInfo;
+import com.cloud.agent.api.UnPlugNicAnswer;
+import com.cloud.agent.api.UnPlugNicCommand;
+import com.cloud.agent.api.UpdateHostPasswordCommand;
+import com.cloud.agent.api.UpgradeSnapshotCommand;
+import com.cloud.agent.api.VgpuTypesInfo;
+import com.cloud.agent.api.VmStatsEntry;
+import com.cloud.agent.api.check.CheckSshAnswer;
+import com.cloud.agent.api.check.CheckSshCommand;
+import com.cloud.agent.api.proxy.CheckConsoleProxyLoadCommand;
+import com.cloud.agent.api.proxy.ConsoleProxyLoadAnswer;
+import com.cloud.agent.api.proxy.WatchConsoleProxyLoadCommand;
+import com.cloud.agent.api.routing.IpAssocCommand;
+import com.cloud.agent.api.routing.IpAssocVpcCommand;
+import com.cloud.agent.api.routing.NetworkElementCommand;
+import com.cloud.agent.api.routing.SetNetworkACLCommand;
+import com.cloud.agent.api.routing.SetSourceNatCommand;
+import com.cloud.agent.api.storage.CreateAnswer;
+import com.cloud.agent.api.storage.CreateCommand;
+import com.cloud.agent.api.storage.DestroyCommand;
+import com.cloud.agent.api.storage.PrimaryStorageDownloadAnswer;
+import com.cloud.agent.api.storage.PrimaryStorageDownloadCommand;
+import com.cloud.agent.api.storage.ResizeVolumeAnswer;
+import com.cloud.agent.api.storage.ResizeVolumeCommand;
+import com.cloud.agent.api.to.DataStoreTO;
+import com.cloud.agent.api.to.DataTO;
+import com.cloud.agent.api.to.DiskTO;
+import com.cloud.agent.api.to.GPUDeviceTO;
+import com.cloud.agent.api.to.IpAddressTO;
+import com.cloud.agent.api.to.NfsTO;
+import com.cloud.agent.api.to.NicTO;
+import com.cloud.agent.api.to.StorageFilerTO;
+import com.cloud.agent.api.to.VirtualMachineTO;
+import com.cloud.agent.api.to.VolumeTO;
+import com.cloud.agent.resource.virtualnetwork.VirtualRouterDeployer;
+import com.cloud.agent.resource.virtualnetwork.VirtualRoutingResource;
+import com.cloud.exception.InternalErrorException;
+import com.cloud.host.Host.Type;
+import com.cloud.hypervisor.Hypervisor.HypervisorType;
+import com.cloud.network.Networks;
+import com.cloud.network.Networks.BroadcastDomainType;
+import com.cloud.network.Networks.IsolationType;
+import com.cloud.network.Networks.TrafficType;
+import com.cloud.network.PhysicalNetworkSetupInfo;
+import com.cloud.resource.ServerResource;
+import com.cloud.resource.hypervisor.HypervisorResource;
+import com.cloud.storage.Storage;
+import com.cloud.storage.Storage.StoragePoolType;
+import com.cloud.storage.Volume;
+import com.cloud.storage.VolumeVO;
+import com.cloud.storage.resource.StorageSubsystemCommandHandler;
+import com.cloud.storage.resource.StorageSubsystemCommandHandlerBase;
+import com.cloud.storage.template.TemplateProp;
+import com.cloud.template.VirtualMachineTemplate.BootloaderType;
+import com.cloud.utils.ExecutionResult;
+import com.cloud.utils.NumbersUtil;
+import com.cloud.utils.Pair;
+import com.cloud.utils.StringUtils;
+import com.cloud.utils.Ternary;
+import com.cloud.utils.exception.CloudRuntimeException;
+import com.cloud.utils.net.NetUtils;
+import com.cloud.utils.ssh.SSHCmdHelper;
+import com.cloud.utils.ssh.SshHelper;
+import com.cloud.vm.DiskProfile;
+import com.cloud.vm.VirtualMachine;
+import com.cloud.vm.VirtualMachine.PowerState;
+import com.cloud.vm.VirtualMachine.State;
+import com.cloud.vm.snapshot.VMSnapshot;
+import com.trilead.ssh2.SCPClient;
+import com.xensource.xenapi.Bond;
+import com.xensource.xenapi.Connection;
+import com.xensource.xenapi.Console;
+import com.xensource.xenapi.Host;
+import com.xensource.xenapi.HostCpu;
+import com.xensource.xenapi.HostMetrics;
+import com.xensource.xenapi.Network;
+import com.xensource.xenapi.PBD;
+import com.xensource.xenapi.PIF;
+import com.xensource.xenapi.Pool;
+import com.xensource.xenapi.SR;
+import com.xensource.xenapi.Session;
+import com.xensource.xenapi.Task;
+import com.xensource.xenapi.Types;
+import com.xensource.xenapi.Types.BadServerResponse;
+import com.xensource.xenapi.Types.VmPowerState;
+import com.xensource.xenapi.Types.XenAPIException;
+import com.xensource.xenapi.VBD;
+import com.xensource.xenapi.VBDMetrics;
+import com.xensource.xenapi.VDI;
+import com.xensource.xenapi.VGPU;
+import com.xensource.xenapi.VIF;
+import com.xensource.xenapi.VLAN;
+import com.xensource.xenapi.VM;
+import com.xensource.xenapi.VMGuestMetrics;
+import com.xensource.xenapi.XenAPIObject;
+import org.apache.cloudstack.storage.command.StorageSubSystemCommand;
+import org.apache.cloudstack.storage.to.TemplateObjectTO;
+import org.apache.cloudstack.storage.to.VolumeObjectTO;
+import org.apache.log4j.Logger;
+import org.apache.xmlrpc.XmlRpcException;
+import org.w3c.dom.Document;
+import org.w3c.dom.Node;
+import org.w3c.dom.NodeList;
+import org.xml.sax.InputSource;
+import org.xml.sax.SAXException;
+import java.util.concurrent.TimeoutException;
+
+import javax.ejb.Local;
+import javax.naming.ConfigurationException;
+import javax.xml.parsers.DocumentBuilderFactory;
+import javax.xml.parsers.ParserConfigurationException;
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.net.URLConnection;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Queue;
+import java.util.Random;
+import java.util.Set;
+import java.util.UUID;
+
+/**
+ * CitrixResourceBase encapsulates the calls to the XenServer Xapi process
+ * to perform the required functionalities for CloudStack.
+ *
+ * ==============>  READ THIS  <==============
+ * Because the XenServer objects can expire when the session expires, we cannot
+ * keep any of the actual XenServer objects in this class.  The only
+ * thing that is constant is the UUID of the XenServer objects, not the
+ * objects themselves!  Keep this in mind before making any changes to
+ * this code.
+ *
+ */
+@Local(value = ServerResource.class)
+public abstract class CitrixResourceBase implements ServerResource, HypervisorResource, VirtualRouterDeployer {
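+    // A minimal sketch of the pattern the note above calls for: rather than caching
+    // a XenAPI object, re-resolve it from the stored UUID on the current connection
+    // each time it is needed, e.g.
+    //     Connection conn = getConnection();
+    //     Host host = Host.getByUuid(conn, _host.uuid);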
+    private static final Logger s_logger = Logger.getLogger(CitrixResourceBase.class);
+    protected static final XenServerConnectionPool ConnPool = XenServerConnectionPool.getInstance();
+    protected String _name;
+    protected String _username;
+    protected Queue<String> _password = new LinkedList<String>();
+    protected final int _retry = 100;
+    protected final int _sleep = 10000;
+    protected long _dcId;
+    protected String _pod;
+    protected String _cluster;
+    protected static final XenServerPoolVms s_vms = new XenServerPoolVms();
+    protected String _privateNetworkName;
+    protected String _linkLocalPrivateNetworkName;
+    protected String _publicNetworkName;
+    protected String _storageNetworkName1;
+    protected String _storageNetworkName2;
+    protected String _guestNetworkName;
+    protected int _wait;
+    protected int _migratewait;
+    protected String _instance; //instance name (default is usually "VM")
+    static final Random Rand = new Random(System.currentTimeMillis());
+    protected boolean _securityGroupEnabled;
+
+    protected IAgentControl _agentControl;
+
+    final int _maxWeight = 256;
+    protected int _heartbeatInterval = 60;
+    protected final XsHost _host = new XsHost();
+
+    // Guest and Host Performance Statistics
+    protected String _consolidationFunction = "AVERAGE";
+    protected int _pollingIntervalInSeconds = 60;
+
+    //Hypervisor specific params with generic value, may need to be overridden for specific versions
+    long _xsMemoryUsed = 128 * 1024 * 1024L; // the XenServer hypervisor itself uses 128 MB
+    double _xsVirtualizationFactor = 63.0 / 64.0;  // 1 - virtualization overhead
+
+    //static min values for guests on xenserver
+    private static final long mem_128m = 134217728L;
+
+    protected boolean _canBridgeFirewall = false;
+    protected boolean _isOvs = false;
+    protected List<VIF> _tmpDom0Vif = new ArrayList<VIF>();
+    protected StorageSubsystemCommandHandler storageHandler;
+    protected int _maxNics = 7;
+
+    protected VirtualRoutingResource _vrResource;
+
+    public enum SRType {
+        NFS, LVM, ISCSI, ISO, LVMOISCSI, LVMOHBA, EXT, FILE;
+
+        String _str;
+
+        private SRType() {
+            _str = super.toString().toLowerCase();
+        }
+
+        @Override
+        public String toString() {
+            return _str;
+        }
+
+        public boolean equals(String type) {
+            return _str.equalsIgnoreCase(type);
+        }
+    }
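+    // The constants above intentionally stringify to lowercase ("nfs", "lvmoiscsi", ...),
+    // matching the type strings XenServer itself reports for an SR, so equals(String)
+    // can compare them directly.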
+
+    protected static final HashMap<Types.VmPowerState, PowerState> s_powerStatesTable;
+    static {
+        s_powerStatesTable = new HashMap<Types.VmPowerState, PowerState>();
+        s_powerStatesTable.put(Types.VmPowerState.HALTED, PowerState.PowerOff);
+        s_powerStatesTable.put(Types.VmPowerState.PAUSED, PowerState.PowerOff);
+        s_powerStatesTable.put(Types.VmPowerState.RUNNING, PowerState.PowerOn);
+        s_powerStatesTable.put(Types.VmPowerState.SUSPENDED, PowerState.PowerOff);
+        s_powerStatesTable.put(Types.VmPowerState.UNRECOGNIZED, PowerState.PowerUnknown);
+    }
+
+    // TODO vmsync {
+    protected static final HashMap<Types.VmPowerState, State> s_statesTable;
+    static {
+        s_statesTable = new HashMap<Types.VmPowerState, State>();
+        s_statesTable.put(Types.VmPowerState.HALTED, State.Stopped);
+        s_statesTable.put(Types.VmPowerState.PAUSED, State.Running);
+        s_statesTable.put(Types.VmPowerState.RUNNING, State.Running);
+        s_statesTable.put(Types.VmPowerState.SUSPENDED, State.Running);
+        s_statesTable.put(Types.VmPowerState.UNRECOGNIZED, State.Unknown);
+    }
+
+    // TODO vmsync }
+
+    public XsHost getHost() {
+        return _host;
+    }
+
+    private static boolean isAlienVm(VM vm, Connection conn) throws XenAPIException, XmlRpcException {
+        // TODO : we need a better way to tell whether or not the VM belongs to CloudStack
+        String vmName = vm.getNameLabel(conn);
+        if (vmName.matches("^[ivs]-\\d+-.+"))
+            return false;
+
+        return true;
+    }
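+    // For reference (illustrative names): CloudStack-managed VMs normally carry name
+    // labels such as i-2-10-VM (user instance), v-1-VM (console proxy) or s-1-VM
+    // (secondary storage VM), which is what the pattern above matches; anything else
+    // is treated as alien.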
+
+    protected boolean cleanupHaltedVms(Connection conn) throws XenAPIException, XmlRpcException {
+        Host host = Host.getByUuid(conn, _host.uuid);
+        Map<VM, VM.Record> vms = VM.getAllRecords(conn);
+        boolean success = true;
+        for (Map.Entry<VM, VM.Record> entry : vms.entrySet()) {
+            VM vm = entry.getKey();
+            VM.Record vmRec = entry.getValue();
+            if (vmRec.isATemplate || vmRec.isControlDomain) {
+                continue;
+            }
+
+            if (VmPowerState.HALTED.equals(vmRec.powerState) && vmRec.affinity.equals(host) && !isAlienVm(vm, conn)) {
+                try {
+                    vm.destroy(conn);
+                } catch (Exception e) {
+                    s_logger.warn("Catch Exception " + e.getClass().getName() + ": unable to destroy VM " + vmRec.nameLabel + " due to ", e);
+                    success = false;
+                }
+            }
+        }
+        return success;
+    }
+
+    protected boolean isRefNull(XenAPIObject object) {
+        return (object == null || object.toWireString().equals("OpaqueRef:NULL") || object.toWireString().equals("<not in database>"));
+    }
+
+    @Override
+    public void disconnected() {
+    }
+
+    protected boolean pingdomr(Connection conn, String host, String port) {
+        String status;
+        status = callHostPlugin(conn, "vmops", "pingdomr", "host", host, "port", port);
+
+        if (status == null || status.isEmpty()) {
+            return false;
+        }
+
+        return true;
+
+    }
+
+    protected boolean pingXAPI() {
+        Connection conn = getConnection();
+        try {
+            Host host = Host.getByUuid(conn, _host.uuid);
+            if( !host.getEnabled(conn) ) {
+                s_logger.debug("Host " + _host.ip + " is not enabled!");
+                return false;
+            }
+        } catch (Exception e) {
+            s_logger.debug("cannot get host enabled status, host " + _host.ip + " due to " + e.toString(),  e);
+            return false;
+        }
+        try {
+            callHostPlugin(conn, "echo", "main");
+        } catch (Exception e) {
+            s_logger.debug("cannot ping host " + _host.ip + " due to " + e.toString(),  e);
+            return false;
+        }
+        return true;
+    }
+
+
+    protected String logX(XenAPIObject obj, String msg) {
+        return new StringBuilder("Host ").append(_host.ip).append(" ").append(obj.toWireString()).append(": ").append(msg).toString();
+    }
+
+    @Override
+    public Answer executeRequest(Command cmd) {
+        Class<? extends Command> clazz = cmd.getClass();
+        if (clazz == CreateCommand.class) {
+            return execute((CreateCommand)cmd);
+        } else if (cmd instanceof NetworkElementCommand) {
+            return _vrResource.executeRequest((NetworkElementCommand)cmd);
+        } else if (clazz == CheckConsoleProxyLoadCommand.class) {
+            return execute((CheckConsoleProxyLoadCommand)cmd);
+        } else if (clazz == WatchConsoleProxyLoadCommand.class) {
+            return execute((WatchConsoleProxyLoadCommand)cmd);
+        } else if (clazz == ReadyCommand.class) {
+            return execute((ReadyCommand)cmd);
+        } else if (clazz == GetHostStatsCommand.class) {
+            return execute((GetHostStatsCommand)cmd);
+        } else if (clazz == GetVmStatsCommand.class) {
+            return execute((GetVmStatsCommand)cmd);
+        } else if (clazz == GetVmDiskStatsCommand.class) {
+            return execute((GetVmDiskStatsCommand)cmd);
+        } else if (clazz == CheckHealthCommand.class) {
+            return execute((CheckHealthCommand)cmd);
+        } else if (clazz == StopCommand.class) {
+            return execute((StopCommand)cmd);
+        } else if (clazz == RebootRouterCommand.class) {
+            return execute((RebootRouterCommand)cmd);
+        } else if (clazz == RebootCommand.class) {
+            return execute((RebootCommand)cmd);
+        } else if (clazz == CheckVirtualMachineCommand.class) {
+            return execute((CheckVirtualMachineCommand)cmd);
+        } else if (clazz == PrepareForMigrationCommand.class) {
+            return execute((PrepareForMigrationCommand)cmd);
+        } else if (clazz == MigrateCommand.class) {
+            return execute((MigrateCommand)cmd);
+        } else if (clazz == DestroyCommand.class) {
+            return execute((DestroyCommand)cmd);
+        } else if (clazz == CreateStoragePoolCommand.class) {
+            return execute((CreateStoragePoolCommand)cmd);
+        } else if (clazz == ModifyStoragePoolCommand.class) {
+            return execute((ModifyStoragePoolCommand)cmd);
+        } else if (clazz == DeleteStoragePoolCommand.class) {
+            return execute((DeleteStoragePoolCommand) cmd);
+        } else if (clazz == ResizeVolumeCommand.class) {
+            return execute((ResizeVolumeCommand) cmd);
+        } else if (clazz == AttachVolumeCommand.class) {
+            return execute((AttachVolumeCommand)cmd);
+        } else if (clazz == AttachIsoCommand.class) {
+            return execute((AttachIsoCommand) cmd);
+        } else if (clazz == UpgradeSnapshotCommand.class) {
+            return execute((UpgradeSnapshotCommand)cmd);
+        } else if (clazz == GetStorageStatsCommand.class) {
+            return execute((GetStorageStatsCommand)cmd);
+        } else if (clazz == PrimaryStorageDownloadCommand.class) {
+            return execute((PrimaryStorageDownloadCommand)cmd);
+        } else if (clazz == GetVncPortCommand.class) {
+            return execute((GetVncPortCommand)cmd);
+        } else if (clazz == SetupCommand.class) {
+            return execute((SetupCommand)cmd);
+        } else if (clazz == MaintainCommand.class) {
+            return execute((MaintainCommand)cmd);
+        } else if (clazz == PingTestCommand.class) {
+            return execute((PingTestCommand)cmd);
+        } else if (clazz == CheckOnHostCommand.class) {
+            return execute((CheckOnHostCommand)cmd);
+        } else if (clazz == ModifySshKeysCommand.class) {
+            return execute((ModifySshKeysCommand)cmd);
+        } else if (clazz == StartCommand.class) {
+            return execute((StartCommand)cmd);
+        } else if (clazz == CheckSshCommand.class) {
+            return execute((CheckSshCommand)cmd);
+        } else if (clazz == SecurityGroupRulesCmd.class) {
+            return execute((SecurityGroupRulesCmd)cmd);
+        } else if (clazz == OvsFetchInterfaceCommand.class) {
+            return execute((OvsFetchInterfaceCommand)cmd);
+        } else if (clazz == OvsCreateGreTunnelCommand.class) {
+            return execute((OvsCreateGreTunnelCommand)cmd);
+        } else if (clazz == OvsSetTagAndFlowCommand.class) {
+            return execute((OvsSetTagAndFlowCommand)cmd);
+        } else if (clazz == OvsDeleteFlowCommand.class) {
+            return execute((OvsDeleteFlowCommand)cmd);
+        } else if (clazz == OvsVpcPhysicalTopologyConfigCommand.class) {
+            return execute((OvsVpcPhysicalTopologyConfigCommand) cmd);
+        } else if (clazz == OvsVpcRoutingPolicyConfigCommand.class) {
+            return execute((OvsVpcRoutingPolicyConfigCommand) cmd);
+        } else if (clazz == CleanupNetworkRulesCmd.class) {
+            return execute((CleanupNetworkRulesCmd)cmd);
+        } else if (clazz == NetworkRulesSystemVmCommand.class) {
+            return execute((NetworkRulesSystemVmCommand)cmd);
+        } else if (clazz == OvsCreateTunnelCommand.class) {
+            return execute((OvsCreateTunnelCommand)cmd);
+        } else if (clazz == OvsSetupBridgeCommand.class) {
+            return execute((OvsSetupBridgeCommand)cmd);
+        } else if (clazz == OvsDestroyBridgeCommand.class) {
+            return execute((OvsDestroyBridgeCommand)cmd);
+        } else if (clazz == OvsDestroyTunnelCommand.class) {
+            return execute((OvsDestroyTunnelCommand)cmd);
+        } else if (clazz == UpdateHostPasswordCommand.class) {
+            return execute((UpdateHostPasswordCommand)cmd);
+        } else if (cmd instanceof ClusterVMMetaDataSyncCommand) {
+            return execute((ClusterVMMetaDataSyncCommand)cmd);
+        } else if (clazz == CheckNetworkCommand.class) {
+            return execute((CheckNetworkCommand)cmd);
+        } else if (clazz == PlugNicCommand.class) {
+            return execute((PlugNicCommand)cmd);
+        } else if (clazz == UnPlugNicCommand.class) {
+            return execute((UnPlugNicCommand) cmd);
+        } else if (cmd instanceof StorageSubSystemCommand) {
+            return storageHandler.handleStorageCommands((StorageSubSystemCommand) cmd);
+        } else if (clazz == CreateVMSnapshotCommand.class) {
+            return execute((CreateVMSnapshotCommand) cmd);
+        } else if (clazz == DeleteVMSnapshotCommand.class) {
+            return execute((DeleteVMSnapshotCommand) cmd);
+        } else if (clazz == RevertToVMSnapshotCommand.class) {
+            return execute((RevertToVMSnapshotCommand) cmd);
+        } else if (clazz == NetworkRulesVmSecondaryIpCommand.class) {
+            return execute((NetworkRulesVmSecondaryIpCommand) cmd);
+        } else if (clazz == ScaleVmCommand.class) {
+            return execute((ScaleVmCommand)cmd);
+        } else if (clazz == PvlanSetupCommand.class) {
+            return execute((PvlanSetupCommand)cmd);
+        } else if (clazz == PerformanceMonitorCommand.class) {
+            return execute((PerformanceMonitorCommand)cmd);
+        } else {
+            return Answer.createUnsupportedCommandAnswer(cmd);
+        }
+    }
+
+    @Override
+    public ExecutionResult executeInVR(String routerIP, String script, String args, int timeout) {
+        Pair<Boolean, String> result;
+        try {
+            s_logger.debug("Executing command in VR:  /opt/cloud/bin/router_proxy.sh " + script + " " + routerIP + " " + args);
+            result = SshHelper.sshExecute(_host.ip, 22, _username, null, _password.peek(), "/opt/cloud/bin/router_proxy.sh " + script + " " + routerIP + " " + args,
+                    60000, 60000, timeout * 1000);
+        } catch (Exception e) {
+            return new ExecutionResult(false, e.getMessage());
+        }
+        return new ExecutionResult(result.first(), result.second());
+    }
+
+    @Override
+    public ExecutionResult executeInVR(String routerIP, String script, String args) {
+        // Timeout is 120 seconds by default
+        return executeInVR(routerIP, script, args, 120);
+    }
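+    // Illustrative use (the script name and arguments are hypothetical): a call such as
+    //     executeInVR("169.254.3.42", "netusage.sh", "-g")
+    // runs, over SSH to the XenServer host,
+    //     /opt/cloud/bin/router_proxy.sh netusage.sh 169.254.3.42 -g
+    // with the default 120 second timeout.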
+
+    @Override
+    public ExecutionResult createFileInVR(String routerIp, String path, String filename, String content) {
+        Connection conn = getConnection();
+        String rc = callHostPlugin(conn, "vmops", "createFileInDomr", "domrip", routerIp, "filepath", path + filename, "filecontents", content);
+        // The plugin returns a string starting with "succ#" on success and "fail#" on failure
+        return new ExecutionResult(rc.startsWith("succ#"), rc.substring(5));
+    }
+
+    @Override
+    public ExecutionResult prepareCommand(NetworkElementCommand cmd) {
+        //Update IP used to access router
+        cmd.setRouterAccessIp(cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP));
+        assert cmd.getRouterAccessIp() != null;
+
+        if (cmd instanceof IpAssocVpcCommand) {
+            return prepareNetworkElementCommand((IpAssocVpcCommand)cmd);
+        } else if (cmd instanceof IpAssocCommand) {
+            return prepareNetworkElementCommand((IpAssocCommand)cmd);
+        } else if (cmd instanceof SetupGuestNetworkCommand) {
+            return prepareNetworkElementCommand((SetupGuestNetworkCommand)cmd);
+        } else if (cmd instanceof SetSourceNatCommand) {
+            return prepareNetworkElementCommand((SetSourceNatCommand)cmd);
+        } else if (cmd instanceof SetNetworkACLCommand) {
+            return prepareNetworkElementCommand((SetNetworkACLCommand)cmd);
+        }
+        return new ExecutionResult(true, null);
+    }
+
+    @Override
+    public ExecutionResult cleanupCommand(NetworkElementCommand cmd) {
+        if (cmd instanceof IpAssocCommand && !(cmd instanceof IpAssocVpcCommand)) {
+            return cleanupNetworkElementCommand((IpAssocCommand)cmd);
+        }
+        return new ExecutionResult(true, null);
+    }
+
+    private Answer execute(PerformanceMonitorCommand cmd) {
+        Connection conn = getConnection();
+        String perfMon = getPerfMon(conn, cmd.getParams(), cmd.getWait());
+        if (perfMon == null) {
+            return new PerformanceMonitorAnswer(cmd, false, perfMon);
+        } else
+            return new PerformanceMonitorAnswer(cmd, true, perfMon);
+    }
+
+    private String getPerfMon(Connection conn, Map<String, String> params,
+            int wait) {
+        String result = null;
+        try {
+            result = callHostPluginAsync(conn, "vmopspremium", "asmonitor", 60,
+                    params);
+            if (result != null)
+                return result;
+        } catch (Exception e) {
+            s_logger.error("Can not get performance monitor for AS due to ", e);
+        }
+        return null;
+    }
+
+    protected String callHostPluginAsync(Connection conn, String plugin,
+            String cmd, int wait, Map<String, String> params) {
+        int timeout = wait * 1000;
+        Map<String, String> args = new HashMap<String, String>();
+        Task task = null;
+        try {
+            for (String key : params.keySet()) {
+                args.put(key, params.get(key));
+            }
+            if (s_logger.isTraceEnabled()) {
+                s_logger.trace("callHostPlugin executing for command " + cmd
+                        + " with " + getArgsString(args));
+            }
+            Host host = Host.getByUuid(conn, _host.uuid);
+            task = host.callPluginAsync(conn, plugin, cmd, args);
+            // poll every second
+            waitForTask(conn, task, 1000, timeout);
+            checkForSuccess(conn, task);
+            String result = task.getResult(conn);
+            if (s_logger.isTraceEnabled()) {
+                s_logger.trace("callHostPlugin Result: " + result);
+            }
+            return result.replace("<value>", "").replace("</value>", "")
+                    .replace("\n", "");
+        } catch (Types.HandleInvalid e) {
+            s_logger.warn("callHostPlugin failed for cmd: " + cmd
+                    + " with args " + getArgsString(args)
+                    + " due to HandleInvalid clazz:" + e.clazz + ", handle:"
+                    + e.handle);
+        } catch (Exception e) {
+            s_logger.warn(
+                    "callHostPlugin failed for cmd: " + cmd + " with args "
+                            + getArgsString(args) + " due to " + e.toString(),
+                            e);
+        } finally {
+            if (task != null) {
+                try {
+                    task.destroy(conn);
+                } catch (Exception e1) {
+                    s_logger.warn("unable to destroy task(" + task.toString()
+                            + ") on host(" + _host.uuid + ") due to ", e1);
+                }
+            }
+        }
+        return null;
+    }
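+    // Illustrative call, mirroring getPerfMon() above (the parameter map keys are whatever
+    // the target plugin expects and are passed through untouched):
+    //     String result = callHostPluginAsync(conn, "vmopspremium", "asmonitor", 60, params);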
+
+    protected void scaleVM(Connection conn, VM vm, VirtualMachineTO vmSpec, Host host) throws XenAPIException, XmlRpcException {
+
+        Long staticMemoryMax = vm.getMemoryStaticMax(conn);
+        Long staticMemoryMin = vm.getMemoryStaticMin(conn);
+        Long newDynamicMemoryMin = vmSpec.getMinRam();
+        Long newDynamicMemoryMax = vmSpec.getMaxRam();
+        if (staticMemoryMin > newDynamicMemoryMin || newDynamicMemoryMax > staticMemoryMax) {
+            throw new CloudRuntimeException("Cannot scale up the vm because of memory constraint violation: " + "0 <= memory-static-min(" + staticMemoryMin +
+                    ") <= memory-dynamic-min(" + newDynamicMemoryMin + ") <= memory-dynamic-max(" + newDynamicMemoryMax + ") <= memory-static-max(" + staticMemoryMax + ")");
+        }
+
+        vm.setMemoryDynamicRange(conn, newDynamicMemoryMin, newDynamicMemoryMax);
+        vm.setVCPUsNumberLive(conn, (long)vmSpec.getCpus());
+
+        Integer speed = vmSpec.getMinSpeed();
+        if (speed != null) {
+
+            int cpuWeight = _maxWeight; //cpu_weight
+
+            // weight based allocation
+
+            cpuWeight = (int)((speed * 0.99) / _host.speed * _maxWeight);
+            if (cpuWeight > _maxWeight) {
+                cpuWeight = _maxWeight;
+            }
+
+            if (vmSpec.getLimitCpuUse()) {
+                long utilization = 0; // max CPU cap, default is unlimited
+                utilization = (int)((vmSpec.getMaxSpeed() * 0.99 * vmSpec.getCpus()) / _host.speed * 100);
+                //vm.addToVCPUsParamsLive(conn, "cap", Long.toString(utilization)); XenServer does not currently support adding VCPUs params live through Xapi, so the vmops plugin is used instead.
+                callHostPlugin(conn, "vmops", "add_to_VCPUs_params_live", "key", "cap", "value", Long.toString(utilization), "vmname", vmSpec.getName());
+            }
+            //vm.addToVCPUsParamsLive(conn, "weight", Integer.toString(cpuWeight));
+            callHostPlugin(conn, "vmops", "add_to_VCPUs_params_live", "key", "weight", "value", Integer.toString(cpuWeight), "vmname", vmSpec.getName());
+        }
+    }
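+    // Worked example (illustrative numbers): on a host clocked at 2000 MHz with
+    // _maxWeight = 256, a service offering with minSpeed = 500 MHz yields
+    //     cpuWeight = (int)((500 * 0.99) / 2000 * 256) = 63
+    // and, with limitCpuUse set, 2 CPUs at maxSpeed = 1000 MHz yields a cap of
+    //     utilization = (int)((1000 * 0.99 * 2) / 2000 * 100) = 99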
+
+    public ScaleVmAnswer execute(ScaleVmCommand cmd) {
+        VirtualMachineTO vmSpec = cmd.getVirtualMachine();
+        String vmName = vmSpec.getName();
+        try {
+            Connection conn = getConnection();
+            Set<VM> vms = VM.getByNameLabel(conn, vmName);
+            Host host = Host.getByUuid(conn, _host.uuid);
+
+            // If DMC (Dynamic Memory Control) is not enabled, do not execute this command.
+            if (!isDmcEnabled(conn, host)) {
+                throw new CloudRuntimeException("Unable to scale the vm: " + vmName + " as DMC - Dynamic memory control is not enabled for the XenServer:" + _host.uuid +
+                        " ,check your license and hypervisor version.");
+            }
+
+            // drop VMs that are halted or that are running on another host; only the copy running on this host will be scaled
+            Iterator<VM> iter = vms.iterator();
+            while (iter.hasNext()) {
+                VM vm = iter.next();
+                VM.Record vmr = vm.getRecord(conn);
+
+                if ((vmr.powerState == VmPowerState.HALTED) ||
+                        (vmr.powerState == VmPowerState.RUNNING && !isRefNull(vmr.residentOn) && !vmr.residentOn.getUuid(conn).equals(_host.uuid))) {
+                    iter.remove();
+                }
+            }
+
+            if (vms.size() == 0) {
+                s_logger.info("No running VM " + vmName + " exists on XenServer" + _host.uuid);
+                return new ScaleVmAnswer(cmd, false, "VM does not exist");
+            }
+
+            for (VM vm : vms) {
+                vm.getRecord(conn);
+                try {
+                    scaleVM(conn, vm, vmSpec, host);
+                } catch (Exception e) {
+                    String msg = "Catch exception " + e.getClass().getName() + " when scaling VM:" + vmName + " due to " + e.toString();
+                    s_logger.debug(msg);
+                    return new ScaleVmAnswer(cmd, false, msg);
+                }
+
+            }
+            String msg = "scaling VM " + vmName + " is successful on host " + host;
+            s_logger.debug(msg);
+            return new ScaleVmAnswer(cmd, true, msg);
+
+        } catch (XenAPIException e) {
+            String msg = "Upgrade Vm " + vmName + " fail due to " + e.toString();
+            s_logger.warn(msg, e);
+            return new ScaleVmAnswer(cmd, false, msg);
+        } catch (XmlRpcException e) {
+            String msg = "Upgrade Vm " + vmName + " fail due to " + e.getMessage();
+            s_logger.warn(msg, e);
+            return new ScaleVmAnswer(cmd, false, msg);
+        } catch (Exception e) {
+            String msg = "Unable to upgrade " + vmName + " due to " + e.getMessage();
+            s_logger.warn(msg, e);
+            return new ScaleVmAnswer(cmd, false, msg);
+        }
+    }
+
+    private Answer execute(RevertToVMSnapshotCommand cmd) {
+        String vmName = cmd.getVmName();
+        List<VolumeObjectTO> listVolumeTo = cmd.getVolumeTOs();
+        VMSnapshot.Type vmSnapshotType = cmd.getTarget().getType();
+        Boolean snapshotMemory = vmSnapshotType == VMSnapshot.Type.DiskAndMemory;
+        Connection conn = getConnection();
+        VirtualMachine.State vmState = null;
+        VM vm = null;
+        try {
+
+            // remove vm from s_vms, for delta sync
+            s_vms.remove(_cluster, _name, vmName);
+
+            Set<VM> vmSnapshots = VM.getByNameLabel(conn, cmd.getTarget().getSnapshotName());
+            if (vmSnapshots.size() == 0)
+                return new RevertToVMSnapshotAnswer(cmd, false, "Cannot find vmSnapshot with name: " + cmd.getTarget().getSnapshotName());
+
+            VM vmSnapshot = vmSnapshots.iterator().next();
+
+            // find the target VM, or create a working VM if it does not exist
+            try {
+                vm = getVM(conn, vmName);
+            } catch (Exception e) {
+                vm = createWorkingVM(conn, vmName, cmd.getGuestOSType(), listVolumeTo);
+            }
+
+            if (vm == null) {
+                return new RevertToVMSnapshotAnswer(cmd, false, "Revert to VM Snapshot Failed due to can not find vm: " + vmName);
+            }
+
+            // call plugin to execute revert
+            revertToSnapshot(conn, vmSnapshot, vmName, vm.getUuid(conn), snapshotMemory, _host.uuid);
+            vm = getVM(conn, vmName);
+            Set<VBD> vbds = vm.getVBDs(conn);
+            Map<String, VDI> vdiMap = new HashMap<String, VDI>();
+            // map each disk VBD's userdevice to its VDI
+            for (VBD vbd : vbds) {
+                VBD.Record vbdr = vbd.getRecord(conn);
+                if (vbdr.type == Types.VbdType.DISK) {
+                    VDI vdi = vbdr.VDI;
+                    vdiMap.put(vbdr.userdevice, vdi);
+                }
+            }
+
+            if (!snapshotMemory) {
+                vm.destroy(conn);
+                vmState = VirtualMachine.State.Stopped;
+            } else {
+                s_vms.put(_cluster, _name, vmName, State.Running);
+                vmState = VirtualMachine.State.Running;
+            }
+
+            // after the revert, the VM's volume paths have changed; report the new paths back to the management server
+            for (VolumeObjectTO volumeTo : listVolumeTo) {
+                Long deviceId = volumeTo.getDeviceId();
+                VDI vdi = vdiMap.get(deviceId.toString());
+                volumeTo.setPath(vdi.getUuid(conn));
+            }
+
+            return new RevertToVMSnapshotAnswer(cmd, listVolumeTo, vmState);
+        } catch (Exception e) {
+            s_logger.error("revert vm " + vmName + " to snapshot " + cmd.getTarget().getSnapshotName() + " failed due to " + e.getMessage());
+            return new RevertToVMSnapshotAnswer(cmd, false, e.getMessage());
+        }
+    }
+
+    protected String revertToSnapshot(Connection conn, VM vmSnapshot, String vmName, String oldVmUuid, Boolean snapshotMemory, String hostUUID) throws XenAPIException,
+    XmlRpcException {
+
+        String results =
+                callHostPluginAsync(conn, "vmopsSnapshot", "revert_memory_snapshot", 10 * 60 * 1000, "snapshotUUID", vmSnapshot.getUuid(conn), "vmName", vmName, "oldVmUuid",
+                        oldVmUuid, "snapshotMemory", snapshotMemory.toString(), "hostUUID", hostUUID);
+        String errMsg = null;
+        if (results == null || results.isEmpty()) {
+            errMsg = "revert_memory_snapshot return null";
+        } else {
+            if (results.equals("0")) {
+                return results;
+            } else {
+                errMsg = "revert_memory_snapshot exception";
+            }
+        }
+        s_logger.warn(errMsg);
+        throw new CloudRuntimeException(errMsg);
+    }
+
+    protected XsLocalNetwork getNativeNetworkForTraffic(Connection conn, TrafficType type, String name) throws XenAPIException, XmlRpcException {
+        if (name != null) {
+            if (s_logger.isDebugEnabled()) {
+                s_logger.debug("Looking for network named " + name);
+            }
+            return getNetworkByName(conn, name);
+        }
+
+        if (type == TrafficType.Guest) {
+            return new XsLocalNetwork(Network.getByUuid(conn, _host.guestNetwork), null, PIF.getByUuid(conn, _host.guestPif), null);
+        } else if (type == TrafficType.Control) {
+            setupLinkLocalNetwork(conn);
+            return new XsLocalNetwork(Network.getByUuid(conn, _host.linkLocalNetwork));
+        } else if (type == TrafficType.Management) {
+            return new XsLocalNetwork(Network.getByUuid(conn, _host.privateNetwork), null, PIF.getByUuid(conn, _host.privatePif), null);
+        } else if (type == TrafficType.Public) {
+            return new XsLocalNetwork(Network.getByUuid(conn, _host.publicNetwork), null, PIF.getByUuid(conn, _host.publicPif), null);
+        } else if (type == TrafficType.Storage) {
+            /*   TrafficType.Storage is for secondary storage, while storageNetwork1 is for primary storage; we need a better name here */
+            return new XsLocalNetwork(Network.getByUuid(conn, _host.storageNetwork1), null, PIF.getByUuid(conn, _host.storagePif1), null);
+        }
+
+        throw new CloudRuntimeException("Unsupported network type: " + type);
+    }
+
+    /**
+     * Creating a network in XenServer is tricky: if you create a network and then
+     * create the bridge yourself with brctl or openvswitch, you will get a
+     * "REQUIRED_NETWORK" exception when you start a VM on that network. The
+     * solution is to create a VIF on dom0 and plug it into the network, so that
+     * XenServer creates the bridge on our behalf.
+     * @throws XmlRpcException
+     * @throws XenAPIException
+     */
+    private void enableXenServerNetwork(Connection conn, Network nw, String vifNameLabel, String networkDesc) throws XenAPIException, XmlRpcException {
+        /* Make sure there is a physical bridge on this network */
+        VIF dom0vif = null;
+        Pair<VM, VM.Record> vm = getControlDomain(conn);
+        VM dom0 = vm.first();
+        // Create a VIF only if dom0 does not already have one on this network
+        Set<VIF> dom0Vifs = dom0.getVIFs(conn);
+        for (VIF vif : dom0Vifs) {
+            vif.getRecord(conn);
+            if (vif.getNetwork(conn).getUuid(conn).equals(nw.getUuid(conn))) {
+                dom0vif = vif;
+                s_logger.debug("A VIF for dom0 has already been found - No need to create one");
+            }
+        }
+
+        if (dom0vif == null) {
+            s_logger.debug("Create a vif on dom0 for " + networkDesc);
+            VIF.Record vifr = new VIF.Record();
+            vifr.VM = dom0;
+            vifr.device = getLowestAvailableVIFDeviceNum(conn, dom0);
+            if (vifr.device == null) {
+                s_logger.debug("Failed to create " + networkDesc + ", no vif available");
+                return;
+            }
+            Map<String, String> config = new HashMap<String, String>();
+            config.put("nameLabel", vifNameLabel);
+            vifr.otherConfig = config;
+            vifr.MAC = "FE:FF:FF:FF:FF:FF";
+            vifr.network = nw;
+
+            vifr.lockingMode = Types.VifLockingMode.NETWORK_DEFAULT;
+            dom0vif = VIF.create(conn, vifr);
+            synchronized (_tmpDom0Vif) {
+                _tmpDom0Vif.add(dom0vif);
+            }
+            try {
+                dom0vif.plug(conn);
+            } catch (Exception e) {
+                // though an exception is thrown here, VIF actually gets plugged-in to dom0, so just ignore the exception
+            }
+            dom0vif.unplug(conn);
+        }
+    }
+
+    private synchronized Network setupvSwitchNetwork(Connection conn) {
+        try {
+            if (_host.vswitchNetwork == null) {
+                Network vswitchNw = null;
+                Network.Record rec = new Network.Record();
+                String nwName = Networks.BroadcastScheme.VSwitch.toString();
+                Set<Network> networks = Network.getByNameLabel(conn, nwName);
+
+                if (networks.size() == 0) {
+                    rec.nameDescription = "vswitch network for " + nwName;
+                    rec.nameLabel = nwName;
+                    vswitchNw = Network.create(conn, rec);
+                } else {
+                    vswitchNw = networks.iterator().next();
+                }
+                if (!is_xcp())
+                    enableXenServerNetwork(conn, vswitchNw, "vswitch", "vswitch network");
+                _host.vswitchNetwork = vswitchNw;
+            }
+            return _host.vswitchNetwork;
+        } catch (BadServerResponse e) {
+            s_logger.error("Failed to setup vswitch network", e);
+        } catch (XenAPIException e) {
+            s_logger.error("Failed to setup vswitch network", e);
+        } catch (XmlRpcException e) {
+            s_logger.error("Failed to setup vswitch network", e);
+        }
+
+        return null;
+    }
+
+    /**
+     * This method just creates a XenServer network following the tunnel network naming convention
+     */
+    private synchronized Network findOrCreateTunnelNetwork(Connection conn, String nwName) {
+        try {
+            Network nw = null;
+            Network.Record rec = new Network.Record();
+            Set<Network> networks = Network.getByNameLabel(conn, nwName);
+
+
+            if (networks.size() == 0) {
+                rec.nameDescription = "tunnel network id# " + nwName;
+                rec.nameLabel = nwName;
+                //Initialize the ovs-host-setup to avoid error when doing get-param in plugin
+                Map<String, String> otherConfig = new HashMap<String, String>();
+                otherConfig.put("ovs-host-setup", "");
+                rec.otherConfig = otherConfig;
+                nw = Network.create(conn, rec);
+                // Plug dom0 vif only when creating network
+                enableXenServerNetwork(conn, nw, nwName, "tunnel network for account " + nwName);
+                s_logger.debug("### XenServer network for tunnels created:" + nwName);
+            } else {
+                nw = networks.iterator().next();
+                enableXenServerNetwork(conn, nw, nwName, "tunnel network for account " + nwName);
+                s_logger.debug("XenServer network for tunnels found:" + nwName);
+            }
+            return nw;
+        } catch (Exception e) {
+            s_logger.warn("createTunnelNetwork failed", e);
+            return null;
+        }
+    }
+
+    /**
+     * This method creates a XenServer network and configures it for being used as a L2-in-L3 tunneled network
+     */
+    private synchronized Network configureTunnelNetwork(Connection conn, Long networkId, long hostId, String bridgeName) {
+        try {
+            Network nw = findOrCreateTunnelNetwork(conn, bridgeName);
+            String nwName = bridgeName;
+            //Invoke plugin to setup the bridge which will be used by this network
+            String bridge = nw.getBridge(conn);
+            Map<String, String> nwOtherConfig = nw.getOtherConfig(conn);
+            String configuredHosts = nwOtherConfig.get("ovs-host-setup");
+            boolean configured = false;
+            if (configuredHosts != null) {
+                String hostIdsStr[] = configuredHosts.split(",");
+                for (String hostIdStr : hostIdsStr) {
+                    if (hostIdStr.equals(((Long)hostId).toString())) {
+                        configured = true;
+                        break;
+                    }
+                }
+            }
+            if (!configured) {
+                // Plug dom0 vif only if not done before for network and host
+                enableXenServerNetwork(conn, nw, nwName, "tunnel network for account " + bridgeName);
+                String result;
+                if (bridgeName.startsWith("OVS-DR-VPC-Bridge")) {
+                    result = callHostPlugin(conn, "ovstunnel", "setup_ovs_bridge_for_distributed_routing", "bridge", bridge,
+                            "key", bridgeName,
+                            "xs_nw_uuid", nw.getUuid(conn),
+                            "cs_host_id", ((Long)hostId).toString());
+                } else {
+                    result = callHostPlugin(conn, "ovstunnel", "setup_ovs_bridge", "bridge", bridge,
+                            "key", bridgeName,
+                            "xs_nw_uuid", nw.getUuid(conn),
+                            "cs_host_id", ((Long)hostId).toString());
+                }
+
+                //Note down the fact that the ovs bridge has been setup
+                String[] res = result.split(":");
+                if (res.length != 2 || !res[0].equalsIgnoreCase("SUCCESS")) {
+                    //TODO: Should make this error not fatal?
+                    throw new CloudRuntimeException("Unable to pre-configure OVS bridge " + bridge );
+                }
+            }
+            return nw;
+        } catch (Exception e) {
+            s_logger.warn("createandConfigureTunnelNetwork failed", e);
+            return null;
+        }
+    }
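+    // Note (illustrative value): the network's "ovs-host-setup" other-config entry read above
+    // holds a comma-separated list of CloudStack host ids, e.g. "3,7,12", recording which hosts
+    // already have the OVS bridge configured for this network.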
+
+    private synchronized void destroyTunnelNetwork(Connection conn, Network nw, long hostId) {
+        try {
+            String bridge = nw.getBridge(conn);
+            String result = callHostPlugin(conn, "ovstunnel", "destroy_ovs_bridge", "bridge", bridge,
+                    "cs_host_id", ((Long)hostId).toString());
+            String[] res = result.split(":");
+            if (res.length != 2 || !res[0].equalsIgnoreCase("SUCCESS")) {
+                //TODO: Should make this error not fatal?
+                //Can concurrent VM shutdown/migration/reboot events cause this method
+                //to be executed on a bridge which has already been removed?
+                throw new CloudRuntimeException("Unable to remove OVS bridge " + bridge + ":" + res);
+            }
+            return;
+        } catch (Exception e) {
+            s_logger.warn("destroyTunnelNetwork failed:", e);
+            return;
+        }
+    }
+
+    protected Network getNetwork(Connection conn, NicTO nic) throws XenAPIException, XmlRpcException {
+        String name = nic.getName();
+        XsLocalNetwork network = getNativeNetworkForTraffic(conn, nic.getType(), name);
+        if (network == null) {
+            s_logger.error("Network is not configured on the backend for nic " + nic.toString());
+            throw new CloudRuntimeException("Network for the backend is not configured correctly for network broadcast domain: " + nic.getBroadcastUri());
+        }
+        URI uri = nic.getBroadcastUri();
+        BroadcastDomainType type = nic.getBroadcastType();
+        if (uri != null && uri.toString().contains("untagged")) {
+            return network.getNetwork();
+        } else if (type == BroadcastDomainType.Vlan) {
+            assert (BroadcastDomainType.getSchemeValue(uri) == BroadcastDomainType.Vlan);
+            long vlan = Long.parseLong(BroadcastDomainType.getValue(uri));
+            return enableVlanNetwork(conn, vlan, network);
+        } else if (type == BroadcastDomainType.Native || type == BroadcastDomainType.LinkLocal) {
+            return network.getNetwork();
+        } else if (type == BroadcastDomainType.Vswitch) {
+            String header = uri.toString().substring(Networks.BroadcastDomainType.Vswitch.scheme().length() + "://".length());
+            if (header.startsWith("vlan")) {
+                _isOvs = true;
+                return setupvSwitchNetwork(conn);
+            } else {
+                return findOrCreateTunnelNetwork(conn, getOvsTunnelNetworkName(uri.getAuthority()));
+            }
+        } else if (type == BroadcastDomainType.Storage) {
+            if (uri == null) {
+                return network.getNetwork();
+            } else {
+                long vlan = Long.parseLong(BroadcastDomainType.getValue(uri));
+                return enableVlanNetwork(conn, vlan, network);
+            }
+        } else if (type == BroadcastDomainType.Lswitch) {
+            // Nicira Logical Switch
+            return network.getNetwork();
+        } else if (type == BroadcastDomainType.Pvlan) {
+            assert BroadcastDomainType.getSchemeValue(uri) == BroadcastDomainType.Pvlan;
+            // should we consider moving this NetUtils method to BroadcastDomainType?
+            long vlan = Long.parseLong(NetUtils.getPrimaryPvlanFromUri(uri));
+            return enableVlanNetwork(conn, vlan, network);
+        }
+
+        throw new CloudRuntimeException("Unable to support this type of network broadcast domain: " + nic.getBroadcastUri());
+    }
+
+    private String getOvsTunnelNetworkName(String broadcastUri) {
+        if (broadcastUri.contains(".")) {
+            String[] parts = broadcastUri.split("\\.");
+            return "OVS-DR-VPC-Bridge"+parts[0];
+         } else {
+            try {
+                return "OVSTunnel" + broadcastUri;
+            } catch (Exception e) {
+                return null;
+            }
+         }
+    }
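+    // Examples (illustrative values): a broadcast URI authority of "12.34" maps to
+    // "OVS-DR-VPC-Bridge12", while "567" maps to "OVSTunnel567".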
+
+    protected VIF createVif(Connection conn, String vmName, VM vm, VirtualMachineTO vmSpec, NicTO nic) throws XmlRpcException, XenAPIException {
+        assert (nic.getUuid() != null) : "Nic should have a uuid value";
+
+        if (s_logger.isDebugEnabled()) {
+            s_logger.debug("Creating VIF for " + vmName + " on nic " + nic);
+        }
+        VIF.Record vifr = new VIF.Record();
+        vifr.VM = vm;
+        vifr.device = Integer.toString(nic.getDeviceId());
+        vifr.MAC = nic.getMac();
+
+        // Nicira needs these IDs to find the NIC
+        vifr.otherConfig = new HashMap<String, String>();
+        vifr.otherConfig.put("nicira-iface-id", nic.getUuid());
+        vifr.otherConfig.put("nicira-vm-id", vm.getUuid(conn));
+        // Provide XAPI with the cloudstack vm and nic uids.
+        vifr.otherConfig.put("cloudstack-nic-id", nic.getUuid());
+        if (vmSpec != null) {
+            vifr.otherConfig.put("cloudstack-vm-id", vmSpec.getUuid());
+        }
+
+        // The OVS plugin looks at the network UUID in the VIF's otherConfig details to group VIFs and tunnel ports
+        // belonging to the same tier when the bridge is set up for distributed routing
+        vifr.otherConfig.put("cloudstack-network-id", nic.getNetworkUuid());
+
+        vifr.network = getNetwork(conn, nic);
+
+        if (nic.getNetworkRateMbps() != null && nic.getNetworkRateMbps().intValue() != -1) {
+            vifr.qosAlgorithmType = "ratelimit";
+            vifr.qosAlgorithmParams = new HashMap<String, String>();
+            // convert Mbps to kilobytes per second
+            vifr.qosAlgorithmParams.put("kbps", Integer.toString(nic.getNetworkRateMbps() * 128));
+        }
+
+        vifr.lockingMode = Types.VifLockingMode.NETWORK_DEFAULT;
+        VIF vif = VIF.create(conn, vifr);
+        if (s_logger.isDebugEnabled()) {
+            vifr = vif.getRecord(conn);
+            s_logger.debug("Created a vif " + vifr.uuid + " on " + nic.getDeviceId());
+        }
+
+        return vif;
+    }
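+    // Example (illustrative rate): a 200 Mbps network rate is handed to XAPI as
+    //     kbps = 200 * 128 = 25600
+    // which the ratelimit QoS algorithm interprets as kilobytes per second.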
+
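+    // Ensure that the SR backing any ISO currently attached to the named VM has a PBD on this
+    // host: if none exists, the SR is marked shared and a new PBD is created from an existing
+    // one's record and plugged on this host.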
+    protected void prepareISO(Connection conn, String vmName) throws XmlRpcException, XenAPIException {
+
+        Set<VM> vms = VM.getByNameLabel(conn, vmName);
+        if (vms == null || vms.size() != 1) {
+            throw new CloudRuntimeException("There are " + ((vms == null) ? "0" : vms.size()) + " VMs named " + vmName);
+        }
+        VM vm = vms.iterator().next();
+        Set<VBD> vbds = vm.getVBDs(conn);
+        for (VBD vbd : vbds) {
+            VBD.Record vbdr = vbd.getRecord(conn);
+            if (vbdr.type == Types.VbdType.CD && vbdr.empty == false) {
+                VDI vdi = vbdr.VDI;
+                SR sr = vdi.getSR(conn);
+                Set<PBD> pbds = sr.getPBDs(conn);
+                if (pbds == null) {
+                    throw new CloudRuntimeException("There is no pbd for sr " + sr);
+                }
+                for (PBD pbd : pbds) {
+                    PBD.Record pbdr = pbd.getRecord(conn);
+                    if (pbdr.host.getUuid(conn).equals(_host.uuid)) {
+                        return;
+                    }
+                }
+                sr.setShared(conn, true);
+                Host host = Host.getByUuid(conn, _host.uuid);
+                PBD.Record pbdr = pbds.iterator().next().getRecord(conn);
+                pbdr.host = host;
+                pbdr.uuid = "";
+                PBD pbd = PBD.create(conn, pbdr);
+                pbdPlug(conn, pbd, pbd.getUuid(conn));
+                break;
+            }
+        }
+    }
+
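+    // Resolves a DiskTO to a VDI. ISOs are handled specially: a null data store means a "fake" ISO (no VDI),
+    // the xs-tools PV driver ISO is looked up by name-label, and NFS-backed ISOs are mounted via an ISO SR
+    // created from the store URI. For regular volumes the VDI is fetched by the UUID stored in the path.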
+    protected VDI mount(Connection conn, String vmName, DiskTO volume) throws XmlRpcException, XenAPIException {
+        DataTO data = volume.getData();
+        Volume.Type type = volume.getType();
+        if (type == Volume.Type.ISO) {
+            TemplateObjectTO iso = (TemplateObjectTO)data;
+            DataStoreTO store = iso.getDataStore();
+
+            if (store == null) {
+                //It's a fake iso
+                return null;
+            }
+
+            // corner case: the XenServer PV driver ISO (xs-tools)
+            String templateName = iso.getName();
+            if (templateName.startsWith("xs-tools")) {
+                try {
+                    Set<VDI> vdis = VDI.getByNameLabel(conn, templateName);
+                    if (vdis.isEmpty()) {
+                        throw new CloudRuntimeException("Could not find ISO with URL: " + templateName);
+                    }
+                    return vdis.iterator().next();
+                } catch (XenAPIException e) {
+                    throw new CloudRuntimeException("Unable to get pv iso: " + templateName + " due to " + e.toString());
+                } catch (Exception e) {
+                    throw new CloudRuntimeException("Unable to get pv iso: " + templateName + " due to " + e.toString());
+                }
+            }
+
+            if (!(store instanceof NfsTO)) {
+                throw new CloudRuntimeException("Only mounting an ISO from an NFS store is supported");
+            }
+            NfsTO nfsStore = (NfsTO)store;
+            String isoPath = nfsStore.getUrl() + File.separator + iso.getPath();
+            int index = isoPath.lastIndexOf("/");
+
+            String mountpoint = isoPath.substring(0, index);
+            URI uri;
+            try {
+                uri = new URI(mountpoint);
+            } catch (URISyntaxException e) {
+                throw new CloudRuntimeException("Incorrect uri " + mountpoint, e);
+            }
+            SR isoSr = createIsoSRbyURI(conn, uri, vmName, false);
+
+            String isoname = isoPath.substring(index + 1);
+
+            VDI isoVdi = getVDIbyLocationandSR(conn, isoname, isoSr);
+
+            if (isoVdi == null) {
+                throw new CloudRuntimeException("Unable to find ISO " + isoPath);
+            }
+            return isoVdi;
+        } else {
+            VolumeObjectTO vol = (VolumeObjectTO)data;
+            return VDI.getByUuid(conn, vol.getPath());
+        }
+    }
+
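+    // Creates the VBD for a disk: ISOs are attached read-only as CD devices, ROOT volumes read-write and not
+    // unpluggable, data volumes read-write and unpluggable. The bootable flag is set for a PyGrub ROOT disk or
+    // a CD-booted ISO, and the device number follows the disk sequence from the DiskTO.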
+    protected VBD createVbd(Connection conn, DiskTO volume, String vmName, VM vm, BootloaderType bootLoaderType, VDI vdi) throws XmlRpcException, XenAPIException {
+        Volume.Type type = volume.getType();
+
+        if (vdi == null) {
+            vdi = mount(conn, vmName, volume);
+        }
+
+        if (vdi != null) {
+            if ("detached".equals(vdi.getNameLabel(conn))) {
+                vdi.setNameLabel(conn, vmName + "-DATA");
+            }
+
+            Map<String, String> smConfig = vdi.getSmConfig(conn);
+            for (String key : smConfig.keySet()) {
+                if (key.startsWith("host_")) {
+                    vdi.removeFromSmConfig(conn, key);
+                    break;
+                }
+            }
+        }
+        VBD.Record vbdr = new VBD.Record();
+        vbdr.VM = vm;
+        if (vdi != null) {
+            vbdr.VDI = vdi;
+        } else {
+            vbdr.empty = true;
+        }
+        if (type == Volume.Type.ROOT && bootLoaderType == BootloaderType.PyGrub) {
+            vbdr.bootable = true;
+        } else if (type == Volume.Type.ISO && bootLoaderType == BootloaderType.CD) {
+            vbdr.bootable = true;
+        }
+
+        vbdr.userdevice = Long.toString(volume.getDiskSeq());
+        if (volume.getType() == Volume.Type.ISO) {
+            vbdr.mode = Types.VbdMode.RO;
+            vbdr.type = Types.VbdType.CD;
+        } else if (volume.getType() == Volume.Type.ROOT) {
+            vbdr.mode = Types.VbdMode.RW;
+            vbdr.type = Types.VbdType.DISK;
+            vbdr.unpluggable = false;
+        } else {
+            vbdr.mode = Types.VbdMode.RW;
+            vbdr.type = Types.VbdType.DISK;
+            vbdr.unpluggable = true;
+        }
+        VBD vbd = VBD.create(conn, vbdr);
+
+        if (s_logger.isDebugEnabled()) {
+            s_logger.debug("VBD " + vbd.getUuid(conn) + " created for " + volume);
+        }
+
+        return vbd;
+    }
+
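+    // The base implementations simply mirror the dynamic memory values; version-specific XenServer resources
+    // are expected to override these to compute proper static limits for dynamic memory control (DMC).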
+    public long getStaticMax(String os, boolean b, long dynamicMinRam, long dynamicMaxRam) {
+        return dynamicMaxRam;
+    }
+
+    public long getStaticMin(String os, boolean b, long dynamicMinRam, long dynamicMaxRam) {
+        return dynamicMinRam;
+    }
+
+    protected HashMap<String, HashMap<String, VgpuTypesInfo>> getGPUGroupDetails(Connection conn) throws XenAPIException, XmlRpcException {
+        return null;
+    }
+
+    protected void createVGPU(Connection conn, StartCommand cmd, VM vm, GPUDeviceTO gpuDevice) throws XenAPIException, XmlRpcException {
+    }
+
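+    // Clones the record of the built-in guest OS template matching the requested OS type, then adjusts it for
+    // this VM: host affinity, name, crash/shutdown behaviour, memory limits (static == dynamic unless DMC
+    // scaling is enabled), VCPU count (twice the requested count capped at 16 for non-Windows guests),
+    // CPU weight/cap, PV boot arguments, and the PV bootloader for guests outside the Windows/Citrix/Other types.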
+    protected VM createVmFromTemplate(Connection conn, VirtualMachineTO vmSpec, Host host) throws XenAPIException, XmlRpcException {
+        String guestOsTypeName = getGuestOsType(vmSpec.getOs(), vmSpec.getBootloader() == BootloaderType.CD);
+        Set<VM> templates = VM.getByNameLabel(conn, guestOsTypeName);
+        assert templates.size() == 1 : "Should only have 1 template but found " + templates.size();
+        VM template = templates.iterator().next();
+
+        VM.Record vmr = template.getRecord(conn);
+        vmr.affinity = host;
+        vmr.otherConfig.remove("disks");
+        vmr.otherConfig.remove("default_template");
+        vmr.otherConfig.remove("mac_seed");
+        vmr.isATemplate = false;
+        vmr.nameLabel = vmSpec.getName();
+        vmr.actionsAfterCrash = Types.OnCrashBehaviour.DESTROY;
+        vmr.actionsAfterShutdown = Types.OnNormalExit.DESTROY;
+
+        if (isDmcEnabled(conn, host) && vmSpec.isEnableDynamicallyScaleVm()) {
+            //scaling is allowed
+            vmr.memoryStaticMin = getStaticMin(vmSpec.getOs(), vmSpec.getBootloader() == BootloaderType.CD, vmSpec.getMinRam(), vmSpec.getMaxRam());
+            vmr.memoryStaticMax = getStaticMax(vmSpec.getOs(), vmSpec.getBootloader() == BootloaderType.CD, vmSpec.getMinRam(), vmSpec.getMaxRam());
+            vmr.memoryDynamicMin = vmSpec.getMinRam();
+            vmr.memoryDynamicMax = vmSpec.getMaxRam();
+        } else {
+            //scaling disallowed, set static memory target
+            if (vmSpec.isEnableDynamicallyScaleVm() && !isDmcEnabled(conn, host)) {
+                s_logger.warn("Host " + host.getHostname(conn) + " does not support dynamic scaling, so the vm " + vmSpec.getName() + " is not dynamically scalable");
+            }
+            vmr.memoryStaticMin = vmSpec.getMinRam();
+            vmr.memoryStaticMax = vmSpec.getMaxRam();
+            vmr.memoryDynamicMin = vmSpec.getMinRam();
+            vmr.memoryDynamicMax = vmSpec.getMaxRam();
+        }
+
+        if (guestOsTypeName.toLowerCase().contains("windows")) {
+            vmr.VCPUsMax = (long)vmSpec.getCpus();
+        } else {
+            // XenServer has a documented limit of 16 vcpus per vm
+            vmr.VCPUsMax = 2L * vmSpec.getCpus();
+            if (vmr.VCPUsMax > 16)
+            {
+                vmr.VCPUsMax = 16L;
+            }
+        }
+
+        vmr.VCPUsAtStartup = (long)vmSpec.getCpus();
+        vmr.consoles.clear();
+
+        VM vm = VM.create(conn, vmr);
+        if (s_logger.isDebugEnabled()) {
+            s_logger.debug("Created VM " + vm.getUuid(conn) + " for " + vmSpec.getName());
+        }
+
+        Map<String, String> vcpuParams = new HashMap<String, String>();
+
+        Integer speed = vmSpec.getMinSpeed();
+        if (speed != null) {
+
+            int cpuWeight = _maxWeight; // cpu_weight
+            int utilization = 0; // max CPU cap, default is unlimited
+
+            // weight based allocation, CPU weight is calculated per VCPU
+            cpuWeight = (int)((speed * 0.99) / _host.speed * _maxWeight);
+            if (cpuWeight > _maxWeight) {
+                cpuWeight = _maxWeight;
+            }
+
+            if (vmSpec.getLimitCpuUse()) {
+                // CPU cap is per VM, so need to assign cap based on the number of vcpus
+                utilization = (int)((vmSpec.getMaxSpeed() * 0.99 * vmSpec.getCpus()) / _host.speed * 100);
+            }
+
+            vcpuParams.put("weight", Integer.toString(cpuWeight));
+            vcpuParams.put("cap", Integer.toString(utilization));
+
+        }
+
+        if (vcpuParams.size() > 0) {
+            vm.setVCPUsParams(conn, vcpuParams);
+        }
+
+        String bootArgs = vmSpec.getBootArgs();
+        if (bootArgs != null && bootArgs.length() > 0) {
+            String pvargs = vm.getPVArgs(conn);
+            pvargs = pvargs + vmSpec.getBootArgs().replaceAll(" ", "%");
+            if (s_logger.isDebugEnabled()) {
+                s_logger.debug("PV args are " + pvargs);
+            }
+            vm.setPVArgs(conn, pvargs);
+        }
+
+        if (!(guestOsTypeName.startsWith("Windows") || guestOsTypeName.startsWith("Citrix") || guestOsTypeName.startsWith("Other"))) {
+            if (vmSpec.getBootloader() == BootloaderType.CD) {
+                DiskTO[] disks = vmSpec.getDisks();
+                for (DiskTO disk : disks) {
+                    if (disk.getType() == Volume.Type.ISO) {
+                        TemplateObjectTO iso = (TemplateObjectTO)disk.getData();
+                        String osType = iso.getGuestOsType();
+                        if (osType != null) {
+                            String isoGuestOsName = getGuestOsType(osType, vmSpec.getBootloader() == BootloaderType.CD);
+                            if (!isoGuestOsName.equals(guestOsTypeName)) {
+                                vmSpec.setBootloader(BootloaderType.PyGrub);
+                            }
+                        }
+                    }
+                }
+            }
+            if (vmSpec.getBootloader() == BootloaderType.CD) {
+                vm.setPVBootloader(conn, "eliloader");
+                if (!vm.getOtherConfig(conn).containsKey("install-repository")) {
+                    vm.addToOtherConfig(conn, "install-repository", "cdrom");
+                }
+            } else if (vmSpec.getBootloader() == BootloaderType.PyGrub) {
+                vm.setPVBootloader(conn, "pygrub");
+            } else {
+                vm.destroy(conn);
+                throw new CloudRuntimeException("Unable to handle boot loader type: " + vmSpec.getBootloader());
+            }
+        }
+        try {
+            finalizeVmMetaData(vm, conn, vmSpec);
+        } catch (Exception e) {
+            throw new CloudRuntimeException("Unable to finalize VM MetaData: " + vmSpec);
+        }
+        return vm;
+    }
+
+
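+    // Applies VM detail overrides to the XAPI platform map: either a full "platform" string, or the individual
+    // "timeoffset" and "cpu.corespersocket" values; the device_id key is dropped unless the xenserver61
+    // hypervisor tools version is requested.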
+    protected void finalizeVmMetaData(VM vm, Connection conn, VirtualMachineTO vmSpec) throws Exception {
+
+        Map<String, String> details = vmSpec.getDetails();
+        if (details != null) {
+            String platformstring = details.get("platform");
+            if (platformstring != null && !platformstring.isEmpty()) {
+                Map<String, String> platform = StringUtils.stringToMap(platformstring);
+                vm.setPlatform(conn, platform);
+            } else {
+                String timeoffset = details.get("timeoffset");
+                if (timeoffset != null) {
+                    Map<String, String> platform = vm.getPlatform(conn);
+                    platform.put("timeoffset", timeoffset);
+                    vm.setPlatform(conn, platform);
+                }
+                String coresPerSocket = details.get("cpu.corespersocket");
+                if (coresPerSocket != null) {
+                    Map<String, String> platform = vm.getPlatform(conn);
+                    platform.put("cores-per-socket", coresPerSocket);
+                    vm.setPlatform(conn, platform);
+                }
+            }
+            String xenservertoolsversion = details.get("hypervisortoolsversion");
+            if (xenservertoolsversion == null || !xenservertoolsversion.equalsIgnoreCase("xenserver61")) {
+                Map<String, String> platform = vm.getPlatform(conn);
+                platform.remove("device_id");
+                vm.setPlatform(conn, platform);
+            }
+        }
+    }
+
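+    // Best-effort cleanup after a failed VM start: force-stop the VM if it is still running, destroy it once
+    // halted, unplug and destroy its VBDs and VIFs, and tear down any VLAN networks that were created for it.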
+    protected String handleVmStartFailure(Connection conn, String vmName, VM vm, String message, Throwable th) {
+        String msg = "Unable to start " + vmName + " due to " + message;
+        s_logger.warn(msg, th);
+
+        if (vm == null) {
+            return msg;
+        }
+
+        try {
+            VM.Record vmr = vm.getRecord(conn);
+            List<Network> networks = new ArrayList<Network>();
+            for (VIF vif : vmr.VIFs) {
+                try {
+                    VIF.Record rec = vif.getRecord(conn);
+                    networks.add(rec.network);
+                } catch (Exception e) {
+                    s_logger.warn("Unable to cleanup VIF", e);
+                }
+            }
+            if (vmr.powerState == VmPowerState.RUNNING) {
+                try {
+                    vm.hardShutdown(conn);
+                } catch (Exception e) {
+                    s_logger.warn("VM hardshutdown failed due to ", e);
+                }
+            }
+            if (vm.getPowerState(conn) == VmPowerState.HALTED) {
+                try {
+                    vm.destroy(conn);
+                } catch (Exception e) {
+                    s_logger.warn("VM destroy failed due to ", e);
+                }
+            }
+            for (VBD vbd : vmr.VBDs) {
+                try {
+                    vbd.unplug(conn);
+                    vbd.destroy(conn);
+                } catch (Exception e) {
+                    s_logger.warn("Unable to clean up VBD due to ", e);
+                }
+            }
+            for (VIF vif : vmr.VIFs) {
+                try {
+                    vif.unplug(conn);
+                    vif.destroy(conn);
+                } catch (Exception e) {
+                    s_logger.warn("Unable to cleanup VIF", e);
+                }
+            }
+            for (Network network : networks) {
+                if (network.getNameLabel(conn).startsWith("VLAN")) {
+                    disableVlanNetwork(conn, network);
+                }
+            }
+        } catch (Exception e) {
+            s_logger.warn("VM getRecord failed due to ", e);
+        }
+
+        return msg;
+    }
+
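+    // Attaches an empty CD device (userdevice 3) to a system VM and inserts systemvm.iso, which is looked up
+    // in the "XenServer Tools" SR on this host and cached in _host.systemvmisouuid.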
+    protected VBD createPatchVbd(Connection conn, String vmName, VM vm) throws XmlRpcException, XenAPIException {
+
+        if (_host.systemvmisouuid == null) {
+            Set<SR> srs = SR.getByNameLabel(conn, "XenServer Tools");
+            if (srs.size() != 1) {
+                throw new CloudRuntimeException("There are " + srs.size() + " SRs with name XenServer Tools");
+            }
+            SR sr = srs.iterator().next();
+            sr.scan(conn);
+
+            SR.Record srr = sr.getRecord(conn);
+
+            if (_host.systemvmisouuid == null) {
+                for (VDI vdi : srr.VDIs) {
+                    VDI.Record vdir = vdi.getRecord(conn);
+                    if (vdir.nameLabel.contains("systemvm.iso")) {
+                        _host.systemvmisouuid = vdir.uuid;
+                        break;
+                    }
+                }
+            }
+            if (_host.systemvmisouuid == null) {
+                throw new CloudRuntimeException("Cannot find systemvm.iso in the XenServer Tools SR");
+            }
+        }
+
+        VBD.Record cdromVBDR = new VBD.Record();
+        cdromVBDR.VM = vm;
+        cdromVBDR.empty = true;
+        cdromVBDR.bootable = false;
+        cdromVBDR.userdevice = "3";
+        cdromVBDR.mode = Types.VbdMode.RO;
+        cdromVBDR.type = Types.VbdType.CD;
+        VBD cdromVBD = VBD.create(conn, cdromVBDR);
+        cdromVBD.insert(conn, VDI.getByUuid(conn, _host.systemvmisouuid));
+
+        return cdromVBD;
+    }
+
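+    // Ejects and destroys the systemvm.iso CD device once a system VM (r-/s-/v- prefixed name) is up; user VMs
+    // are left untouched.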
+    protected void destroyPatchVbd(Connection conn, String vmName) throws XmlRpcException, XenAPIException {
+        try {
+            if (!vmName.startsWith("r-") && !vmName.startsWith("s-") && !vmName.startsWith("v-")) {
+                return;
+            }
+            Set<VM> vms = VM.getByNameLabel(conn, vmName);
+            for (VM vm : vms) {
+                Set<VBD> vbds = vm.getVBDs(conn);
+                for (VBD vbd : vbds) {
+                    if (vbd.getType(conn) == Types.VbdType.CD) {
+                        vbd.eject(conn);
+                        vbd.destroy(conn);
+                        break;
+                    }
+                }
+            }
+        } catch (Exception e) {
+            s_logger.debug("Cannot destroy CD-ROM device for VM " + vmName + " due to " + e.toString(), e);
+        }
+    }
+
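+    // Verifies a freshly started system VM by pinging its command port on the private IP; on success the
+    // systemvm.iso patch CD is detached.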
+    protected CheckSshAnswer execute(CheckSshCommand cmd) {
+        Connection conn = getConnection();
+        String vmName = cmd.getName();
+        String privateIp = cmd.getIp();
+        int cmdPort = cmd.getPort();
+
+        if (s_logger.isDebugEnabled()) {
+            s_logger.debug("Ping command port, " + privateIp + ":" + cmdPort);
+        }
+
+        try {
+            String result = connect(conn, cmd.getName(), privateIp, cmdPort);
+            if (result != null) {
+                return new CheckSshAnswer(cmd, "Cannot ping system VM " + vmName + " due to: " + result);
+            }
+            destroyPatchVbd(conn, vmName);
+        } catch (Exception e) {
+            return new CheckSshAnswer(cmd, e);
+        }
+
+        if (s_logger.isDebugEnabled()) {
+            s_logger.debug("Ping command port succeeded for vm " + vmName);
+        }
+
+        return new CheckSshAnswer(cmd);
+    }
+
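+    // Parses the "/"-separated key;value payload of an OVS vSwitch broadcast URI into a map, translating the
+    // '@'/'#' placeholders in the "vlans" entry back into '[' and ']'.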
+    private HashMap<String, String> parseDefaultOvsRuleComamnd(String str) {
+        HashMap<String, String> cmd = new HashMap<String, String>();
+        String[] sarr = str.split("/");
+        for (int i = 0; i < sarr.length; i++) {
+            String c = sarr[i];
+            c = c.startsWith("/") ? c.substring(1) : c;
+            c = c.endsWith("/") ? c.substring(0, c.length() - 1) : c;
+            String[] p = c.split(";");
+            if (p.length != 2) {
+                continue;
+            }
+            if (p[0].equalsIgnoreCase("vlans")) {
+                p[1] = p[1].replace("@", "[");
+                p[1] = p[1].replace("#", "]");
+            }
+            cmd.put(p[0], p[1]);
+        }
+        return cmd;
+    }
+
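+    // Removes any leftover temporary dom0 VIFs plugged into the given network (their name is kept in the VIF's
+    // other-config under "nameLabel").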
+    private void cleanUpTmpDomVif(Connection conn, Network nw) throws XenAPIException, XmlRpcException {
+
+        Pair<VM, VM.Record> vm = getControlDomain(conn);
+        VM dom0 = vm.first();
+        Set<VIF> dom0Vifs = dom0.getVIFs(conn);
+        for (VIF v : dom0Vifs) {
+            String vifName = "unknown";
+            try {
+                VIF.Record vifr = v.getRecord(conn);
+                if (v.getNetwork(conn).getUuid(conn).equals(nw.getUuid(conn))) {
+                    Map<String, String> config = vifr.otherConfig;
+                    vifName = config.get("nameLabel");
+                    s_logger.debug("Found a temporary VIF in dom0 for the network; destroying it");
+                    v.destroy(conn);
+                    s_logger.debug("Destroyed temp dom0 vif " + vifName + " successfully");
+                }
+            } catch (Exception e) {
+                s_logger.warn("Failed to destroy temp dom0 vif " + vifName, e);
+            }
+        }
+    }
+
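+    // Programs private VLAN (PVLAN) rules through the ovs-pvlan host plugin: one call shape for the DHCP server
+    // entry (name/ip/mac) and another for a regular VM (mac only), both scoped to the guest network's XAPI
+    // name-label.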
+    private Answer execute(PvlanSetupCommand cmd) {
+        Connection conn = getConnection();
+
+        String primaryPvlan = cmd.getPrimary();
+        String isolatedPvlan = cmd.getIsolated();
+        String op = cmd.getOp();
+        String dhcpName = cmd.getDhcpName();
+        String dhcpMac = cmd.getDhcpMac();
+        String dhcpIp = cmd.getDhcpIp();
+        String vmMac = cmd.getVmMac();
+        String networkTag = cmd.getNetworkTag();
+
+        XsLocalNetwork nw = null;
+        String nwNameLabel = null;
+        try {
+            nw = getNativeNetworkForTraffic(conn, TrafficType.Guest, networkTag);
+            nwNameLabel = nw.getNetwork().getNameLabel(conn);
+        } catch (XenAPIException e) {
+            s_logger.warn("Failed to get network", e);
+            return new Answer(cmd, false, e.toString());
+        } catch (XmlRpcException e) {
+            s_logger.warn("Failed to get network", e);
+            return new Answer(cmd, false, e.toString());
+        }
+
+        String result = null;
+        if (cmd.getType() == PvlanSetupCommand.Type.DHCP) {
+            result =
+                    callHostPlugin(conn, "ovs-pvlan", "setup-pvlan-dhcp", "op", op, "nw-label", nwNameLabel, "primary-pvlan", primaryPvlan, "isolated-pvlan", isolatedPvlan,
+                            "dhcp-name", dhcpName, "dhcp-ip", dhcpIp, "dhcp-mac", dhcpMac);
+            if (result == null || result.isEmpty() || !Boolean.parseBoolean(result)) {
+                s_logger.warn("Failed to program pvlan for dhcp server with mac " + dhcpMac);
+                return new Answer(cmd, false, result);
+            } else {
+                s_logger.info("Programmed pvlan for dhcp server with mac " + dhcpMac);
+            }
+        } else if (cmd.getType() == PvlanSetupCommand.Type.VM) {
+            result =
+                    callHostPlugin(conn, "ovs-pvlan", "setup-pvlan-vm", "op", op, "nw-label", nwNameLabel, "primary-pvlan", primaryPvlan, "isolated-pvlan", isolatedPvlan,
+                            "vm-mac", vmMac);
+            if (result == null || result.isEmpty() || !Boolean.parseBoolean(result)) {
+                s_logger.warn("Failed to program pvlan for vm with mac " + vmMac);
+                return new Answer(cmd, false, result);
+            } else {
+                s_logger.info("Programmed pvlan for vm with mac " + vmMac);
+            }
+        }
+        return new Answer(cmd, true, result);
+    }
+
+    @Override
+    public StartAnswer execute(StartCommand cmd) {
+        Connection conn = getConnection();
+        VirtualMachineTO vmSpec = cmd.getVirtualMachine();
+        String vmName = vmSpec.getName();
+        State state = State.Stopped;
+        VM vm = null;
+        // if a VDI is created, record its UUID to send back to the CS MS
+        Map<String, String> iqnToPath = new HashMap<String, String>();
+        try {
+            Set<VM> vms = VM.getByNameLabel(conn, vmName);
+            if (vms != null) {
+                for (VM v : vms) {
+                    VM.Record vRec = v.getRecord(conn);
+                    if (vRec.powerState == VmPowerState.HALTED) {
+                        v.destroy(conn);
+                    } else if (vRec.powerState == VmPowerState.RUNNING) {
+                        String host = vRec.residentOn.getUuid(conn);
+                        String msg = "VM " + vmName + " is running on host " + host;
+                        s_logger.debug(msg);
+                        return new StartAnswer(cmd, msg, host);
+                    } else {
+                        String msg = "There is already a VM named " + vmName + "; vm record: " + vRec.toString();
+                        s_logger.warn(msg);
+                        return new StartAnswer(cmd, msg);
+                    }
+                }
+            }
+            synchronized (_cluster.intern()) {
+                s_vms.put(_cluster, _name, vmName, State.Starting);
+            }
+            s_logger.debug("1. The VM " + vmName + " is in Starting state.");
+
+            Host host = Host.getByUuid(conn, _host.uuid);
+            vm = createVmFromTemplate(conn, vmSpec, host);
+
+            GPUDeviceTO gpuDevice = vmSpec.getGpuDevice();
+            if (gpuDevice != null) {
+                s_logger.debug("Creating VGPU of type: " + gpuDevice.getVgpuType() + " in GPU group "
+                        + gpuDevice.getGpuGroup() + " for VM " + vmName);
+                createVGPU(conn, cmd, vm, gpuDevice);
+            }
+
+            for (DiskTO disk : vmSpec.getDisks()) {
+                VDI newVdi = prepareManagedDisk(conn, disk, vmName);
+
+                if (newVdi != null) {
+                    String path = newVdi.getUuid(conn);
+
+                    iqnToPath.put(disk.getDetails().get(DiskTO.IQN), path);
+                }
+
+                createVbd(conn, disk, vmName, vm, vmSpec.getBootloader(), newVdi);
+            }
+
+            if (vmSpec.getType() != VirtualMachine.Type.User) {
+                createPatchVbd(conn, vmName, vm);
+            }
+
+            for (NicTO nic : vmSpec.getNics()) {
+                createVif(conn, vmName, vm, vmSpec, nic);
+            }
+
+            startVM(conn, host, vm, vmName);
+
+            if (_isOvs) {
+                // TODO(Salvatore-orlando): This code should go
+                for (NicTO nic : vmSpec.getNics()) {
+                    if (nic.getBroadcastType() == Networks.BroadcastDomainType.Vswitch) {
+                        HashMap<String, String> args = parseDefaultOvsRuleComamnd(BroadcastDomainType.getValue(nic.getBroadcastUri()));
+                        OvsSetTagAndFlowCommand flowCmd =
+                                new OvsSetTagAndFlowCommand(args.get("vmName"), args.get("tag"), args.get("vlans"), args.get("seqno"), Long.parseLong(args.get("vmId")));
+                        OvsSetTagAndFlowAnswer r = execute(flowCmd);
+                        if (!r.getResult()) {
+                            s_logger.warn("Failed to set flow for VM " + r.getVmId());
+                        } else {
+                            s_logger.info("Successfully set flow for VM " + r.getVmId());
+                        }
+                    }
+                }
+            }
+
+            if (_canBridgeFirewall) {
+                String result = null;
+                if (vmSpec.getType() != VirtualMachine.Type.User) {
+                    NicTO[] nics = vmSpec.getNics();
+                    boolean secGrpEnabled = false;
+                    for (NicTO nic : nics) {
+                        if (nic.isSecurityGroupEnabled() ||
+                                (nic.getIsolationUri() != null && nic.getIsolationUri().getScheme().equalsIgnoreCase(IsolationType.Ec2.toString()))) {
+                            secGrpEnabled = true;
+                            break;
+                        }
+                    }
+                    if (secGrpEnabled) {
+                        result = callHostPlugin(conn, "vmops", "default_network_rules_systemvm", "vmName", vmName);
+                        if (result == null || result.isEmpty() || !Boolean.parseBoolean(result)) {
+                            s_logger.warn("Failed to program default network rules for " + vmName);
+                        } else {
+                            s_logger.info("Programmed default network rules for " + vmName);
+                        }
+                    }
+
+                } else {
+                    // For a user VM, program the rules for each NIC if the isolation URI scheme is ec2
+                    NicTO[] nics = vmSpec.getNics();
+                    for (NicTO nic : nics) {
+                        if (nic.isSecurityGroupEnabled() || nic.getIsolationUri() != null &&
+                                nic.getIsolationUri().getScheme().equalsIgnoreCase(IsolationType.Ec2.toString())) {
+                            List<String> nicSecIps = nic.getNicSecIps();
+                            String secIpsStr;
+                            StringBuilder sb = new StringBuilder();
+                            if (nicSecIps != null) {
+                                for (String ip : nicSecIps) {
+                                    sb.append(ip).append(":");
+                                }
+                                secIpsStr = sb.toString();
+                            } else {
+                                secIpsStr = "0:";
+                            }
+                            result =
+                                    callHostPlugin(conn, "vmops", "default_network_rules", "vmName", vmName, "vmIP", nic.getIp(), "vmMAC", nic.getMac(), "vmID",
+                                            Long.toString(vmSpec.getId()), "secIps", secIpsStr);
+
+                            if (result == null || result.isEmpty() || !Boolean.parseBoolean(result)) {
+                                s_logger.warn("Failed to program default network rules for " + vmName + " on nic with ip:" + nic.getIp() + " mac:" + nic.getMac());
+                            } else {
+                                s_logger.info("Programmed default network rules for " + vmName + " on nic with ip:" + nic.getIp() + " mac:" + nic.getMac());
+                            }
+                        }
+                    }
+                }
+            }
+
+            state = State.Running;
+
+            StartAnswer startAnswer = new StartAnswer(cmd);
+
+            startAnswer.setIqnToPath(iqnToPath);
+
+            return startAnswer;
+        } catch (Exception e) {
+            s_logger.warn("Caught exception: " + e.getClass().toString() + " due to " + e.toString(), e);
+            String msg = handleVmStartFailure(conn, vmName, vm, "", e);
+
+            StartAnswer startAnswer = new StartAnswer(cmd, msg);
+
+            startAnswer.setIqnToPath(iqnToPath);
+
+            return startAnswer;
+        } finally {
+            synchronized (_cluster.intern()) {
+                if (state != State.Stopped) {
+                    s_vms.put(_cluster, _name, vmName, state);
+                    s_logger.debug("2. The VM " + vmName + " is in " + state + " state.");
+                } else {
+                    s_vms.remove(_cluster, _name, vmName);
+                    s_logger.debug("The VM is in Stopped state; detected a problem during startup: " + vmName);
+                }
+            }
+        }
+    }
+
+    // Checks whether the DiskTO in question is backed by managed storage and does not yet have an SR on this
+    // host; if there is no SR, create it and create a VDI in it.
+    private VDI prepareManagedDisk(Connection conn, DiskTO disk, String vmName) throws Exception {
+        Map<String, String> details = disk.getDetails();
+
+        if (details == null) {
+            return null;
+        }
+
+        boolean isManaged = Boolean.parseBoolean(details.get(DiskTO.MANAGED));
+
+        if (!isManaged) {
+            return null;
+        }
+
+        String iqn = details.get(DiskTO.IQN);
+
+        Set<SR> srNameLabels = SR.getByNameLabel(conn, iqn);
+
+        if (srNameLabels.size() != 0) {
+            return null;
+        }
+
+        String vdiNameLabel = vmName + "-DATA";
+
+        return prepareManagedStorage(conn, details, null, vdiNameLabel);
+    }
+
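+    // Creates (or reuses) the iSCSI SR for a managed volume using the IQN, storage host, and CHAP credentials
+    // carried in the disk details.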
+    protected SR prepareManagedSr(Connection conn, Map<String, String> details) {
+        String iScsiName = details.get(DiskTO.IQN);
+        String storageHost = details.get(DiskTO.STORAGE_HOST);
+        String chapInitiatorUsername = details.get(DiskTO.CHAP_INITIATOR_USERNAME);
+        String chapInitiatorSecret = details.get(DiskTO.CHAP_INITIATOR_SECRET);
+
+        return getIscsiSR(conn, iScsiName, storageHost, iScsiName, chapInitiatorUsername, chapInitiatorSecret, true);
+    }
+
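+    // Ensures managed storage is usable on this host: prepare the iSCSI SR, then either look up the existing
+    // VDI by its UUID path or, if none exists yet, create a new VDI of the requested volume size.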
+    protected VDI prepareManagedStorage(Connection conn, Map<String, String> details, String path, String vdiNameLabel) throws Exception {
+        SR sr = prepareManagedSr(conn, details);
+
+        VDI vdi = getVDIbyUuid(conn, path, false);
+
+        if (vdi == null) {
+            Long volumeSize = Long.parseLong(details.get(DiskTO.VOLUME_SIZE));
+
+            vdi = createVdi(sr, vdiNameLabel, volumeSize);
+        }
+
+        return vdi;
+    }
+
+    protected Answer execute(ModifySshKeysCommand cmd) {
+        return new Answer(cmd);
+    }
+
+    

<TRUNCATED>