Posted to commits@cloudstack.apache.org by da...@apache.org on 2019/05/23 12:30:44 UTC

[cloudstack] 01/01: Merge release branch 4.11 to 4.12

This is an automated email from the ASF dual-hosted git repository.

dahn pushed a commit to branch 4.12
in repository https://gitbox.apache.org/repos/asf/cloudstack.git

commit 29918e25e376d1375928f8bd51648d760ad2492e
Merge: 00ff536 e86f671
Author: Daan Hoogland <da...@shapeblue.com>
AuthorDate: Thu May 23 14:29:41 2019 +0200

    Merge release branch 4.11 to 4.12
    
    * 4.11:
      KVM: Fix agents dont reconnect post maintenance (#3239)

 agent/src/main/java/com/cloud/agent/Agent.java     |   4 +-
 .../java/com/cloud/resource/ResourceManager.java   |   5 +
 .../java/com/cloud/agent/manager/AgentAttache.java |   3 +-
 .../main/java/com/cloud/configuration/Config.java  |   8 -
 .../com/cloud/resource/ResourceManagerImpl.java    |  92 +++--
 .../cloud/resource/ResourceManagerImplTest.java    | 105 +++++-
 test/integration/smoke/test_host_maintenance.py    | 402 ++++++++++++++++++---
 7 files changed, 530 insertions(+), 89 deletions(-)
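
The Config.java hunk below drops the KvmSshToAgentEnabled entry from the deprecated Config enum; per the @Deprecated note on that enum, such settings are meant to move to the dynamic ConfigKey framework, and the small addition to ResourceManager.java in the stat above is presumably where the key now lives. As a rough sketch only (the owning interface name and the exact constructor overload are assumptions, not read from this diff), a ConfigKey-based declaration of the same setting would look roughly like this:

    import org.apache.cloudstack.framework.config.ConfigKey;

    // Assumed sketch, not part of this commit: the same setting expressed as a
    // dynamic ConfigKey, declared on whichever manager interface now owns it
    // ("ExampleManager" is a placeholder). The (category, type, name, default,
    // description, isDynamic) constructor is the common ConfigKey form in CloudStack.
    public interface ExampleManager {
        ConfigKey<Boolean> KvmSshToAgentEnabled = new ConfigKey<Boolean>(
                "Advanced",
                Boolean.class,
                "kvm.ssh.to.agent",
                "true",
                "Specify whether or not the management server is allowed to SSH into KVM Agents",
                false);
    }

Code that needs the value then calls KvmSshToAgentEnabled.value() instead of looking the key up through the Config enum, which is why the enum entry can simply be deleted here.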

diff --cc server/src/main/java/com/cloud/configuration/Config.java
index d365ef0,0000000..87cf779
mode 100644,000000..100644
--- a/server/src/main/java/com/cloud/configuration/Config.java
+++ b/server/src/main/java/com/cloud/configuration/Config.java
@@@ -1,2018 -1,0 +1,2010 @@@
 +// Licensed to the Apache Software Foundation (ASF) under one
 +// or more contributor license agreements.  See the NOTICE file
 +// distributed with this work for additional information
 +// regarding copyright ownership.  The ASF licenses this file
 +// to you under the Apache License, Version 2.0 (the
 +// "License"); you may not use this file except in compliance
 +// with the License.  You may obtain a copy of the License at
 +//
 +//   http://www.apache.org/licenses/LICENSE-2.0
 +//
 +// Unless required by applicable law or agreed to in writing,
 +// software distributed under the License is distributed on an
 +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 +// KIND, either express or implied.  See the License for the
 +// specific language governing permissions and limitations
 +// under the License.
 +package com.cloud.configuration;
 +
 +import java.util.ArrayList;
 +import java.util.HashMap;
 +import java.util.List;
 +import java.util.StringTokenizer;
 +
 +import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService;
 +import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
 +import org.apache.cloudstack.framework.config.ConfigKey;
 +
 +import com.cloud.agent.AgentManager;
 +import com.cloud.consoleproxy.ConsoleProxyManager;
 +import com.cloud.ha.HighAvailabilityManager;
 +import com.cloud.hypervisor.Hypervisor.HypervisorType;
 +import com.cloud.network.router.VpcVirtualNetworkApplianceManager;
 +import com.cloud.network.vpc.VpcManager;
 +import com.cloud.server.ManagementServer;
 +import com.cloud.storage.StorageManager;
 +import com.cloud.storage.secondary.SecondaryStorageVmManager;
 +import com.cloud.storage.snapshot.SnapshotManager;
 +import com.cloud.template.TemplateManager;
 +import com.cloud.vm.UserVmManager;
 +import com.cloud.vm.snapshot.VMSnapshotManager;
 +
 +/**
 + * @deprecated use the more dynamic ConfigKey
 + */
 +@Deprecated
 +public enum Config {
 +
 +    // Alert
 +
 +    AlertEmailAddresses(
 +            "Alert",
 +            ManagementServer.class,
 +            String.class,
 +            "alert.email.addresses",
 +            null,
 +            "Comma separated list of email addresses used for sending alerts.",
 +            null),
 +    AlertEmailSender("Alert", ManagementServer.class, String.class, "alert.email.sender", null, "Sender of alert email (will be in the From header of the email).", null),
 +    AlertSMTPHost("Alert", ManagementServer.class, String.class, "alert.smtp.host", null, "SMTP hostname used for sending out email alerts.", null),
 +    AlertSMTPPassword(
 +            "Secure",
 +            ManagementServer.class,
 +            String.class,
 +            "alert.smtp.password",
 +            null,
 +            "Password for SMTP authentication (applies only if alert.smtp.useAuth is true).",
 +            null),
 +    AlertSMTPPort("Alert", ManagementServer.class, Integer.class, "alert.smtp.port", "465", "Port the SMTP server is listening on.", null),
 +    AlertSMTPConnectionTimeout("Alert", ManagementServer.class, Integer.class, "alert.smtp.connectiontimeout", "30000",
 +            "Socket connection timeout value in milliseconds. -1 for infinite timeout.", null),
 +    AlertSMTPTimeout(
 +            "Alert",
 +            ManagementServer.class,
 +            Integer.class,
 +            "alert.smtp.timeout",
 +            "30000",
 +            "Socket I/O timeout value in milliseconds. -1 for infinite timeout.",
 +            null),
 +    AlertSMTPUseAuth("Alert", ManagementServer.class, String.class, "alert.smtp.useAuth", null, "If true, use SMTP authentication when sending emails.", null),
 +    AlertSMTPUsername(
 +            "Alert",
 +            ManagementServer.class,
 +            String.class,
 +            "alert.smtp.username",
 +            null,
 +            "Username for SMTP authentication (applies only if alert.smtp.useAuth is true).",
 +            null),
 +    CapacityCheckPeriod("Alert", ManagementServer.class, Integer.class, "capacity.check.period", "300000", "The interval in milliseconds between capacity checks", null),
 +    PublicIpCapacityThreshold(
 +            "Alert",
 +            ManagementServer.class,
 +            Float.class,
 +            "zone.virtualnetwork.publicip.capacity.notificationthreshold",
 +            "0.75",
 +            "Percentage (as a value between 0 and 1) of public IP address space utilization above which alerts will be sent.",
 +            null),
 +    PrivateIpCapacityThreshold(
 +            "Alert",
 +            ManagementServer.class,
 +            Float.class,
 +            "pod.privateip.capacity.notificationthreshold",
 +            "0.75",
 +            "Percentage (as a value between 0 and 1) of private IP address space utilization above which alerts will be sent.",
 +            null),
 +    SecondaryStorageCapacityThreshold(
 +            "Alert",
 +            ManagementServer.class,
 +            Float.class,
 +            "zone.secstorage.capacity.notificationthreshold",
 +            "0.75",
 +            "Percentage (as a value between 0 and 1) of secondary storage utilization above which alerts will be sent about low storage available.",
 +            null),
 +    VlanCapacityThreshold(
 +            "Alert",
 +            ManagementServer.class,
 +            Float.class,
 +            "zone.vlan.capacity.notificationthreshold",
 +            "0.75",
 +            "Percentage (as a value between 0 and 1) of Zone Vlan utilization above which alerts will be sent about low number of Zone Vlans.",
 +            null),
 +    DirectNetworkPublicIpCapacityThreshold(
 +            "Alert",
 +            ManagementServer.class,
 +            Float.class,
 +            "zone.directnetwork.publicip.capacity.notificationthreshold",
 +            "0.75",
 +            "Percentage (as a value between 0 and 1) of Direct Network Public Ip Utilization above which alerts will be sent about low number of direct network public ips.",
 +            null),
 +    LocalStorageCapacityThreshold(
 +            "Alert",
 +            ManagementServer.class,
 +            Float.class,
 +            "cluster.localStorage.capacity.notificationthreshold",
 +            "0.75",
 +            "Percentage (as a value between 0 and 1) of local storage utilization above which alerts will be sent about low local storage available.",
 +            null),
 +
 +    // Storage
 +
 +    StorageStatsInterval(
 +            "Storage",
 +            ManagementServer.class,
 +            String.class,
 +            "storage.stats.interval",
 +            "60000",
 +            "The interval (in milliseconds) when storage stats (per host) are retrieved from agents.",
 +            null),
 +    MaxVolumeSize("Storage", ManagementServer.class, Integer.class, "storage.max.volume.size", "2000", "The maximum size for a volume (in GB).", null),
 +    StorageCacheReplacementLRUTimeInterval(
 +            "Storage",
 +            ManagementServer.class,
 +            Integer.class,
 +            "storage.cache.replacement.lru.interval",
 +            "30",
 +            "time interval for unused data on cache storage (in days).",
 +            null),
 +    StorageCacheReplacementEnabled(
 +            "Storage",
 +            ManagementServer.class,
 +            Boolean.class,
 +            "storage.cache.replacement.enabled",
 +            "true",
 +            "enable or disable cache storage replacement algorithm.",
 +            null),
 +    StorageCacheReplacementInterval(
 +            "Storage",
 +            ManagementServer.class,
 +            Integer.class,
 +            "storage.cache.replacement.interval",
 +            "86400",
 +            "time interval between cache replacement threads (in seconds).",
 +            null),
 +    MaxUploadVolumeSize("Storage", ManagementServer.class, Integer.class, "storage.max.volume.upload.size", "500", "The maximum size for an uploaded volume (in GB).", null),
 +    TotalRetries(
 +            "Storage",
 +            AgentManager.class,
 +            Integer.class,
 +            "total.retries",
 +            "4",
 +            "The number of times each command sent to a host should be retried in case of failure.",
 +            null),
 +    StoragePoolMaxWaitSeconds(
 +            "Storage",
 +            ManagementServer.class,
 +            Integer.class,
 +            "storage.pool.max.waitseconds",
 +            "3600",
 +            "Timeout (in seconds) to synchronize storage pool operations.",
 +            null),
 +    CreateVolumeFromSnapshotWait(
 +            "Storage",
 +            StorageManager.class,
 +            Integer.class,
 +            "create.volume.from.snapshot.wait",
 +            "10800",
 +            "Timeout (in seconds) for creating a volume from a snapshot",
 +            null),
 +    CopyVolumeWait("Storage", StorageManager.class, Integer.class, "copy.volume.wait", "10800", "Timeout (in seconds) for the copy volume command", null),
 +    CreatePrivateTemplateFromVolumeWait(
 +            "Storage",
 +            UserVmManager.class,
 +            Integer.class,
 +            "create.private.template.from.volume.wait",
 +            "10800",
 +            "Timeout (in seconds) for CreatePrivateTemplateFromVolumeCommand",
 +            null),
 +    CreatePrivateTemplateFromSnapshotWait(
 +            "Storage",
 +            UserVmManager.class,
 +            Integer.class,
 +            "create.private.template.from.snapshot.wait",
 +            "10800",
 +            "Timeout (in seconds) for CreatePrivateTemplateFromSnapshotCommand",
 +            null),
 +    BackupSnapshotWait("Storage", StorageManager.class, Integer.class, "backup.snapshot.wait", "21600", "Timeout (in seconds) for BackupSnapshotCommand", null),
 +    HAStorageMigration(
 +            "Storage",
 +            ManagementServer.class,
 +            Boolean.class,
 +            "enable.ha.storage.migration",
 +            "true",
 +            "Enable/disable storage migration across primary storage during HA",
 +            null),
 +
 +    // Network
 +    NetworkLBHaproxyStatsVisbility(
 +            "Network",
 +            ManagementServer.class,
 +            String.class,
 +            "network.loadbalancer.haproxy.stats.visibility",
 +            "global",
 +            "Load Balancer(haproxy) stats visibility, the value can be one of the following six parameters: global,guest-network,link-local,disabled,all,default",
 +            null),
 +    NetworkLBHaproxyStatsUri(
 +            "Network",
 +            ManagementServer.class,
 +            String.class,
 +            "network.loadbalancer.haproxy.stats.uri",
 +            "/admin?stats",
 +            "Load Balancer(haproxy) uri.",
 +            null),
 +    NetworkLBHaproxyStatsAuth(
 +            "Secure",
 +            ManagementServer.class,
 +            String.class,
 +            "network.loadbalancer.haproxy.stats.auth",
 +            "admin1:AdMiN123",
 +            "Load Balancer(haproxy) authentication string in the format username:password",
 +            null),
 +    NetworkLBHaproxyStatsPort(
 +            "Network",
 +            ManagementServer.class,
 +            String.class,
 +            "network.loadbalancer.haproxy.stats.port",
 +            "8081",
 +            "Load Balancer(haproxy) stats port number.",
 +            null),
 +    NetworkLBHaproxyMaxConn(
 +            "Network",
 +            ManagementServer.class,
 +            Integer.class,
 +            "network.loadbalancer.haproxy.max.conn",
 +            "4096",
 +            "Load Balancer(haproxy) maximum number of concurrent connections(global max)",
 +            null),
 +    NetworkRouterRpFilter(
 +            "Network",
 +            ManagementServer.class,
 +            Integer.class,
 +            "network.disable.rpfilter",
 +            "true",
 +            "disable rp_filter on Domain Router VM public interfaces.",
 +            null),
 +
 +    GuestVlanBits(
 +            "Network",
 +            ManagementServer.class,
 +            Integer.class,
 +            "guest.vlan.bits",
 +            "12",
 +            "The number of bits to reserve for the VLAN identifier in the guest subnet.",
 +            null),
 +    //MulticastThrottlingRate("Network", ManagementServer.class, Integer.class, "multicast.throttling.rate", "10", "Default multicast rate in megabits per second allowed.", null),
 +    DirectNetworkNoDefaultRoute(
 +            "Network",
 +            ManagementServer.class,
 +            Boolean.class,
 +            "direct.network.no.default.route",
 +            "false",
 +            "Direct Network Dhcp Server should not send a default route",
 +            "true/false"),
 +    OvsTunnelNetworkDefaultLabel(
 +            "Network",
 +            ManagementServer.class,
 +            String.class,
 +            "sdn.ovs.controller.default.label",
 +            "cloud-public",
 +            "Default network label to be used when fetching interface for GRE endpoints",
 +            null),
 +    VmNetworkThrottlingRate(
 +            "Network",
 +            ManagementServer.class,
 +            Integer.class,
 +            "vm.network.throttling.rate",
 +            "200",
 +            "Default data transfer rate in megabits per second allowed in User vm's default network.",
 +            null),
 +
 +    SecurityGroupWorkCleanupInterval(
 +            "Network",
 +            ManagementServer.class,
 +            Integer.class,
 +            "network.securitygroups.work.cleanup.interval",
 +            "120",
 +            "Time interval (seconds) in which finished work is cleaned up from the work table",
 +            null),
 +    SecurityGroupWorkerThreads(
 +            "Network",
 +            ManagementServer.class,
 +            Integer.class,
 +            "network.securitygroups.workers.pool.size",
 +            "50",
 +            "Number of worker threads processing the security group update work queue",
 +            null),
 +    SecurityGroupWorkGlobalLockTimeout(
 +            "Network",
 +            ManagementServer.class,
 +            Integer.class,
 +            "network.securitygroups.work.lock.timeout",
 +            "300",
 +            "Lock wait timeout (seconds) while updating the security group work queue",
 +            null),
 +    SecurityGroupWorkPerAgentMaxQueueSize(
 +            "Network",
 +            ManagementServer.class,
 +            Integer.class,
 +            "network.securitygroups.work.per.agent.queue.size",
 +            "100",
 +            "The number of outstanding security group work items that can be queued to a host. If exceeded, work items will get dropped to conserve memory. Security Group Sync will take care of ensuring that the host gets updated eventually",
 +            null),
 +
 +    SecurityGroupDefaultAdding(
 +            "Network",
 +            ManagementServer.class,
 +            Boolean.class,
 +            "network.securitygroups.defaultadding",
 +            "true",
 +            "If true, the user VM would be added to the default security group by default",
 +            null),
 +
 +    GuestOSNeedGatewayOnNonDefaultNetwork(
 +            "Network",
 +            NetworkOrchestrationService.class,
 +            String.class,
 +            "network.dhcp.nondefaultnetwork.setgateway.guestos",
 +            "Windows",
 +            "Guest OS names starting with these values will receive gateway information from the DHCP server even when the network they are on is not the default network. Names are separated by comma.",
 +            null),
 +
 +    //VPN
 +    RemoteAccessVpnPskLength(
 +            "Network",
 +            AgentManager.class,
 +            Integer.class,
 +            "remote.access.vpn.psk.length",
 +            "24",
 +            "The length of the ipsec preshared key (minimum 8, maximum 256)",
 +            null),
 +    RemoteAccessVpnUserLimit(
 +            "Network",
 +            AgentManager.class,
 +            String.class,
 +            "remote.access.vpn.user.limit",
 +            "8",
 +            "The maximum number of VPN users that can be created per account",
 +            null),
 +    Site2SiteVpnConnectionPerVpnGatewayLimit(
 +            "Network",
 +            ManagementServer.class,
 +            Integer.class,
 +            "site2site.vpn.vpngateway.connection.limit",
 +            "4",
 +            "The maximum number of VPN connection per VPN gateway",
 +            null),
 +    Site2SiteVpnSubnetsPerCustomerGatewayLimit(
 +            "Network",
 +            ManagementServer.class,
 +            Integer.class,
 +            "site2site.vpn.customergateway.subnets.limit",
 +            "10",
 +            "The maximum number of subnets per customer gateway",
 +            null),
 +    MaxNumberOfSecondaryIPsPerNIC(
 +            "Network", ManagementServer.class, Integer.class,
 +            "vm.network.nic.max.secondary.ipaddresses", "256",
 +            "Specify the number of secondary ip addresses per nic per vm. Default value 10 is used, if not specified.", null),
 +
 +    EnableServiceMonitoring(
 +            "Network", ManagementServer.class, Boolean.class,
 +            "network.router.enableserviceMonitoring", "false",
 +            "Enable/disable service monitoring in the virtual router, default false", null),
 +
 +
 +    // Console Proxy
 +    ConsoleProxyCapacityStandby(
 +            "Console Proxy",
 +            AgentManager.class,
 +            String.class,
 +            "consoleproxy.capacity.standby",
 +            "10",
 +            "The minimal number of console proxy viewer sessions that the system is able to serve immediately (standby capacity)",
 +            null),
 +    ConsoleProxyCapacityScanInterval(
 +            "Console Proxy",
 +            AgentManager.class,
 +            String.class,
 +            "consoleproxy.capacityscan.interval",
 +            "30000",
 +            "The time interval (in milliseconds) to scan whether the system needs more console proxies to ensure minimal standby capacity",
 +            null),
 +    ConsoleProxyCmdPort(
 +            "Console Proxy",
 +            AgentManager.class,
 +            Integer.class,
 +            "consoleproxy.cmd.port",
 +            "8001",
 +            "Console proxy command port that is used to communicate with management server",
 +            null),
 +    ConsoleProxyRestart("Console Proxy", AgentManager.class, Boolean.class, "consoleproxy.restart", "true", "Console proxy restart flag, defaulted to true", null),
 +    ConsoleProxyUrlDomain("Console Proxy", AgentManager.class, String.class, "consoleproxy.url.domain", "", "Console proxy url domain", "domainName"),
 +    ConsoleProxySessionMax(
 +            "Console Proxy",
 +            AgentManager.class,
 +            Integer.class,
 +            "consoleproxy.session.max",
 +            String.valueOf(ConsoleProxyManager.DEFAULT_PROXY_CAPACITY),
 +            "The max number of viewer sessions the console proxy is configured to serve",
 +            null),
 +    ConsoleProxySessionTimeout(
 +            "Console Proxy",
 +            AgentManager.class,
 +            Integer.class,
 +            "consoleproxy.session.timeout",
 +            "300000",
 +            "Timeout(in milliseconds) that console proxy tries to maintain a viewer session before it times out the session for no activity",
 +            null),
 +    ConsoleProxyDisableRpFilter(
 +            "Console Proxy",
 +            AgentManager.class,
 +            Integer.class,
 +            "consoleproxy.disable.rpfilter",
 +            "true",
 +            "disable rp_filter on console proxy VM public interface",
 +            null),
 +    ConsoleProxyLaunchMax(
 +            "Console Proxy",
 +            AgentManager.class,
 +            Integer.class,
 +            "consoleproxy.launch.max",
 +            "10",
 +            "maximum number of console proxy instances that can be launched per zone",
 +            null),
 +    ConsoleProxyManagementState(
 +            "Console Proxy",
 +            AgentManager.class,
 +            String.class,
 +            "consoleproxy.management.state",
 +            com.cloud.consoleproxy.ConsoleProxyManagementState.Auto.toString(),
 +            "console proxy service management state",
 +            null),
 +    ConsoleProxyManagementLastState(
 +            "Console Proxy",
 +            AgentManager.class,
 +            String.class,
 +            "consoleproxy.management.state.last",
 +            com.cloud.consoleproxy.ConsoleProxyManagementState.Auto.toString(),
 +            "last console proxy service management state",
 +            null),
 +
 +    // Snapshots
 +
 +    SnapshotPollInterval(
 +            "Snapshots",
 +            SnapshotManager.class,
 +            Integer.class,
 +            "snapshot.poll.interval",
 +            "300",
 +            "The time interval in seconds when the management server polls for snapshots to be scheduled.",
 +            null),
 +    SnapshotDeltaMax("Snapshots", SnapshotManager.class, Integer.class, "snapshot.delta.max", "16", "max delta snapshots between two full snapshots.", null),
 +    KVMSnapshotEnabled("Hidden", SnapshotManager.class, Boolean.class, "kvm.snapshot.enabled", "false", "whether snapshot is enabled for KVM hosts", null),
 +
 +    // Advanced
 +    EventPurgeInterval(
 +            "Advanced",
 +            ManagementServer.class,
 +            Integer.class,
 +            "event.purge.interval",
 +            "86400",
 +            "The interval (in seconds) to wait before running the event purge thread",
 +            null),
 +    AccountCleanupInterval(
 +            "Advanced",
 +            ManagementServer.class,
 +            Integer.class,
 +            "account.cleanup.interval",
 +            "86400",
 +            "The interval (in seconds) between cleanup for removed accounts",
 +            null),
 +    InstanceName("Advanced", AgentManager.class, String.class, "instance.name", "VM", "Name of the deployment instance.", "instanceName"),
 +    ExpungeDelay(
 +            "Advanced",
 +            UserVmManager.class,
 +            Integer.class,
 +            "expunge.delay",
 +            "86400",
 +            "Determines how long (in seconds) to wait before actually expunging destroyed vm. The default value = the default value of expunge.interval",
 +            null),
 +    ExpungeInterval(
 +            "Advanced",
 +            UserVmManager.class,
 +            Integer.class,
 +            "expunge.interval",
 +            "86400",
 +            "The interval (in seconds) to wait before running the expunge thread.",
 +            null),
 +    ExpungeWorkers("Advanced", UserVmManager.class, Integer.class, "expunge.workers", "1", "Number of workers performing expunge ", null),
 +    ExtractURLCleanUpInterval(
 +            "Advanced",
 +            ManagementServer.class,
 +            Integer.class,
 +            "extract.url.cleanup.interval",
 +            "7200",
 +            "The interval (in seconds) to wait before cleaning up the extract URL's ",
 +            null),
 +    DisableExtraction(
 +            "Advanced",
 +            ManagementServer.class,
 +            Boolean.class,
 +            "disable.extraction",
 +            "false",
 +            "Flag for disabling extraction of template, isos and volumes",
 +            null),
 +    ExtractURLExpirationInterval(
 +            "Advanced",
 +            ManagementServer.class,
 +            Integer.class,
 +            "extract.url.expiration.interval",
 +            "14400",
 +            "The life of an extract URL after which it is deleted ",
 +            null),
 +    HostStatsInterval(
 +            "Advanced",
 +            ManagementServer.class,
 +            Integer.class,
 +            "host.stats.interval",
 +            "60000",
 +            "The interval (in milliseconds) when host stats are retrieved from agents.",
 +            null),
 +    HostRetry("Advanced", AgentManager.class, Integer.class, "host.retry", "2", "Number of times to retry hosts for creating a volume", null),
 +    InvestigateRetryInterval(
 +            "Advanced",
 +            HighAvailabilityManager.class,
 +            Integer.class,
 +            "investigate.retry.interval",
 +            "60",
 +            "Time (in seconds) between VM pings when agent is disconnected",
 +            null),
 +    MigrateRetryInterval("Advanced", HighAvailabilityManager.class, Integer.class, "migrate.retry.interval", "120", "Time (in seconds) between migration retries", null),
 +    RouterCpuMHz(
 +            "Advanced",
 +            NetworkOrchestrationService.class,
 +            Integer.class,
 +            "router.cpu.mhz",
 +            String.valueOf(VpcVirtualNetworkApplianceManager.DEFAULT_ROUTER_CPU_MHZ),
 +            "Default CPU speed (MHz) for router VM.",
 +            null),
 +    RestartRetryInterval(
 +            "Advanced",
 +            HighAvailabilityManager.class,
 +            Integer.class,
 +            "restart.retry.interval",
 +            "600",
 +            "Time (in seconds) between retries to restart a vm",
 +            null),
 +    RouterStatsInterval(
 +            "Advanced",
 +            NetworkOrchestrationService.class,
 +            Integer.class,
 +            "router.stats.interval",
 +            "300",
 +            "Interval (in seconds) to report router statistics.",
 +            null),
 +    ExternalNetworkStatsInterval(
 +            "Advanced",
 +            NetworkOrchestrationService.class,
 +            Integer.class,
 +            "external.network.stats.interval",
 +            "300",
 +            "Interval (in seconds) to report external network statistics.",
 +            null),
 +    RouterCheckInterval(
 +            "Advanced",
 +            NetworkOrchestrationService.class,
 +            Integer.class,
 +            "router.check.interval",
 +            "30",
 +            "Interval (in seconds) to report redundant router status.",
 +            null),
 +    RouterCheckPoolSize(
 +            "Advanced",
 +            NetworkOrchestrationService.class,
 +            Integer.class,
 +            "router.check.poolsize",
 +            "10",
 +            "Number of threads used to check redundant router status.",
 +            null),
 +    RouterExtraPublicNics(
 +            "Advanced",
 +            NetworkOrchestrationService.class,
 +            Integer.class,
 +            "router.extra.public.nics",
 +            "2",
 +            "specify extra public nics used for virtual router(up to 5)",
 +            "0-5"),
 +    ScaleRetry("Advanced", ManagementServer.class, Integer.class, "scale.retry", "2", "Number of times to retry scaling up the vm", null),
 +    StopRetryInterval(
 +            "Advanced",
 +            HighAvailabilityManager.class,
 +            Integer.class,
 +            "stop.retry.interval",
 +            "600",
 +            "Time in seconds between retries to stop or destroy a vm",
 +            null),
 +    UpdateWait("Advanced", AgentManager.class, Integer.class, "update.wait", "600", "Time to wait (in seconds) before alerting on an updating agent", null),
 +    XapiWait("Advanced", AgentManager.class, Integer.class, "xapiwait", "60", "Time (in seconds) to wait for XAPI to return", null),
 +    MigrateWait("Advanced", AgentManager.class, Integer.class, "migratewait", "3600", "Time (in seconds) to wait for VM migrate finish", null),
 +    HAWorkers("Advanced", AgentManager.class, Integer.class, "ha.workers", "5", "Number of ha worker threads.", null),
 +    MountParent(
 +            "Advanced",
 +            ManagementServer.class,
 +            String.class,
 +            "mount.parent",
 +            "/var/cloudstack/mnt",
 +            "The mount point on the Management Server for Secondary Storage.",
 +            null),
 +    SystemVMAutoReserveCapacity(
 +            "Advanced",
 +            ManagementServer.class,
 +            Boolean.class,
 +            "system.vm.auto.reserve.capacity",
 +            "true",
 +            "Indicates whether or not to automatically reserve system VM standby capacity.",
 +            null),
 +    SystemVMDefaultHypervisor("Advanced",
 +            ManagementServer.class,
 +            String.class,
 +            "system.vm.default.hypervisor",
 +            null,
 +            "Hypervisor type used to create system vm, valid values are: XenServer, KVM, VMware, Hyperv, VirtualBox, Parralels, BareMetal, Ovm, LXC, Any",
 +            null),
 +    SystemVMRandomPassword(
 +            "Advanced",
 +            ManagementServer.class,
 +            Boolean.class,
 +            "system.vm.random.password",
 +            "false",
 +            "Randomize system vm password the first time management server starts",
 +            null),
 +    LinkLocalIpNums("Advanced", ManagementServer.class, Integer.class, "linkLocalIp.nums", "10", "The number of link local IPs needed by domR (in powers of 2)", null),
 +    HypervisorList(
 +            "Advanced",
 +            ManagementServer.class,
 +            String.class,
 +            "hypervisor.list",
 +            HypervisorType.Hyperv + "," + HypervisorType.KVM + "," + HypervisorType.XenServer + "," + HypervisorType.VMware + "," + HypervisorType.BareMetal + "," +
 +                    HypervisorType.Ovm + "," + HypervisorType.LXC + "," + HypervisorType.Ovm3,
 +                    "The list of hypervisors that this deployment will use.",
 +            "hypervisorList"),
 +    ManagementNetwork("Advanced", ManagementServer.class, String.class, "management.network.cidr", null, "The cidr of management server network", null),
 +    EventPurgeDelay(
 +            "Advanced",
 +            ManagementServer.class,
 +            Integer.class,
 +            "event.purge.delay",
 +            "15",
 +            "Events older than the specified number of days will be purged. Set this value to 0 to never delete events",
 +            null),
 +    SecStorageVmMTUSize(
 +            "Advanced",
 +            AgentManager.class,
 +            Integer.class,
 +            "secstorage.vm.mtu.size",
 +            String.valueOf(SecondaryStorageVmManager.DEFAULT_SS_VM_MTUSIZE),
 +            "MTU size (in Byte) of storage network in secondary storage vms",
 +            null),
 +    MaxTemplateAndIsoSize(
 +            "Advanced",
 +            ManagementServer.class,
 +            Long.class,
 +            "max.template.iso.size",
 +            "50",
 +            "The maximum size for a downloaded template or ISO (in GB).",
 +            null),
 +    SecStorageAllowedInternalDownloadSites(
 +            "Advanced",
 +            ManagementServer.class,
 +            String.class,
 +            "secstorage.allowed.internal.sites",
 +            null,
 +            "Comma separated list of cidrs internal to the datacenter that can host template download servers, please note 0.0.0.0 is not a valid site",
 +            null),
 +    SecStorageEncryptCopy(
 +            "Advanced",
 +            ManagementServer.class,
 +            Boolean.class,
 +            "secstorage.encrypt.copy",
 +            "false",
 +            "Use SSL to encrypt copy traffic between zones",
 +            "true,false"),
 +    SecStorageSecureCopyCert(
 +            "Advanced",
 +            ManagementServer.class,
 +            String.class,
 +            "secstorage.ssl.cert.domain",
 +            "",
 +            "SSL certificate used to encrypt copy traffic between zones",
 +            "domainName"),
 +    SecStorageCapacityStandby(
 +            "Advanced",
 +            AgentManager.class,
 +            Integer.class,
 +            "secstorage.capacity.standby",
 +            "10",
 +            "The minimal number of command execution sessions that system is able to serve immediately(standby capacity)",
 +            null),
 +    SecStorageSessionMax(
 +            "Advanced",
 +            AgentManager.class,
 +            Integer.class,
 +            "secstorage.session.max",
 +            "50",
 +            "The max number of command execution sessions that a SSVM can handle",
 +            null),
 +    SecStorageCmdExecutionTimeMax(
 +            "Advanced",
 +            AgentManager.class,
 +            Integer.class,
 +            "secstorage.cmd.execution.time.max",
 +            "30",
 +            "The max command execution time in minutes",
 +            null),
 +    SecStorageProxy(
 +            "Advanced",
 +            AgentManager.class,
 +            String.class,
 +            "secstorage.proxy",
 +            null,
 +            "http proxy used by ssvm, in http://username:password@proxyserver:port format",
 +            null),
 +    AlertPurgeInterval(
 +            "Advanced",
 +            ManagementServer.class,
 +            Integer.class,
 +            "alert.purge.interval",
 +            "86400",
 +            "The interval (in seconds) to wait before running the alert purge thread",
 +            null),
 +    AlertPurgeDelay(
 +            "Advanced",
 +            ManagementServer.class,
 +            Integer.class,
 +            "alert.purge.delay",
 +            "0",
 +            "Alerts older than the specified number of days will be purged. Set this value to 0 to never delete alerts",
 +            null),
 +    HostReservationReleasePeriod(
 +            "Advanced",
 +            ManagementServer.class,
 +            Integer.class,
 +            "host.reservation.release.period",
 +            "300000",
 +            "The interval in milliseconds between host reservation release checks",
 +            null),
 +    // LB HealthCheck Interval.
 +    LBHealthCheck(
 +            "Advanced",
 +            ManagementServer.class,
 +            String.class,
 +            "healthcheck.update.interval",
 +            "600",
 +            "Time Interval to fetch the LB health check states (in sec)",
 +            null),
 +    NCCCmdTimeOut(
 +            "Advanced",
 +            ManagementServer.class,
 +            Long.class,
 +            "ncc.command.timeout",
 +            "600000", // 10 minutes
 +            "Command Timeout Interval (in millisec)",
 +            null),
 +    DirectAttachNetworkEnabled(
 +            "Advanced",
 +            ManagementServer.class,
 +            Boolean.class,
 +            "direct.attach.network.externalIpAllocator.enabled",
 +            "false",
 +            "Direct-attach VMs using external DHCP server",
 +            "true,false"),
 +    DirectAttachNetworkExternalAPIURL(
 +            "Advanced",
 +            ManagementServer.class,
 +            String.class,
 +            "direct.attach.network.externalIpAllocator.url",
 +            null,
 +            "Direct-attach VMs using external DHCP server (API url)",
 +            null),
 +    CheckPodCIDRs(
 +            "Advanced",
 +            ManagementServer.class,
 +            String.class,
 +            "check.pod.cidrs",
 +            "true",
 +            "If true, different pods must belong to different CIDR subnets.",
 +            "true,false"),
 +    NetworkGcWait(
 +            "Advanced",
 +            ManagementServer.class,
 +            Integer.class,
 +            "network.gc.wait",
 +            "600",
 +            "Time (in seconds) to wait before shutting down a network that's not in use",
 +            null),
 +    NetworkGcInterval("Advanced", ManagementServer.class, Integer.class, "network.gc.interval", "600", "Seconds to wait before checking for networks to shutdown", null),
 +    CapacitySkipcountingHours(
 +            "Advanced",
 +            ManagementServer.class,
 +            Integer.class,
 +            "capacity.skipcounting.hours",
 +            "3600",
 +            "Time (in seconds) to wait before releasing a VM's CPU and memory when the VM is in stopped state",
 +            null),
 +    VmStatsInterval(
 +            "Advanced",
 +            ManagementServer.class,
 +            Integer.class,
 +            "vm.stats.interval",
 +            "60000",
 +            "The interval (in milliseconds) when vm stats are retrieved from agents.",
 +            null),
 +    VmDiskStatsInterval("Advanced", ManagementServer.class, Integer.class, "vm.disk.stats.interval", "0", "Interval (in seconds) to report vm disk statistics.", null),
 +    VolumeStatsInterval("Advanced", ManagementServer.class, Integer.class, "volume.stats.interval", "60000", "Interval (in seconds) to report volume statistics.", null),
 +    VmTransitionWaitInterval(
 +            "Advanced",
 +            ManagementServer.class,
 +            Integer.class,
 +            "vm.tranisition.wait.interval",
 +            "3600",
 +            "Time (in seconds) to wait before taking over a VM in transition state",
 +            null),
 +    VmDiskThrottlingIopsReadRate(
 +            "Advanced",
 +            ManagementServer.class,
 +            Integer.class,
 +            "vm.disk.throttling.iops_read_rate",
 +            "0",
 +            "Default disk I/O read rate in requests per second allowed in User vm's disk.",
 +            null),
 +    VmDiskThrottlingIopsWriteRate(
 +            "Advanced",
 +            ManagementServer.class,
 +            Integer.class,
 +            "vm.disk.throttling.iops_write_rate",
 +            "0",
 +            "Default disk I/O write rate in requests per second allowed in User vm's disk.",
 +            null),
 +    VmDiskThrottlingBytesReadRate(
 +            "Advanced",
 +            ManagementServer.class,
 +            Integer.class,
 +            "vm.disk.throttling.bytes_read_rate",
 +            "0",
 +            "Default disk I/O read rate in bytes per second allowed in User vm's disk.",
 +            null),
 +    VmDiskThrottlingBytesWriteRate(
 +            "Advanced",
 +            ManagementServer.class,
 +            Integer.class,
 +            "vm.disk.throttling.bytes_write_rate",
 +            "0",
 +            "Default disk I/O write rate in bytes per second allowed in User vm's disk.",
 +            null),
 +    KvmAutoConvergence(
 +            "Advanced",
 +            ManagementServer.class,
 +            Boolean.class,
 +            "kvm.auto.convergence",
 +            "false",
 +            "Setting this to 'true' allows KVM to use auto convergence to complete VM migration (libvirt version 1.2.3+ and QEMU version 1.6+)",
 +            null),
 +    ControlCidr(
 +            "Advanced",
 +            ManagementServer.class,
 +            String.class,
 +            "control.cidr",
 +            "169.254.0.0/16",
 +            "Changes the cidr for the control network traffic.  Defaults to using link local.  Must be unique within pods",
 +            null),
 +    ControlGateway("Advanced", ManagementServer.class, String.class, "control.gateway", "169.254.0.1", "gateway for the control network traffic", null),
 +    HostCapacityTypeToOrderClusters(
 +            "Advanced",
 +            ManagementServer.class,
 +            String.class,
 +            "host.capacityType.to.order.clusters",
 +            "CPU",
 +            "The host capacity type (CPU or RAM) is used by deployment planner to order clusters during VM resource allocation",
 +            "CPU,RAM"),
 +    ApplyAllocationAlgorithmToPods(
 +            "Advanced",
 +            ManagementServer.class,
 +            Boolean.class,
 +            "apply.allocation.algorithm.to.pods",
 +            "false",
 +            "If true, deployment planner applies the allocation heuristics at pods first in the given datacenter during VM resource allocation",
 +            "true,false"),
 +    VmUserDispersionWeight(
 +            "Advanced",
 +            ManagementServer.class,
 +            Float.class,
 +            "vm.user.dispersion.weight",
 +            "1",
 +            "Weight for user dispersion heuristic (as a value between 0 and 1) applied to resource allocation during vm deployment. Weight for capacity heuristic will be (1 - weight of user dispersion)",
 +            null),
 +    VmAllocationAlgorithm(
 +            "Advanced",
 +            ManagementServer.class,
 +            String.class,
 +            "vm.allocation.algorithm",
 +            "random",
 +            "'random', 'firstfit', 'userdispersing', 'userconcentratedpod_random', 'userconcentratedpod_firstfit', 'firstfitleastconsumed' : Order in which hosts within a cluster will be considered for VM/volume allocation.",
 +            null),
 +    VmDeploymentPlanner(
 +            "Advanced",
 +            ManagementServer.class,
 +            String.class,
 +            "vm.deployment.planner",
 +            "FirstFitPlanner",
 +            "'FirstFitPlanner', 'UserDispersingPlanner', 'UserConcentratedPodPlanner': DeploymentPlanner heuristic that will be used for VM deployment.",
 +            null),
 +    ElasticLoadBalancerEnabled(
 +            "Advanced",
 +            ManagementServer.class,
 +            String.class,
 +            "network.loadbalancer.basiczone.elb.enabled",
 +            "false",
 +            "Whether the load balancing service is enabled for basic zones",
 +            "true,false"),
 +    ElasticLoadBalancerNetwork(
 +            "Advanced",
 +            ManagementServer.class,
 +            String.class,
 +            "network.loadbalancer.basiczone.elb.network",
 +            "guest",
 +            "Whether the elastic load balancing service public ips are taken from the public or guest network",
 +            "guest,public"),
 +    ElasticLoadBalancerVmMemory(
 +            "Advanced",
 +            ManagementServer.class,
 +            Integer.class,
 +            "network.loadbalancer.basiczone.elb.vm.ram.size",
 +            "128",
 +            "Memory in MB for the elastic load balancer vm",
 +            null),
 +    ElasticLoadBalancerVmCpuMhz(
 +            "Advanced",
 +            ManagementServer.class,
 +            Integer.class,
 +            "network.loadbalancer.basiczone.elb.vm.cpu.mhz",
 +            "128",
 +            "CPU speed for the elastic load balancer vm",
 +            null),
 +    ElasticLoadBalancerVmNumVcpu(
 +            "Advanced",
 +            ManagementServer.class,
 +            Integer.class,
 +            "network.loadbalancer.basiczone.elb.vm.vcpu.num",
 +            "1",
 +            "Number of VCPU  for the elastic load balancer vm",
 +            null),
 +    ElasticLoadBalancerVmGcInterval(
 +            "Advanced",
 +            ManagementServer.class,
 +            Integer.class,
 +            "network.loadbalancer.basiczone.elb.gc.interval.minutes",
 +            "30",
 +            "Garbage collection interval to destroy unused ELB vms in minutes. Minimum of 5",
 +            null),
 +    SortKeyAlgorithm(
 +            "Advanced",
 +            ManagementServer.class,
 +            Boolean.class,
 +            "sortkey.algorithm",
 +            "false",
 +            "Sort algorithm for those who use sort key(template, disk offering, service offering, network offering), true means ascending sort while false means descending sort",
 +            null),
 +    EnableEC2API("Advanced", ManagementServer.class, Boolean.class, "enable.ec2.api", "false", "enable EC2 API on CloudStack", null),
 +    EnableS3API("Advanced", ManagementServer.class, Boolean.class, "enable.s3.api", "false", "enable Amazon S3 API on CloudStack", null),
 +    RecreateSystemVmEnabled(
 +            "Advanced",
 +            ManagementServer.class,
 +            Boolean.class,
 +            "recreate.systemvm.enabled",
 +            "false",
 +            "If true, will recreate system vm root disk whenever starting system vm",
 +            "true,false"),
 +    SetVmInternalNameUsingDisplayName(
 +            "Advanced",
 +            ManagementServer.class,
 +            Boolean.class,
 +            "vm.instancename.flag",
 +            "false",
 +            "If set to true, will set guest VM's name as it appears on the hypervisor, to its hostname. The flag is supported for VMware hypervisor only",
 +            "true,false"),
 +    IncorrectLoginAttemptsAllowed(
 +            "Advanced",
 +            ManagementServer.class,
 +            Integer.class,
 +            "incorrect.login.attempts.allowed",
 +            "5",
 +            "Incorrect login attempts allowed before the user is disabled",
 +            null),
 +    // Ovm
 +    OvmPublicNetwork("Hidden", ManagementServer.class, String.class, "ovm.public.network.device", null, "Specify the public bridge on host for public network", null),
 +    OvmPrivateNetwork("Hidden", ManagementServer.class, String.class, "ovm.private.network.device", null, "Specify the private bridge on host for private network", null),
 +    OvmGuestNetwork("Hidden", ManagementServer.class, String.class, "ovm.guest.network.device", null, "Specify the guest bridge on host for guest network", null),
 +
 +    // Ovm3
 +    Ovm3PublicNetwork("Hidden", ManagementServer.class, String.class, "ovm3.public.network.device", null, "Specify the public bridge on host for public network", null),
 +    Ovm3PrivateNetwork("Hidden", ManagementServer.class, String.class, "ovm3.private.network.device", null, "Specify the private bridge on host for private network", null),
 +    Ovm3GuestNetwork("Hidden", ManagementServer.class, String.class, "ovm3.guest.network.device", null, "Specify the guest bridge on host for guest network", null),
 +    Ovm3StorageNetwork("Hidden", ManagementServer.class, String.class, "ovm3.storage.network.device", null, "Specify the storage bridge on host for storage network", null),
 +    Ovm3HeartBeatTimeout(
 +            "Advanced",
 +            ManagementServer.class,
 +            Integer.class,
 +            "ovm3.heartbeat.timeout",
 +            "120",
 +            "timeout used for primary storage check, upon timeout a panic is triggered.",
 +            null),
 +    Ovm3HeartBeatInterval(
 +            "Advanced",
 +            ManagementServer.class,
 +            Integer.class,
 +            "ovm3.heartbeat.interval",
 +            "1",
 +            "interval used to check primary storage availability.",
 +            null),
 +
 +
 +    // XenServer
 +    XenServerPublicNetwork(
 +            "Hidden",
 +            ManagementServer.class,
 +            String.class,
 +            "xenserver.public.network.device",
 +            null,
 +            "[ONLY IF THE PUBLIC NETWORK IS ON A DEDICATED NIC]:The network name label of the physical device dedicated to the public network on a XenServer host",
 +            null),
 +    XenServerStorageNetwork1("Hidden", ManagementServer.class, String.class, "xenserver.storage.network.device1", null, "Specify when there are storage networks", null),
 +    XenServerStorageNetwork2("Hidden", ManagementServer.class, String.class, "xenserver.storage.network.device2", null, "Specify when there are storage networks", null),
 +    XenServerPrivateNetwork("Hidden", ManagementServer.class, String.class, "xenserver.private.network.device", null, "Specify when the private network name is different", null),
 +    NetworkGuestCidrLimit(
 +            "Network",
 +            NetworkOrchestrationService.class,
 +            Integer.class,
 +            "network.guest.cidr.limit",
 +            "22",
 +            "size limit for guest cidr; can't be less than this value",
 +            null),
 +    XenServerSetupMultipath("Advanced", ManagementServer.class, String.class, "xenserver.setup.multipath", "false", "Setup the host to do multipath", null),
 +    XenServerBondStorageNic("Advanced", ManagementServer.class, String.class, "xenserver.bond.storage.nics", null, "Attempt to bond the two networks if found", null),
 +    XenServerHeartBeatTimeout(
 +            "Advanced",
 +            ManagementServer.class,
 +            Integer.class,
 +            "xenserver.heartbeat.timeout",
 +            "120",
 +            "heartbeat timeout to use when implementing XenServer Self Fencing",
 +            null),
 +    XenServerHeartBeatInterval(
 +            "Advanced",
 +            ManagementServer.class,
 +            Integer.class,
 +            "xenserver.heartbeat.interval",
 +            "60",
 +            "heartbeat interval to use when checking before XenServer Self Fencing",
 +            null),
 +    XenServerGuestNetwork("Hidden", ManagementServer.class, String.class, "xenserver.guest.network.device", null, "Specify for guest network name label", null),
 +    XenServerMaxNics("Advanced", AgentManager.class, Integer.class, "xenserver.nics.max", "7", "Maximum allowed nics for Vms created on XenServer", null),
 +    XenServerPVdriverVersion(
 +            "Advanced",
 +            ManagementServer.class,
 +            String.class,
 +            "xenserver.pvdriver.version",
 +            "xenserver61",
 +            "default Xen PV driver version for registered template, valid value:xenserver56,xenserver61 ",
 +            "xenserver56,xenserver61"),
 +    XenServerHotFix("Advanced",
 +            ManagementServer.class,
 +            Boolean.class,
 +            "xenserver.hotfix.enabled",
 +            "false",
 +            "Enable/Disable XenServer hot fix",
 +            null),
 +
 +    // VMware
 +    VmwareUseNexusVSwitch(
 +            "Network",
 +            ManagementServer.class,
 +            Boolean.class,
 +            "vmware.use.nexus.vswitch",
 +            "false",
 +            "Enable/Disable Cisco Nexus 1000v vSwitch in VMware environment",
 +            null),
 +    VmwareUseDVSwitch(
 +            "Network",
 +            ManagementServer.class,
 +            Boolean.class,
 +            "vmware.use.dvswitch",
 +            "false",
 +            "Enable/Disable Nexus/Vmware dvSwitch in VMware environment",
 +            null),
 +    VmwareCreateFullClone(
 +            "Advanced",
 +            ManagementServer.class,
 +            Boolean.class,
 +            "vmware.create.full.clone",
 +            "true",
 +            "If set to true, creates guest VMs as full clones on ESX",
 +            null),
 +    VmwareServiceConsole(
 +            "Advanced",
 +            ManagementServer.class,
 +            String.class,
 +            "vmware.service.console",
 +            "Service Console",
 +            "Specify the service console network name(for ESX hosts)",
 +            null),
 +    VmwareManagementPortGroup(
 +            "Advanced",
 +            ManagementServer.class,
 +            String.class,
 +            "vmware.management.portgroup",
 +            "Management Network",
 +            "Specify the management network name(for ESXi hosts)",
 +            null),
 +    VmwareAdditionalVncPortRangeStart(
 +            "Advanced",
 +            ManagementServer.class,
 +            Integer.class,
 +            "vmware.additional.vnc.portrange.start",
 +            "50000",
 +            "Start port number of additional VNC port range",
 +            null),
 +    VmwareAdditionalVncPortRangeSize(
 +            "Advanced",
 +            ManagementServer.class,
 +            Integer.class,
 +            "vmware.additional.vnc.portrange.size",
 +            "1000",
 +            "Size of additional VNC port range",
 +            null),
 +    //VmwareGuestNicDeviceType("Advanced", ManagementServer.class, String.class, "vmware.guest.nic.device.type", "E1000", "Ethernet card type used in guest VM, valid values are E1000, PCNet32, Vmxnet2, Vmxnet3", null),
 +    VmwareRootDiskControllerType(
 +            "Advanced",
 +            ManagementServer.class,
 +            String.class,
 +            "vmware.root.disk.controller",
 +            "ide",
 +            "Specify the default disk controller for root volumes, valid values are scsi, ide, osdefault. Please check documentation for more details on each of these values.",
 +            null),
 +    VmwareSystemVmNicDeviceType(
 +            "Advanced",
 +            ManagementServer.class,
 +            String.class,
 +            "vmware.systemvm.nic.device.type",
 +            "E1000",
 +            "Specify the default network device type for system VMs, valid values are E1000, PCNet32, Vmxnet2, Vmxnet3",
 +            null),
 +    VmwareRecycleHungWorker(
 +            "Advanced",
 +            ManagementServer.class,
 +            Boolean.class,
 +            "vmware.recycle.hung.wokervm",
 +            "false",
 +            "Specify whether or not to recycle hung worker VMs",
 +            null),
 +    VmwareHungWorkerTimeout("Advanced", ManagementServer.class, Long.class, "vmware.hung.wokervm.timeout", "7200", "Worker VM timeout in seconds", null),
 +    VmwareVcenterSessionTimeout("Advanced", ManagementServer.class, Long.class, "vmware.vcenter.session.timeout", "1200", "VMware client timeout in seconds", null),
 +
 +    // KVM
 +    KvmPublicNetwork("Hidden", ManagementServer.class, String.class, "kvm.public.network.device", null, "Specify the public bridge on host for public network", null),
 +    KvmPrivateNetwork("Hidden", ManagementServer.class, String.class, "kvm.private.network.device", null, "Specify the private bridge on host for private network", null),
 +    KvmGuestNetwork("Hidden", ManagementServer.class, String.class, "kvm.guest.network.device", null, "Specify the guest bridge on host for guest network", null),
-     KvmSshToAgentEnabled(
-             "Advanced",
-             ManagementServer.class,
-             Boolean.class,
-             "kvm.ssh.to.agent",
-             "true",
-             "Specify whether or not the management server is allowed to SSH into KVM Agents",
-             null),
 +
 +    // Hyperv
 +    HypervPublicNetwork(
 +            "Hidden",
 +            ManagementServer.class,
 +            String.class,
 +            "hyperv.public.network.device",
 +            null,
 +            "Specify the public virtual switch on host for public network",
 +            null),
 +    HypervPrivateNetwork(
 +            "Hidden",
 +            ManagementServer.class,
 +            String.class,
 +            "hyperv.private.network.device",
 +            null,
 +            "Specify the virtual switch on host for private network",
 +            null),
 +    HypervGuestNetwork(
 +            "Hidden",
 +            ManagementServer.class,
 +            String.class,
 +            "hyperv.guest.network.device",
 +            null,
 +            "Specify the virtual switch on host for guest network",
 +            null),
 +
 +    // Usage
 +    UsageExecutionTimezone("Usage", ManagementServer.class, String.class, "usage.execution.timezone", null, "The timezone to use for usage job execution time", null),
 +    UsageStatsJobAggregationRange(
 +            "Usage",
 +            ManagementServer.class,
 +            Integer.class,
 +            "usage.stats.job.aggregation.range",
 +            "1440",
 +            "The range of time for aggregating the user statistics specified in minutes (e.g. 1440 for daily, 60 for hourly).",
 +            null),
 +    UsageStatsJobExecTime(
 +            "Usage",
 +            ManagementServer.class,
 +            String.class,
 +            "usage.stats.job.exec.time",
 +            "00:15",
 +            "The time at which the usage statistics aggregation job will run as an HH24:MM time, e.g. 00:30 to run at 12:30am.",
 +            null),
 +    EnableUsageServer("Usage", ManagementServer.class, Boolean.class, "enable.usage.server", "true", "Flag for enabling usage", null),
 +    DirectNetworkStatsInterval(
 +            "Usage",
 +            ManagementServer.class,
 +            Integer.class,
 +            "direct.network.stats.interval",
 +            "86400",
 +            "Interval (in seconds) to collect stats from Traffic Monitor",
 +            null),
 +    UsageSanityCheckInterval(
 +            "Usage",
 +            ManagementServer.class,
 +            Integer.class,
 +            "usage.sanity.check.interval",
 +            null,
 +            "Interval (in days) to check sanity of usage data. To disable set it to 0 or negative.",
 +            null),
 +    UsageAggregationTimezone("Usage", ManagementServer.class, String.class, "usage.aggregation.timezone", "GMT", "The timezone to use for usage stats aggregation", null),
 +    TrafficSentinelIncludeZones(
 +            "Usage",
 +            ManagementServer.class,
 +            Integer.class,
 +            "traffic.sentinel.include.zones",
 +            "EXTERNAL",
 +            "Traffic going into specified list of zones is metered. For metering all traffic leave this parameter empty",
 +            null),
 +    TrafficSentinelExcludeZones(
 +            "Usage",
 +            ManagementServer.class,
 +            Integer.class,
 +            "traffic.sentinel.exclude.zones",
 +            "",
 +            "Traffic going into specified list of zones is not metered.",
 +            null),
 +
 +    // Hidden
 +    UseSecondaryStorageVm(
 +            "Hidden",
 +            ManagementServer.class,
 +            Boolean.class,
 +            "secondary.storage.vm",
 +            "false",
 +            "Deploys a VM per zone to manage secondary storage if true, otherwise secondary storage is mounted on management server",
 +            null),
 +    CreatePoolsInPod(
 +            "Hidden",
 +            ManagementServer.class,
 +            Boolean.class,
 +            "xenserver.create.pools.in.pod",
 +            "false",
 +            "Should we automatically add XenServers into pools that are inside a Pod",
 +            null),
 +    CloudIdentifier("Hidden", ManagementServer.class, String.class, "cloud.identifier", null, "A unique identifier for the cloud.", null),
 +    SSOKey("Secure", ManagementServer.class, String.class, "security.singlesignon.key", null, "A Single Sign-On key used for logging into the cloud", null),
 +    SSOAuthTolerance(
 +            "Advanced",
 +            ManagementServer.class,
 +            Long.class,
 +            "security.singlesignon.tolerance.millis",
 +            "300000",
 +            "The allowable clock difference in milliseconds between when an SSO login request is made and when it is received.",
 +            null),
 +    //NetworkType("Hidden", ManagementServer.class, String.class, "network.type", "vlan", "The type of network that this deployment will use.", "vlan,direct"),
 +    RouterRamSize("Hidden", NetworkOrchestrationService.class, Integer.class, "router.ram.size", "256", "Default RAM for router VM (in MB).", null),
 +
 +    DefaultPageSize("Advanced", ManagementServer.class, Long.class, "default.page.size", "500", "Default page size for API list* commands", null),
 +
 +    TaskCleanupRetryInterval(
 +            "Advanced",
 +            ManagementServer.class,
 +            Integer.class,
 +            "task.cleanup.retry.interval",
 +            "600",
 +            "Time (in seconds) to wait before retrying cleanup of tasks if the cleanup failed previously.  0 means to never retry.",
 +            "Seconds"),
 +
 +    // Account Default Limits
 +    DefaultMaxAccountUserVms(
 +            "Account Defaults",
 +            ManagementServer.class,
 +            Long.class,
 +            "max.account.user.vms",
 +            "20",
 +            "The default maximum number of user VMs that can be deployed for an account",
 +            null),
 +    DefaultMaxAccountPublicIPs(
 +            "Account Defaults",
 +            ManagementServer.class,
 +            Long.class,
 +            "max.account.public.ips",
 +            "20",
 +            "The default maximum number of public IPs that can be consumed by an account",
 +            null),
 +    DefaultMaxAccountTemplates(
 +            "Account Defaults",
 +            ManagementServer.class,
 +            Long.class,
 +            "max.account.templates",
 +            "20",
 +            "The default maximum number of templates that can be deployed for an account",
 +            null),
 +    DefaultMaxAccountSnapshots(
 +            "Account Defaults",
 +            ManagementServer.class,
 +            Long.class,
 +            "max.account.snapshots",
 +            "20",
 +            "The default maximum number of snapshots that can be created for an account",
 +            null),
 +    DefaultMaxAccountVolumes(
 +            "Account Defaults",
 +            ManagementServer.class,
 +            Long.class,
 +            "max.account.volumes",
 +            "20",
 +            "The default maximum number of volumes that can be created for an account",
 +            null),
 +    DefaultMaxAccountNetworks(
 +            "Account Defaults",
 +            ManagementServer.class,
 +            Long.class,
 +            "max.account.networks",
 +            "20",
 +            "The default maximum number of networks that can be created for an account",
 +            null),
 +    DefaultMaxAccountVpcs(
 +            "Account Defaults",
 +            ManagementServer.class,
 +            Long.class,
 +            "max.account.vpcs",
 +            "20",
 +            "The default maximum number of vpcs that can be created for an account",
 +            null),
 +    DefaultMaxAccountCpus(
 +            "Account Defaults",
 +            ManagementServer.class,
 +            Long.class,
 +            "max.account.cpus",
 +            "40",
 +            "The default maximum number of cpu cores that can be used for an account",
 +            null),
 +    DefaultMaxAccountMemory(
 +            "Account Defaults",
 +            ManagementServer.class,
 +            Long.class,
 +            "max.account.memory",
 +            "40960",
 +            "The default maximum memory (in MB) that can be used for an account",
 +            null),
 +    DefaultMaxAccountPrimaryStorage(
 +            "Account Defaults",
 +            ManagementServer.class,
 +            Long.class,
 +            "max.account.primary.storage",
 +            "200",
 +            "The default maximum primary storage space (in GiB) that can be used for an account",
 +            null),
 +    DefaultMaxAccountSecondaryStorage(
 +            "Account Defaults",
 +            ManagementServer.class,
 +            Long.class,
 +            "max.account.secondary.storage",
 +            "400",
 +            "The default maximum secondary storage space (in GiB) that can be used for an account",
 +            null),
 +
 +    //disabling lb as cluster sync does not work with distributed cluster
 +    SubDomainNetworkAccess(
 +            "Advanced",
 +            NetworkOrchestrationService.class,
 +            Boolean.class,
 +            "allow.subdomain.network.access",
 +            "true",
 +            "Allow subdomains to use networks dedicated to their parent domain(s)",
 +            null),
 +    DnsBasicZoneUpdates(
 +            "Advanced",
 +            NetworkOrchestrationService.class,
 +            String.class,
 +            "network.dns.basiczone.updates",
 +            "all",
 +            "This parameter can take 2 values: all (default) and pod. It defines if DHCP/DNS requests have to be send to all dhcp servers in cloudstack, or only to the one in the same pod",
 +            "all,pod"),
 +
 +    ClusterMessageTimeOutSeconds(
 +            "Advanced",
 +            ManagementServer.class,
 +            Integer.class,
 +            "cluster.message.timeout.seconds",
 +            "300",
 +            "Time (in seconds) to wait before a inter-management server message post times out.",
 +            null),
 +    AgentLoadThreshold(
 +            "Advanced",
 +            ManagementServer.class,
 +            Float.class,
 +            "agent.load.threshold",
 +            "0.7",
 +            "Percentage (as a value between 0 and 1) of connected agents after which agent load balancing will start happening",
 +            null),
 +
 +    DefaultMaxDomainUserVms("Domain Defaults", ManagementServer.class, Long.class, "max.domain.user.vms", "40", "The default maximum number of user VMs that can be deployed for a domain", null),
 +    DefaultMaxDomainPublicIPs("Domain Defaults", ManagementServer.class, Long.class, "max.domain.public.ips", "40", "The default maximum number of public IPs that can be consumed by a domain", null),
 +    DefaultMaxDomainTemplates("Domain Defaults", ManagementServer.class, Long.class, "max.domain.templates", "40", "The default maximum number of templates that can be deployed for a domain", null),
 +    DefaultMaxDomainSnapshots("Domain Defaults", ManagementServer.class, Long.class, "max.domain.snapshots", "40", "The default maximum number of snapshots that can be created for a domain", null),
 +    DefaultMaxDomainVolumes("Domain Defaults", ManagementServer.class, Long.class, "max.domain.volumes", "40", "The default maximum number of volumes that can be created for a domain", null),
 +    DefaultMaxDomainNetworks("Domain Defaults", ManagementServer.class, Long.class, "max.domain.networks", "40", "The default maximum number of networks that can be created for a domain", null),
 +    DefaultMaxDomainVpcs("Domain Defaults", ManagementServer.class, Long.class, "max.domain.vpcs", "40", "The default maximum number of vpcs that can be created for a domain", null),
 +    DefaultMaxDomainCpus("Domain Defaults", ManagementServer.class, Long.class, "max.domain.cpus", "80", "The default maximum number of cpu cores that can be used for a domain", null),
 +    DefaultMaxDomainMemory("Domain Defaults", ManagementServer.class, Long.class, "max.domain.memory", "81920", "The default maximum memory (in MB) that can be used for a domain", null),
 +    DefaultMaxDomainPrimaryStorage("Domain Defaults", ManagementServer.class, Long.class, "max.domain.primary.storage", "400", "The default maximum primary storage space (in GiB) that can be used for a domain", null),
 +    DefaultMaxDomainSecondaryStorage("Domain Defaults", ManagementServer.class, Long.class, "max.domain.secondary.storage", "800", "The default maximum secondary storage space (in GiB) that can be used for a domain", null),
 +
 +    DefaultMaxProjectUserVms(
 +            "Project Defaults",
 +            ManagementServer.class,
 +            Long.class,
 +            "max.project.user.vms",
 +            "20",
 +            "The default maximum number of user VMs that can be deployed for a project",
 +            null),
 +    DefaultMaxProjectPublicIPs(
 +            "Project Defaults",
 +            ManagementServer.class,
 +            Long.class,
 +            "max.project.public.ips",
 +            "20",
 +            "The default maximum number of public IPs that can be consumed by a project",
 +            null),
 +    DefaultMaxProjectTemplates(
 +            "Project Defaults",
 +            ManagementServer.class,
 +            Long.class,
 +            "max.project.templates",
 +            "20",
 +            "The default maximum number of templates that can be deployed for a project",
 +            null),
 +    DefaultMaxProjectSnapshots(
 +            "Project Defaults",
 +            ManagementServer.class,
 +            Long.class,
 +            "max.project.snapshots",
 +            "20",
 +            "The default maximum number of snapshots that can be created for a project",
 +            null),
 +    DefaultMaxProjectVolumes(
 +            "Project Defaults",
 +            ManagementServer.class,
 +            Long.class,
 +            "max.project.volumes",
 +            "20",
 +            "The default maximum number of volumes that can be created for a project",
 +            null),
 +    DefaultMaxProjectNetworks(
 +            "Project Defaults",
 +            ManagementServer.class,
 +            Long.class,
 +            "max.project.networks",
 +            "20",
 +            "The default maximum number of networks that can be created for a project",
 +            null),
 +    DefaultMaxProjectVpcs(
 +            "Project Defaults",
 +            ManagementServer.class,
 +            Long.class,
 +            "max.project.vpcs",
 +            "20",
 +            "The default maximum number of vpcs that can be created for a project",
 +            null),
 +    DefaultMaxProjectCpus(
 +            "Project Defaults",
 +            ManagementServer.class,
 +            Long.class,
 +            "max.project.cpus",
 +            "40",
 +            "The default maximum number of cpu cores that can be used for a project",
 +            null),
 +    DefaultMaxProjectMemory(
 +            "Project Defaults",
 +            ManagementServer.class,
 +            Long.class,
 +            "max.project.memory",
 +            "40960",
 +            "The default maximum memory (in MB) that can be used for a project",
 +            null),
 +    DefaultMaxProjectPrimaryStorage(
 +            "Project Defaults",
 +            ManagementServer.class,
 +            Long.class,
 +            "max.project.primary.storage",
 +            "200",
 +            "The default maximum primary storage space (in GiB) that can be used for an project",
 +            null),
 +    DefaultMaxProjectSecondaryStorage(
 +            "Project Defaults",
 +            ManagementServer.class,
 +            Long.class,
 +            "max.project.secondary.storage",
 +            "400",
 +            "The default maximum secondary storage space (in GiB) that can be used for an project",
 +            null),
 +
 +    ProjectInviteRequired(
 +            "Project Defaults",
 +            ManagementServer.class,
 +            Boolean.class,
 +            "project.invite.required",
 +            "false",
 +            "If invitation confirmation is required when add account to project. Default value is false",
 +            null),
 +    ProjectInvitationExpirationTime(
 +            "Project Defaults",
 +            ManagementServer.class,
 +            Long.class,
 +            "project.invite.timeout",
 +            "86400",
 +            "Invitation expiration time (in seconds). Default is 1 day - 86400 seconds",
 +            null),
 +    AllowUserToCreateProject(
 +            "Project Defaults",
 +            ManagementServer.class,
 +            Long.class,
 +            "allow.user.create.projects",
 +            "true",
 +            "If regular user can create a project; true by default",
 +            null),
 +
 +    ProjectEmailSender(
 +            "Project Defaults",
 +            ManagementServer.class,
 +            String.class,
 +            "project.email.sender",
 +            null,
 +            "Sender of project invitation email (will be in the From header of the email)",
 +            null),
 +    ProjectSMTPHost(
 +            "Project Defaults",
 +            ManagementServer.class,
 +            String.class,
 +            "project.smtp.host",
 +            null,
 +            "SMTP hostname used for sending out email project invitations",
 +            null),
 +    ProjectSMTPPassword(
 +            "Secure",
 +            ManagementServer.class,
 +            String.class,
 +            "project.smtp.password",
 +            null,
 +            "Password for SMTP authentication (applies only if project.smtp.useAuth is true)",
 +            null),
 +    ProjectSMTPPort("Project Defaults", ManagementServer.class, Integer.class, "project.smtp.port", "465", "Port the SMTP server is listening on", null),
 +    ProjectSMTPUseAuth(
 +            "Project Defaults",
 +            ManagementServer.class,
 +            String.class,
 +            "project.smtp.useAuth",
 +            null,
 +            "If true, use SMTP authentication when sending emails",
 +            null),
 +    ProjectSMTPUsername(
 +            "Project Defaults",
 +            ManagementServer.class,
 +            String.class,
 +            "project.smtp.username",
 +            null,
 +            "Username for SMTP authentication (applies only if project.smtp.useAuth is true)",
 +            null),
 +
 +    DefaultExternalLoadBalancerCapacity(
 +            "Advanced",
 +            ManagementServer.class,
 +            String.class,
 +            "external.lb.default.capacity",
 +            "50",
 +            "default number of networks permitted per external load balancer device",
 +            null),
 +    DefaultExternalFirewallCapacity(
 +            "Advanced",
 +            ManagementServer.class,
 +            String.class,
 +            "external.firewall.default.capacity",
 +            "50",
 +            "default number of networks permitted per external load firewall device",
 +            null),
 +    EIPWithMultipleNetScalersEnabled(
 +            "Advanced",
 +            ManagementServer.class,
 +            Boolean.class,
 +            "eip.use.multiple.netscalers",
 +            "false",
 +            "Should be set to true, if there will be multiple NetScaler devices providing EIP service in a zone",
 +            null),
 +    ConsoleProxyServiceOffering(
 +            "Advanced",
 +            ManagementServer.class,
 +            String.class,
 +            "consoleproxy.service.offering",
 +            null,
 +            "Uuid of the service offering used by console proxy; if NULL - system offering will be used",
 +            null),
 +    SecondaryStorageServiceOffering(
 +            "Advanced",
 +            ManagementServer.class,
 +            String.class,
 +            "secstorage.service.offering",
 +            null,
 +            "Uuid of the service offering used by secondary storage; if NULL - system offering will be used",
 +            null),
 +    HaTag("Advanced", ManagementServer.class, String.class, "ha.tag", null, "HA tag defining that the host marked with this tag can be used for HA purposes only", null),
 +    ImplicitHostTags(
 +            "Advanced",
 +            ManagementServer.class,
 +            String.class,
 +            "implicit.host.tags",
 +            "GPU",
 +            "Tag hosts at the time of host disovery based on the host properties/capabilities",
 +            null),
 +    VpcCleanupInterval(
 +            "Advanced",
 +            ManagementServer.class,
 +            Integer.class,
 +            "vpc.cleanup.interval",
 +            "3600",
 +            "The interval (in seconds) between cleanup for Inactive VPCs",
 +            null),
 +    VpcMaxNetworks("Advanced", ManagementServer.class, Integer.class, "vpc.max.networks", "3", "Maximum number of networks per vpc", null),
 +    DetailBatchQuerySize("Advanced", ManagementServer.class, Integer.class, "detail.batch.query.size", "2000", "Default entity detail batch query size for listing", null),
 +    NetworkIPv6SearchRetryMax(
 +            "Network",
 +            ManagementServer.class,
 +            Integer.class,
 +            "network.ipv6.search.retry.max",
 +            "10000",
 +            "The maximum number of retrying times to search for an available IPv6 address in the table",
 +            null),
 +
 +    BaremetalInternalStorageServer(
 +            "Advanced",
 +            ManagementServer.class,
 +            String.class,
 +            "baremetal.internal.storage.server.ip",
 +            null,
 +            "the ip address of server that stores kickstart file, kernel, initrd, ISO for advanced networking baremetal provisioning",
 +            null),
 +    BaremetalProvisionDoneNotificationEnabled(
 +            "Advanced",
 +            ManagementServer.class,
 +            Boolean.class,
 +            "baremetal.provision.done.notification.enabled",
 +            "true",
 +            "whether to enable baremetal provison done notification",
 +            null),
 +    BaremetalProvisionDoneNotificationTimeout(
 +            "Advanced",
 +            ManagementServer.class,
 +            Integer.class,
 +            "baremetal.provision.done.notification.timeout",
 +            "1800",
 +            "the max time to wait before treating a baremetal provision as failure if no provision done notification is not received, in secs",
 +            null),
 +    BaremetalProvisionDoneNotificationPort(
 +            "Advanced",
 +            ManagementServer.class,
 +            Integer.class,
 +            "baremetal.provision.done.notification.port",
 +            "8080",
 +            "the port that listens baremetal provision done notification. Should be the same to port management server listening on for now. Please change it to management server port if it's not default 8080",
 +            null),
 +    ExternalBaremetalSystemUrl(
 +            "Advanced",
 +            ManagementServer.class,
 +            String.class,
 +            "external.baremetal.system.url",
 +            null,
 +            "url of external baremetal system that CloudStack will talk to",
 +            null),
 +    ExternalBaremetalResourceClassName(
 +            "Advanced",
 +            ManagementServer.class,
 +            String.class,
 +            "external.baremetal.resource.classname",
 +            null,
 +            "class name for handling external baremetal resource",
 +            null),
 +    EnableBaremetalSecurityGroupAgentEcho(
 +            "Advanced",
 +            ManagementServer.class,
 +            Boolean.class,
 +            "enable.baremetal.securitygroup.agent.echo",
 +            "false",
 +            "After starting provision process, periodcially echo security agent installed in the template. Treat provisioning as success only if echo successfully",
 +            null),
 +    IntervalToEchoBaremetalSecurityGroupAgent(
 +            "Advanced",
 +            ManagementServer.class,
 +            Integer.class,
 +            "interval.baremetal.securitygroup.agent.echo",
 +            "10",
 +            "Interval to echo baremetal security group agent, in seconds",
 +            null),
 +    TimeoutToEchoBaremetalSecurityGroupAgent(
 +            "Advanced",
 +            ManagementServer.class,
 +            Integer.class,
 +            "timeout.baremetal.securitygroup.agent.echo",
 +            "3600",
 +            "Timeout to echo baremetal security group agent, in seconds, the provisioning process will be treated as a failure",
 +            null),
 +
 +    BaremetalIpmiLanInterface(
 +            "Advanced",
 +            ManagementServer.class,
 +            String.class,
 +            "baremetal.ipmi.lan.interface",
 +            "default",
 +            "option specified in -I option of impitool. candidates are: open/bmc/lipmi/lan/lanplus/free/imb, see ipmitool man page for details. default valule 'default' means using default option of ipmitool",
 +            null),
 +
 +    BaremetalIpmiRetryTimes("Advanced",
 +            ManagementServer.class,
 +            String.class,
 +            "baremetal.ipmi.fail.retry",
 +            "5",
 +            "ipmi interface will be temporary out of order after power opertions(e.g. cycle, on), it leads following commands fail immediately. The value specifies retry times before accounting it as real failure",
 +            null),
 +
 +    ApiLimitEnabled("Advanced", ManagementServer.class, Boolean.class, "api.throttling.enabled", "false", "Enable/disable Api rate limit", null),
 +    ApiLimitInterval("Advanced", ManagementServer.class, Integer.class, "api.throttling.interval", "1", "Time interval (in seconds) to reset API count", null),
 +    ApiLimitMax("Advanced", ManagementServer.class, Integer.class, "api.throttling.max", "25", "Max allowed number of APIs within fixed interval", null),
 +    ApiLimitCacheSize("Advanced", ManagementServer.class, Integer.class, "api.throttling.cachesize", "50000", "Account based API count cache size", null),
 +
 +    // object store
 +    S3EnableRRS("Advanced", ManagementServer.class, Boolean.class, "s3.rrs.enabled", "false", "enable s3 reduced redundancy storage", null),
 +    S3MaxSingleUploadSize(
 +            "Advanced",
 +            ManagementServer.class,
 +            Integer.class,
 +            "s3.singleupload.max.size",
 +            "5",
 +            "The maximum size limit for S3 single part upload API(in GB). If it is set to 0, then it means always use multi-part upload to upload object to S3. "
 +                    + "If it is set to -1, then it means always use single-part upload to upload object to S3. ",
 +                    null),
 +
 +    // VMSnapshots
 +    VMSnapshotMax("Advanced", VMSnapshotManager.class, Integer.class, "vmsnapshot.max", "10", "Maximum vm snapshots for a vm", null),
 +    VMSnapshotCreateWait("Advanced", VMSnapshotManager.class, Integer.class, "vmsnapshot.create.wait", "1800", "Timeout (in seconds) for creating a VM snapshot", null),
 +
 +    CloudDnsName("Advanced", ManagementServer.class, String.class, "cloud.dns.name", null, "DNS name of the cloud for the GSLB service", null),
 +    InternalLbVmServiceOfferingId(
 +            "Advanced",
 +            ManagementServer.class,
 +            String.class,
 +            "internallbvm.service.offering",
 +            null,
 +            "Uuid of the service offering used by internal lb vm; if NULL - default system internal lb offering will be used",
 +            null),
 +    ExecuteInSequenceNetworkElementCommands(
 +            "Advanced",
 +            NetworkOrchestrationService.class,
 +            Boolean.class,
 +            "execute.in.sequence.network.element.commands",
 +            "false",
 +            "If set to true, DhcpEntryCommand, SavePasswordCommand, VmDataCommand will be synchronized on the agent side."
 +                    + " If set to false, these commands become asynchronous. Default value is false.",
 +                    null),
 +
 +    UCSSyncBladeInterval(
 +            "Advanced",
 +            ManagementServer.class,
 +            Integer.class,
 +            "ucs.sync.blade.interval",
 +            "3600",
 +            "the interval cloudstack sync with UCS manager for available blades in case user remove blades from chassis without notifying CloudStack",
 +            null),
 +
 +    RedundantRouterVrrpInterval(
 +            "Advanced",
 +            NetworkOrchestrationService.class,
 +            Integer.class,
 +            "router.redundant.vrrp.interval",
 +            "1",
 +            "seconds between VRRP broadcast. It would 3 times broadcast fail to trigger fail-over mechanism of redundant router",
 +            null),
 +
 +    RouterAggregationCommandEachTimeout(
 +            "Advanced",
 +            NetworkOrchestrationService.class,
 +            Integer.class,
 +            "router.aggregation.command.each.timeout",
 +            "600",
 +            "timeout in seconds for each Virtual Router command being aggregated. The final aggregation command timeout would be determined by this timeout * commands counts ",
 +            null),
 +
 +    ManagementServerVendor("Advanced", ManagementServer.class, String.class, "mgt.server.vendor", "ACS", "the vendor of management server", null),
 +    PublishActionEvent("Advanced", ManagementServer.class, Boolean.class, "publish.action.events", "true", "enable or disable publishing of action events on the event bus", null),
 +    PublishAlertEvent("Advanced", ManagementServer.class, Boolean.class, "publish.alert.events", "true", "enable or disable publishing of alert events on the event bus", null),
 +    PublishResourceStateEvent("Advanced", ManagementServer.class, Boolean.class, "publish.resource.state.events", "true", "enable or disable publishing of resource state events on the event bus", null),
 +    PublishUsageEvent("Advanced", ManagementServer.class, Boolean.class, "publish.usage.events", "true", "enable or disable publishing of usage events on the event bus", null),
 +    PublishAsynJobEvent("Advanced", ManagementServer.class, Boolean.class, "publish.async.job.events", "true", "enable or disable publishing of async job events on the event bus", null),
 +
 +    // StatsCollector
 +    StatsOutPutGraphiteHost("Advanced", ManagementServer.class, String.class, "stats.output.uri", "", "URI to additionally send StatsCollector statistics to", null),
 +
 +    SSVMPSK("Hidden", ManagementServer.class, String.class, "upload.post.secret.key", "", "PSK with SSVM", null);
 +
 +    private final String _category;
 +    private final Class<?> _componentClass;
 +    private final Class<?> _type;
 +    private final String _name;
 +    private final String _defaultValue;
 +    private final String _description;
 +    private final String _range;
 +    private final String _scope; // Parameter can be at different levels (Zone/cluster/pool/account), by default every parameter is at global
 +
 +    private static final HashMap<String, List<Config>> s_scopeLevelConfigsMap = new HashMap<String, List<Config>>();
 +    static {
 +        s_scopeLevelConfigsMap.put(ConfigKey.Scope.Zone.toString(), new ArrayList<Config>());
 +        s_scopeLevelConfigsMap.put(ConfigKey.Scope.Cluster.toString(), new ArrayList<Config>());
 +        s_scopeLevelConfigsMap.put(ConfigKey.Scope.StoragePool.toString(), new ArrayList<Config>());
 +        s_scopeLevelConfigsMap.put(ConfigKey.Scope.Account.toString(), new ArrayList<Config>());
 +        s_scopeLevelConfigsMap.put(ConfigKey.Scope.Global.toString(), new ArrayList<Config>());
 +
 +        for (Config c : Config.values()) {
 +            //Creating group of parameters per each level (zone/cluster/pool/account)
 +            StringTokenizer tokens = new StringTokenizer(c.getScope(), ",");
 +            while (tokens.hasMoreTokens()) {
 +                String scope = tokens.nextToken().trim();
 +                List<Config> currentConfigs = s_scopeLevelConfigsMap.get(scope);
 +                currentConfigs.add(c);
 +                s_scopeLevelConfigsMap.put(scope, currentConfigs);
 +            }
 +        }
 +    }
 +
 +    private static final HashMap<String, List<Config>> Configs = new HashMap<String, List<Config>>();
 +    static {
 +        // Add categories
 +        Configs.put("Alert", new ArrayList<Config>());
 +        Configs.put("Storage", new ArrayList<Config>());
 +        Configs.put("Snapshots", new ArrayList<Config>());
 +        Configs.put("Network", new ArrayList<Config>());
 +        Configs.put("Usage", new ArrayList<Config>());
 +        Configs.put("Console Proxy", new ArrayList<Config>());
 +        Configs.put("Advanced", new ArrayList<Config>());
 +        Configs.put("Usage", new ArrayList<Config>());
 +        Configs.put("Developer", new ArrayList<Config>());
 +        Configs.put("Hidden", new ArrayList<Config>());
 +        Configs.put("Account Defaults", new ArrayList<Config>());
 +        Configs.put("Domain Defaults", new ArrayList<Config>());
 +        Configs.put("Project Defaults", new ArrayList<Config>());
 +        Configs.put("Secure", new ArrayList<Config>());
 +
 +        // Add values into HashMap
 +        for (Config c : Config.values()) {
 +            String category = c.getCategory();
 +            List<Config> currentConfigs = Configs.get(category);
 +            currentConfigs.add(c);
 +            Configs.put(category, currentConfigs);
 +        }
 +    }
 +
 +    private Config(String category, Class<?> componentClass, Class<?> type, String name, String defaultValue, String description, String range) {
 +        _category = category;
 +        _componentClass = componentClass;
 +        _type = type;
 +        _name = name;
 +        _defaultValue = defaultValue;
 +        _description = description;
 +        _range = range;
 +        _scope = ConfigKey.Scope.Global.toString();
 +    }
 +
 +    public String getCategory() {
 +        return _category;
 +    }
 +
 +    public String key() {
 +        return _name;
 +    }
 +
 +    public String getDescription() {
 +        return _description;
 +    }
 +
 +    public String getDefaultValue() {
 +        return _defaultValue;
 +    }
 +
 +    public Class<?> getType() {
 +        return _type;
 +    }
 +
 +    public String getScope() {
 +        return _scope;
 +    }
 +
 +    public String getComponent() {
 +        if (_componentClass == ManagementServer.class) {
 +            return "management-server";
 +        } else if (_componentClass == AgentManager.class) {
 +            return "AgentManager";
 +        } else if (_componentClass == UserVmManager.class) {
 +            return "UserVmManager";
 +        } else if (_componentClass == HighAvailabilityManager.class) {
 +            return "HighAvailabilityManager";
 +        } else if (_componentClass == StoragePoolAllocator.class) {
 +            return "StorageAllocator";
 +        } else if (_componentClass == NetworkOrchestrationService.class) {
 +            return "NetworkManager";
 +        } else if (_componentClass == StorageManager.class) {
 +            return "StorageManager";
 +        } else if (_componentClass == TemplateManager.class) {
 +            return "TemplateManager";
 +        } else if (_componentClass == VpcManager.class) {
 +            return "VpcManager";
 +        } else if (_componentClass == SnapshotManager.class) {
 +            return "SnapshotManager";
 +        } else if (_componentClass == VMSnapshotManager.class) {
 +            return "VMSnapshotManager";
 +        } else {
 +            return "none";
 +        }
 +    }
 +
 +    public String getRange() {
 +        return _range;
 +    }
 +
 +    @Override
 +    public String toString() {
 +        return _name;
 +    }
 +
 +    public static List<Config> getConfigs(String category) {
 +        return Configs.get(category);
 +    }
 +
 +    public static Config getConfig(String name) {
 +        List<String> categories = getCategories();
 +        for (String category : categories) {
 +            List<Config> currentList = getConfigs(category);
 +            for (Config c : currentList) {
 +                if (c.key().equals(name)) {
 +                    return c;
 +                }
 +            }
 +        }
 +
 +        return null;
 +    }
 +
 +    public static List<String> getCategories() {
 +        Object[] keys = Configs.keySet().toArray();
 +        List<String> categories = new ArrayList<String>();
 +        for (Object key : keys) {
 +            categories.add((String)key);
 +        }
 +        return categories;
 +    }
 +}
diff --cc server/src/main/java/com/cloud/resource/ResourceManagerImpl.java
index 8bc97cb,0000000..27fa42c
mode 100755,000000..100755
--- a/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java
+++ b/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java
@@@ -1,2954 -1,0 +1,2986 @@@
 +// Licensed to the Apache Software Foundation (ASF) under one
 +// or more contributor license agreements.  See the NOTICE file
 +// distributed with this work for additional information
 +// regarding copyright ownership.  The ASF licenses this file
 +// to you under the Apache License, Version 2.0 (the
 +// "License"); you may not use this file except in compliance
 +// with the License.  You may obtain a copy of the License at
 +//
 +//   http://www.apache.org/licenses/LICENSE-2.0
 +//
 +// Unless required by applicable law or agreed to in writing,
 +// software distributed under the License is distributed on an
 +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 +// KIND, either express or implied.  See the License for the
 +// specific language governing permissions and limitations
 +// under the License.
 +package com.cloud.resource;
 +
 +import java.net.URI;
 +import java.net.URISyntaxException;
 +import java.net.URLDecoder;
 +import java.util.ArrayList;
 +import java.util.Collections;
 +import java.util.HashMap;
 +import java.util.Iterator;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.Random;
 +import java.util.concurrent.ConcurrentHashMap;
 +
 +import javax.inject.Inject;
 +import javax.naming.ConfigurationException;
 +
++import com.cloud.utils.Pair;
 +import com.cloud.vm.dao.UserVmDetailsDao;
 +import org.apache.cloudstack.framework.config.ConfigKey;
 +import org.apache.commons.lang.ObjectUtils;
 +import org.apache.log4j.Logger;
 +import org.springframework.stereotype.Component;
 +
 +import org.apache.cloudstack.api.ApiConstants;
 +import org.apache.cloudstack.api.command.admin.cluster.AddClusterCmd;
 +import org.apache.cloudstack.api.command.admin.cluster.DeleteClusterCmd;
 +import org.apache.cloudstack.api.command.admin.host.AddHostCmd;
 +import org.apache.cloudstack.api.command.admin.host.AddSecondaryStorageCmd;
 +import org.apache.cloudstack.api.command.admin.host.CancelMaintenanceCmd;
 +import org.apache.cloudstack.api.command.admin.host.PrepareForMaintenanceCmd;
 +import org.apache.cloudstack.api.command.admin.host.ReconnectHostCmd;
 +import org.apache.cloudstack.api.command.admin.host.UpdateHostCmd;
 +import org.apache.cloudstack.api.command.admin.host.UpdateHostPasswordCmd;
 +import org.apache.cloudstack.context.CallContext;
 +import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
 +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
 +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
 +import org.apache.cloudstack.utils.identity.ManagementServerNode;
 +import org.apache.commons.collections.CollectionUtils;
 +
 +import com.cloud.agent.AgentManager;
 +import com.cloud.agent.api.Answer;
 +import com.cloud.agent.api.Command;
 +import com.cloud.agent.api.GetVncPortCommand;
 +import com.cloud.agent.api.GetVncPortAnswer;
 +import com.cloud.agent.api.GetGPUStatsAnswer;
 +import com.cloud.agent.api.GetGPUStatsCommand;
 +import com.cloud.agent.api.GetHostStatsAnswer;
 +import com.cloud.agent.api.GetHostStatsCommand;
 +import com.cloud.agent.api.MaintainAnswer;
 +import com.cloud.agent.api.MaintainCommand;
 +import com.cloud.agent.api.PropagateResourceEventCommand;
 +import com.cloud.agent.api.StartupCommand;
 +import com.cloud.agent.api.StartupRoutingCommand;
 +import com.cloud.agent.api.UnsupportedAnswer;
 +import com.cloud.agent.api.UpdateHostPasswordCommand;
 +import com.cloud.agent.api.VgpuTypesInfo;
 +import com.cloud.agent.api.to.GPUDeviceTO;
 +import com.cloud.agent.transport.Request;
 +import com.cloud.capacity.Capacity;
 +import com.cloud.capacity.CapacityManager;
 +import com.cloud.capacity.CapacityState;
 +import com.cloud.capacity.CapacityVO;
 +import com.cloud.capacity.dao.CapacityDao;
 +import com.cloud.cluster.ClusterManager;
 +import com.cloud.configuration.Config;
 +import com.cloud.configuration.ConfigurationManager;
 +import com.cloud.dc.ClusterDetailsDao;
 +import com.cloud.dc.ClusterDetailsVO;
 +import com.cloud.dc.ClusterVO;
 +import com.cloud.dc.DataCenter;
 +import com.cloud.dc.DataCenter.NetworkType;
 +import com.cloud.dc.DataCenterIpAddressVO;
 +import com.cloud.dc.DataCenterVO;
 +import com.cloud.dc.DedicatedResourceVO;
 +import com.cloud.dc.HostPodVO;
 +import com.cloud.dc.PodCluster;
 +import com.cloud.dc.dao.ClusterDao;
 +import com.cloud.dc.dao.ClusterVSMMapDao;
 +import com.cloud.dc.dao.DataCenterDao;
 +import com.cloud.dc.dao.DataCenterIpAddressDao;
 +import com.cloud.dc.dao.DedicatedResourceDao;
 +import com.cloud.dc.dao.HostPodDao;
 +import com.cloud.deploy.PlannerHostReservationVO;
 +import com.cloud.deploy.dao.PlannerHostReservationDao;
 +import com.cloud.event.ActionEvent;
 +import com.cloud.event.ActionEventUtils;
 +import com.cloud.event.EventTypes;
 +import com.cloud.event.EventVO;
 +import com.cloud.exception.AgentUnavailableException;
 +import com.cloud.exception.DiscoveryException;
 +import com.cloud.exception.InvalidParameterValueException;
 +import com.cloud.exception.PermissionDeniedException;
 +import com.cloud.exception.ResourceInUseException;
 +import com.cloud.gpu.GPU;
 +import com.cloud.gpu.HostGpuGroupsVO;
 +import com.cloud.gpu.VGPUTypesVO;
 +import com.cloud.gpu.dao.HostGpuGroupsDao;
 +import com.cloud.gpu.dao.VGPUTypesDao;
 +import com.cloud.ha.HighAvailabilityManager;
 +import com.cloud.ha.HighAvailabilityManager.WorkType;
 +import com.cloud.host.DetailVO;
 +import com.cloud.host.Host;
 +import com.cloud.host.Host.Type;
 +import com.cloud.host.HostStats;
 +import com.cloud.host.HostVO;
 +import com.cloud.host.Status;
 +import com.cloud.host.Status.Event;
 +import com.cloud.host.dao.HostDao;
 +import com.cloud.host.dao.HostDetailsDao;
 +import com.cloud.host.dao.HostTagsDao;
 +import com.cloud.hypervisor.Hypervisor;
 +import com.cloud.hypervisor.Hypervisor.HypervisorType;
 +import com.cloud.hypervisor.kvm.discoverer.KvmDummyResourceBase;
 +import com.cloud.network.dao.IPAddressDao;
 +import com.cloud.network.dao.IPAddressVO;
 +import com.cloud.org.Cluster;
 +import com.cloud.org.Grouping;
 +import com.cloud.org.Managed;
 +import com.cloud.serializer.GsonHelper;
 +import com.cloud.service.dao.ServiceOfferingDetailsDao;
 +import com.cloud.storage.GuestOSCategoryVO;
 +import com.cloud.storage.StorageManager;
 +import com.cloud.storage.StoragePool;
 +import com.cloud.storage.StoragePoolHostVO;
 +import com.cloud.storage.StoragePoolStatus;
 +import com.cloud.storage.StorageService;
 +import com.cloud.storage.VMTemplateVO;
 +import com.cloud.storage.dao.GuestOSCategoryDao;
 +import com.cloud.storage.dao.StoragePoolHostDao;
 +import com.cloud.storage.dao.VMTemplateDao;
 +import com.cloud.user.Account;
 +import com.cloud.user.AccountManager;
 +import com.cloud.utils.StringUtils;
 +import com.cloud.utils.UriUtils;
 +import com.cloud.utils.component.Manager;
 +import com.cloud.utils.component.ManagerBase;
 +import com.cloud.utils.db.DB;
 +import com.cloud.utils.db.Filter;
 +import com.cloud.utils.db.GenericSearchBuilder;
 +import com.cloud.utils.db.GlobalLock;
 +import com.cloud.utils.db.JoinBuilder;
 +import com.cloud.utils.db.QueryBuilder;
 +import com.cloud.utils.db.SearchBuilder;
 +import com.cloud.utils.db.SearchCriteria;
 +import com.cloud.utils.db.SearchCriteria.Func;
 +import com.cloud.utils.db.SearchCriteria.Op;
 +import com.cloud.utils.db.Transaction;
 +import com.cloud.utils.db.TransactionCallback;
 +import com.cloud.utils.db.TransactionCallbackNoReturn;
 +import com.cloud.utils.db.TransactionLegacy;
 +import com.cloud.utils.db.TransactionStatus;
 +import com.cloud.utils.exception.CloudRuntimeException;
 +import com.cloud.utils.fsm.NoTransitionException;
 +import com.cloud.utils.net.Ip;
 +import com.cloud.utils.net.NetUtils;
 +import com.cloud.utils.ssh.SSHCmdHelper;
 +import com.cloud.utils.ssh.SshException;
 +import com.cloud.vm.VMInstanceVO;
 +import com.cloud.vm.VirtualMachine;
 +import com.cloud.vm.VirtualMachine.State;
 +import com.cloud.vm.VirtualMachineManager;
 +import com.cloud.vm.dao.VMInstanceDao;
 +import com.google.gson.Gson;
 +
 +@Component
 +public class ResourceManagerImpl extends ManagerBase implements ResourceManager, ResourceService, Manager {
 +    private static final Logger s_logger = Logger.getLogger(ResourceManagerImpl.class);
 +
 +    Gson _gson;
 +
 +    @Inject
 +    private AccountManager _accountMgr;
 +    @Inject
 +    private AgentManager _agentMgr;
 +    @Inject
 +    private StorageManager _storageMgr;
 +    @Inject
 +    private DataCenterDao _dcDao;
 +    @Inject
 +    private HostPodDao _podDao;
 +    @Inject
 +    private ClusterDetailsDao _clusterDetailsDao;
 +    @Inject
 +    private ClusterDao _clusterDao;
 +    @Inject
 +    private CapacityDao _capacityDao;
 +    @Inject
 +    private HostDao _hostDao;
 +    @Inject
 +    private HostDetailsDao _hostDetailsDao;
 +    @Inject
 +    private ConfigurationDao _configDao;
 +    @Inject
 +    private HostTagsDao _hostTagsDao;
 +    @Inject
 +    private GuestOSCategoryDao _guestOSCategoryDao;
 +    @Inject
 +    protected HostGpuGroupsDao _hostGpuGroupsDao;
 +    @Inject
 +    protected VGPUTypesDao _vgpuTypesDao;
 +    @Inject
 +    private PrimaryDataStoreDao _storagePoolDao;
 +    @Inject
 +    private DataCenterIpAddressDao _privateIPAddressDao;
 +    @Inject
 +    private IPAddressDao _publicIPAddressDao;
 +    @Inject
 +    private VirtualMachineManager _vmMgr;
 +    @Inject
 +    private VMInstanceDao _vmDao;
 +    @Inject
 +    private HighAvailabilityManager _haMgr;
 +    @Inject
 +    private StorageService _storageSvr;
 +    @Inject
 +    PlannerHostReservationDao _plannerHostReserveDao;
 +    @Inject
 +    private DedicatedResourceDao _dedicatedDao;
 +    @Inject
 +    private ServiceOfferingDetailsDao _serviceOfferingDetailsDao;
 +
 +    private List<? extends Discoverer> _discoverers;
 +
 +    public List<? extends Discoverer> getDiscoverers() {
 +        return _discoverers;
 +    }
 +
 +    public void setDiscoverers(final List<? extends Discoverer> discoverers) {
 +        _discoverers = discoverers;
 +    }
 +
 +    @Inject
 +    private ClusterManager _clusterMgr;
 +    @Inject
 +    private StoragePoolHostDao _storagePoolHostDao;
 +
 +    @Inject
 +    private VMTemplateDao _templateDao;
 +    @Inject
 +    private ConfigurationManager _configMgr;
 +    @Inject
 +    private ClusterVSMMapDao _clusterVSMMapDao;
 +    @Inject
 +    private UserVmDetailsDao userVmDetailsDao;
 +
 +    private final long _nodeId = ManagementServerNode.getManagementServerId();
 +
 +    private final HashMap<String, ResourceStateAdapter> _resourceStateAdapters = new HashMap<String, ResourceStateAdapter>();
 +
 +    private final HashMap<Integer, List<ResourceListener>> _lifeCycleListeners = new HashMap<Integer, List<ResourceListener>>();
 +    private HypervisorType _defaultSystemVMHypervisor;
 +
 +    private static final int ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_COOPERATION = 30; // seconds
 +
 +    private GenericSearchBuilder<HostVO, String> _hypervisorsInDC;
 +
 +    private SearchBuilder<HostGpuGroupsVO> _gpuAvailability;
 +
 +    private Map<Long,Integer> retryHostMaintenance = new ConcurrentHashMap<>();
 +
 +    private void insertListener(final Integer event, final ResourceListener listener) {
 +        List<ResourceListener> lst = _lifeCycleListeners.get(event);
 +        if (lst == null) {
 +            lst = new ArrayList<ResourceListener>();
 +            _lifeCycleListeners.put(event, lst);
 +        }
 +
 +        if (lst.contains(listener)) {
 +            throw new CloudRuntimeException("Duplicate resource lisener:" + listener.getClass().getSimpleName());
 +        }
 +
 +        lst.add(listener);
 +    }
 +
 +    @Override
 +    public void registerResourceEvent(final Integer event, final ResourceListener listener) {
 +        synchronized (_lifeCycleListeners) {
 +            if ((event & ResourceListener.EVENT_DISCOVER_BEFORE) != 0) {
 +                insertListener(ResourceListener.EVENT_DISCOVER_BEFORE, listener);
 +            }
 +            if ((event & ResourceListener.EVENT_DISCOVER_AFTER) != 0) {
 +                insertListener(ResourceListener.EVENT_DISCOVER_AFTER, listener);
 +            }
 +            if ((event & ResourceListener.EVENT_DELETE_HOST_BEFORE) != 0) {
 +                insertListener(ResourceListener.EVENT_DELETE_HOST_BEFORE, listener);
 +            }
 +            if ((event & ResourceListener.EVENT_DELETE_HOST_AFTER) != 0) {
 +                insertListener(ResourceListener.EVENT_DELETE_HOST_AFTER, listener);
 +            }
 +            if ((event & ResourceListener.EVENT_CANCEL_MAINTENANCE_BEFORE) != 0) {
 +                insertListener(ResourceListener.EVENT_CANCEL_MAINTENANCE_BEFORE, listener);
 +            }
 +            if ((event & ResourceListener.EVENT_CANCEL_MAINTENANCE_AFTER) != 0) {
 +                insertListener(ResourceListener.EVENT_CANCEL_MAINTENANCE_AFTER, listener);
 +            }
 +            if ((event & ResourceListener.EVENT_PREPARE_MAINTENANCE_BEFORE) != 0) {
 +                insertListener(ResourceListener.EVENT_PREPARE_MAINTENANCE_BEFORE, listener);
 +            }
 +            if ((event & ResourceListener.EVENT_PREPARE_MAINTENANCE_AFTER) != 0) {
 +                insertListener(ResourceListener.EVENT_PREPARE_MAINTENANCE_AFTER, listener);
 +            }
 +        }
 +    }
 +
 +    @Override
 +    public void unregisterResourceEvent(final ResourceListener listener) {
 +        synchronized (_lifeCycleListeners) {
 +            final Iterator it = _lifeCycleListeners.entrySet().iterator();
 +            while (it.hasNext()) {
 +                final Map.Entry<Integer, List<ResourceListener>> items = (Map.Entry<Integer, List<ResourceListener>>)it.next();
 +                final List<ResourceListener> lst = items.getValue();
 +                lst.remove(listener);
 +            }
 +        }
 +    }
 +
 +    protected void processResourceEvent(final Integer event, final Object... params) {
 +        final List<ResourceListener> lst = _lifeCycleListeners.get(event);
 +        if (lst == null || lst.size() == 0) {
 +            return;
 +        }
 +
 +        String eventName;
 +        for (final ResourceListener l : lst) {
 +            if (event.equals(ResourceListener.EVENT_DISCOVER_BEFORE)) {
 +                l.processDiscoverEventBefore((Long)params[0], (Long)params[1], (Long)params[2], (URI)params[3], (String)params[4], (String)params[5],
 +                        (List<String>)params[6]);
 +                eventName = "EVENT_DISCOVER_BEFORE";
 +            } else if (event.equals(ResourceListener.EVENT_DISCOVER_AFTER)) {
 +                l.processDiscoverEventAfter((Map<? extends ServerResource, Map<String, String>>)params[0]);
 +                eventName = "EVENT_DISCOVER_AFTER";
 +            } else if (event.equals(ResourceListener.EVENT_DELETE_HOST_BEFORE)) {
 +                l.processDeleteHostEventBefore((HostVO)params[0]);
 +                eventName = "EVENT_DELETE_HOST_BEFORE";
 +            } else if (event.equals(ResourceListener.EVENT_DELETE_HOST_AFTER)) {
 +                l.processDeletHostEventAfter((HostVO)params[0]);
 +                eventName = "EVENT_DELETE_HOST_AFTER";
 +            } else if (event.equals(ResourceListener.EVENT_CANCEL_MAINTENANCE_BEFORE)) {
 +                l.processCancelMaintenaceEventBefore((Long)params[0]);
 +                eventName = "EVENT_CANCEL_MAINTENANCE_BEFORE";
 +            } else if (event.equals(ResourceListener.EVENT_CANCEL_MAINTENANCE_AFTER)) {
 +                l.processCancelMaintenaceEventAfter((Long)params[0]);
 +                eventName = "EVENT_CANCEL_MAINTENANCE_AFTER";
 +            } else if (event.equals(ResourceListener.EVENT_PREPARE_MAINTENANCE_BEFORE)) {
 +                l.processPrepareMaintenaceEventBefore((Long)params[0]);
 +                eventName = "EVENT_PREPARE_MAINTENANCE_BEFORE";
 +            } else if (event.equals(ResourceListener.EVENT_PREPARE_MAINTENANCE_AFTER)) {
 +                l.processPrepareMaintenaceEventAfter((Long)params[0]);
 +                eventName = "EVENT_PREPARE_MAINTENANCE_AFTER";
 +            } else {
 +                throw new CloudRuntimeException("Unknown resource event:" + event);
 +            }
 +            s_logger.debug("Sent resource event " + eventName + " to listener " + l.getClass().getSimpleName());
 +        }
 +
 +    }
 +
 +    @DB
 +    @Override
 +    public List<? extends Cluster> discoverCluster(final AddClusterCmd cmd) throws IllegalArgumentException, DiscoveryException, ResourceInUseException {
 +        final long dcId = cmd.getZoneId();
 +        final long podId = cmd.getPodId();
 +        final String clusterName = cmd.getClusterName();
 +        String url = cmd.getUrl();
 +        final String username = cmd.getUsername();
 +        final String password = cmd.getPassword();
 +
 +        if (url != null) {
 +            url = URLDecoder.decode(url);
 +        }
 +
 +        URI uri = null;
 +
 +        // Check if the zone exists in the system
 +        final DataCenterVO zone = _dcDao.findById(dcId);
 +        if (zone == null) {
 +            final InvalidParameterValueException ex = new InvalidParameterValueException("Can't find zone by the id specified");
 +            ex.addProxyObject(String.valueOf(dcId), "dcId");
 +            throw ex;
 +        }
 +
 +        final Account account = CallContext.current().getCallingAccount();
 +        if (Grouping.AllocationState.Disabled == zone.getAllocationState() && !_accountMgr.isRootAdmin(account.getId())) {
 +            final PermissionDeniedException ex = new PermissionDeniedException("Cannot perform this operation, Zone with specified id is currently disabled");
 +            ex.addProxyObject(zone.getUuid(), "dcId");
 +            throw ex;
 +        }
 +
 +        final HostPodVO pod = _podDao.findById(podId);
 +        if (pod == null) {
 +            throw new InvalidParameterValueException("Can't find pod with specified podId " + podId);
 +        }
 +
 +        // Check if the pod exists in the system
 +        if (_podDao.findById(podId) == null) {
 +            throw new InvalidParameterValueException("Can't find pod by id " + podId);
 +        }
 +        // check if pod belongs to the zone
 +        if (!Long.valueOf(pod.getDataCenterId()).equals(dcId)) {
 +            final InvalidParameterValueException ex = new InvalidParameterValueException("Pod with specified id doesn't belong to the zone " + dcId);
 +            ex.addProxyObject(pod.getUuid(), "podId");
 +            ex.addProxyObject(zone.getUuid(), "dcId");
 +            throw ex;
 +        }
 +
 +        // Verify cluster information and create a new cluster if needed
 +        if (clusterName == null || clusterName.isEmpty()) {
 +            throw new InvalidParameterValueException("Please specify cluster name");
 +        }
 +
 +        if (cmd.getHypervisor() == null || cmd.getHypervisor().isEmpty()) {
 +            throw new InvalidParameterValueException("Please specify a hypervisor");
 +        }
 +
 +        final Hypervisor.HypervisorType hypervisorType = Hypervisor.HypervisorType.getType(cmd.getHypervisor());
 +        if (hypervisorType == null) {
 +            s_logger.error("Unable to resolve " + cmd.getHypervisor() + " to a valid supported hypervisor type");
 +            throw new InvalidParameterValueException("Unable to resolve " + cmd.getHypervisor() + " to a supported ");
 +        }
 +
 +        if (zone.isSecurityGroupEnabled() && zone.getNetworkType().equals(NetworkType.Advanced)) {
 +            if (hypervisorType != HypervisorType.KVM && hypervisorType != HypervisorType.XenServer
 +                    && hypervisorType != HypervisorType.LXC && hypervisorType != HypervisorType.Simulator) {
 +                throw new InvalidParameterValueException("Don't support hypervisor type " + hypervisorType + " in advanced security enabled zone");
 +            }
 +        }
 +
 +        Cluster.ClusterType clusterType = null;
 +        if (cmd.getClusterType() != null && !cmd.getClusterType().isEmpty()) {
 +            clusterType = Cluster.ClusterType.valueOf(cmd.getClusterType());
 +        }
 +        if (clusterType == null) {
 +            clusterType = Cluster.ClusterType.CloudManaged;
 +        }
 +
 +        Grouping.AllocationState allocationState = null;
 +        if (cmd.getAllocationState() != null && !cmd.getAllocationState().isEmpty()) {
 +            try {
 +                allocationState = Grouping.AllocationState.valueOf(cmd.getAllocationState());
 +            } catch (final IllegalArgumentException ex) {
 +                throw new InvalidParameterValueException("Unable to resolve Allocation State '" + cmd.getAllocationState() + "' to a supported state");
 +            }
 +        }
 +        if (allocationState == null) {
 +            allocationState = Grouping.AllocationState.Enabled;
 +        }
 +
 +        final Discoverer discoverer = getMatchingDiscover(hypervisorType);
 +        if (discoverer == null) {
 +
 +            throw new InvalidParameterValueException("Could not find corresponding resource manager for " + cmd.getHypervisor());
 +        }
 +
 +        if (hypervisorType == HypervisorType.VMware) {
 +            final Map<String, String> allParams = cmd.getFullUrlParams();
 +            discoverer.putParam(allParams);
 +        }
 +
 +        final List<ClusterVO> result = new ArrayList<ClusterVO>();
 +
 +        ClusterVO cluster = new ClusterVO(dcId, podId, clusterName);
 +        cluster.setHypervisorType(hypervisorType.toString());
 +
 +        cluster.setClusterType(clusterType);
 +        cluster.setAllocationState(allocationState);
 +        try {
 +            cluster = _clusterDao.persist(cluster);
 +        } catch (final Exception e) {
 +            // no longer tolerate exception during the cluster creation phase
 +            final CloudRuntimeException ex = new CloudRuntimeException("Unable to create cluster " + clusterName + " in pod and data center with specified ids", e);
 +            // Get the pod VO object's table name.
 +            ex.addProxyObject(pod.getUuid(), "podId");
 +            ex.addProxyObject(zone.getUuid(), "dcId");
 +            throw ex;
 +        }
 +        result.add(cluster);
 +
 +        if (clusterType == Cluster.ClusterType.CloudManaged) {
 +            final Map<String, String> details = new HashMap<String, String>();
 +            // should do this nicer perhaps ?
 +            if (hypervisorType == HypervisorType.Ovm3) {
 +                final Map<String, String> allParams = cmd.getFullUrlParams();
 +                details.put("ovm3vip", allParams.get("ovm3vip"));
 +                details.put("ovm3pool", allParams.get("ovm3pool"));
 +                details.put("ovm3cluster", allParams.get("ovm3cluster"));
 +            }
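 +            // Seed the new cluster's overcommit ratios from the overprovisioning factor settings.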
 +            details.put("cpuOvercommitRatio", CapacityManager.CpuOverprovisioningFactor.value().toString());
 +            details.put("memoryOvercommitRatio", CapacityManager.MemOverprovisioningFactor.value().toString());
 +            _clusterDetailsDao.persist(cluster.getId(), details);
 +            return result;
 +        }
 +
 +        // save cluster details for later cluster/host cross-checking
 +        final Map<String, String> details = new HashMap<String, String>();
 +        details.put("url", url);
 +        details.put("username", username);
 +        details.put("password", password);
 +        details.put("cpuOvercommitRatio", CapacityManager.CpuOverprovisioningFactor.value().toString());
 +        details.put("memoryOvercommitRatio", CapacityManager.MemOverprovisioningFactor.value().toString());
 +        _clusterDetailsDao.persist(cluster.getId(), details);
 +
 +        boolean success = false;
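 +        // If discovery of the external cluster fails, the finally block below removes the cluster record and its details.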
 +        try {
 +            try {
 +                uri = new URI(UriUtils.encodeURIComponent(url));
 +                if (uri.getScheme() == null) {
 +                    throw new InvalidParameterValueException("uri.scheme is null " + url + ", add http:// as a prefix");
 +                } else if (uri.getScheme().equalsIgnoreCase("http")) {
 +                    if (uri.getHost() == null || uri.getHost().equalsIgnoreCase("") || uri.getPath() == null || uri.getPath().equalsIgnoreCase("")) {
 +                        throw new InvalidParameterValueException("Your host and/or path is wrong.  Make sure it's of the format http://hostname/path");
 +                    }
 +                }
 +            } catch (final URISyntaxException e) {
 +                throw new InvalidParameterValueException(url + " is not a valid uri");
 +            }
 +
 +            final List<HostVO> hosts = new ArrayList<HostVO>();
 +            Map<? extends ServerResource, Map<String, String>> resources = null;
 +            resources = discoverer.find(dcId, podId, cluster.getId(), uri, username, password, null);
 +
 +            if (resources != null) {
 +                for (final Map.Entry<? extends ServerResource, Map<String, String>> entry : resources.entrySet()) {
 +                    final ServerResource resource = entry.getKey();
 +
 +                    final HostVO host = (HostVO)createHostAndAgent(resource, entry.getValue(), true, null, false);
 +                    if (host != null) {
 +                        hosts.add(host);
 +                    }
 +                    discoverer.postDiscovery(hosts, _nodeId);
 +                }
 +                s_logger.info("External cluster has been successfully discovered by " + discoverer.getName());
 +                success = true;
 +                return result;
 +            }
 +
 +            s_logger.warn("Unable to find the server resources at " + url);
 +            throw new DiscoveryException("Unable to add the external cluster");
 +        } finally {
 +            if (!success) {
 +                _clusterDetailsDao.deleteDetails(cluster.getId());
 +                _clusterDao.remove(cluster.getId());
 +            }
 +        }
 +    }
 +
 +    @Override
 +    public Discoverer getMatchingDiscover(final Hypervisor.HypervisorType hypervisorType) {
 +        for (final Discoverer discoverer : _discoverers) {
 +            if (discoverer.getHypervisorType() == hypervisorType) {
 +                return discoverer;
 +            }
 +        }
 +        return null;
 +    }
 +
 +    @Override
 +    public List<? extends Host> discoverHosts(final AddHostCmd cmd) throws IllegalArgumentException, DiscoveryException, InvalidParameterValueException {
 +        Long dcId = cmd.getZoneId();
 +        final Long podId = cmd.getPodId();
 +        final Long clusterId = cmd.getClusterId();
 +        String clusterName = cmd.getClusterName();
 +        final String url = cmd.getUrl();
 +        final String username = cmd.getUsername();
 +        final String password = cmd.getPassword();
 +        final List<String> hostTags = cmd.getHostTags();
 +
 +        dcId = _accountMgr.checkAccessAndSpecifyAuthority(CallContext.current().getCallingAccount(), dcId);
 +
 +        // this is for standalone option
 +        if (clusterName == null && clusterId == null) {
 +            clusterName = "Standalone-" + url;
 +        }
 +
 +        if (clusterId != null) {
 +            final ClusterVO cluster = _clusterDao.findById(clusterId);
 +            if (cluster == null) {
 +                final InvalidParameterValueException ex = new InvalidParameterValueException("can not find cluster for specified clusterId");
 +                ex.addProxyObject(clusterId.toString(), "clusterId");
 +                throw ex;
 +            } else {
 +                if (cluster.getGuid() == null) {
 +                    final List<HostVO> hosts = listAllHostsInCluster(clusterId);
 +                    if (!hosts.isEmpty()) {
 +                        final CloudRuntimeException ex =
 +                                new CloudRuntimeException("Guid is not updated for cluster with specified cluster id; need to wait for hosts in this cluster to come up");
 +                        ex.addProxyObject(cluster.getUuid(), "clusterId");
 +                        throw ex;
 +                    }
 +                }
 +            }
 +        }
 +
 +        return discoverHostsFull(dcId, podId, clusterId, clusterName, url, username, password, cmd.getHypervisor(), hostTags, cmd.getFullUrlParams(), false);
 +    }
 +
 +    @Override
 +    public List<? extends Host> discoverHosts(final AddSecondaryStorageCmd cmd) throws IllegalArgumentException, DiscoveryException, InvalidParameterValueException {
 +        final Long dcId = cmd.getZoneId();
 +        final String url = cmd.getUrl();
 +        return discoverHostsFull(dcId, null, null, null, url, null, null, "SecondaryStorage", null, null, false);
 +    }
 +
 +    private List<HostVO> discoverHostsFull(final Long dcId, final Long podId, Long clusterId, final String clusterName, String url, String username, String password,
 +            final String hypervisorType, final List<String> hostTags, final Map<String, String> params, final boolean deferAgentCreation) throws IllegalArgumentException, DiscoveryException,
 +            InvalidParameterValueException {
 +        URI uri = null;
 +
 +        // Check if the zone exists in the system
 +        final DataCenterVO zone = _dcDao.findById(dcId);
 +        if (zone == null) {
 +            throw new InvalidParameterValueException("Can't find zone by id " + dcId);
 +        }
 +
 +        final Account account = CallContext.current().getCallingAccount();
 +        if (Grouping.AllocationState.Disabled == zone.getAllocationState() && !_accountMgr.isRootAdmin(account.getId())) {
 +            final PermissionDeniedException ex = new PermissionDeniedException("Cannot perform this operation, Zone with specified id is currently disabled");
 +            ex.addProxyObject(zone.getUuid(), "dcId");
 +            throw ex;
 +        }
 +
 +        // Check if the pod exists in the system
 +        if (podId != null) {
 +            final HostPodVO pod = _podDao.findById(podId);
 +            if (pod == null) {
 +                throw new InvalidParameterValueException("Can't find pod by id " + podId);
 +            }
 +            // check if pod belongs to the zone
 +            if (!Long.valueOf(pod.getDataCenterId()).equals(dcId)) {
 +                final InvalidParameterValueException ex =
 +                        new InvalidParameterValueException("Pod with specified podId " + podId + " doesn't belong to the zone with specified zoneId " + dcId);
 +                ex.addProxyObject(pod.getUuid(), "podId");
 +                ex.addProxyObject(zone.getUuid(), "dcId");
 +                throw ex;
 +            }
 +        }
 +
 +        // Verify cluster information and create a new cluster if needed
 +        if (clusterName != null && clusterId != null) {
 +            throw new InvalidParameterValueException("Can't specify cluster by both id and name");
 +        }
 +
 +        if (hypervisorType == null || hypervisorType.isEmpty()) {
 +            throw new InvalidParameterValueException("Need to specify Hypervisor Type");
 +        }
 +
 +        if ((clusterName != null || clusterId != null) && podId == null) {
 +            throw new InvalidParameterValueException("Can't specify cluster without specifying the pod");
 +        }
 +
 +        if (clusterId != null) {
 +            if (_clusterDao.findById(clusterId) == null) {
 +                throw new InvalidParameterValueException("Can't find cluster by id " + clusterId);
 +            }
 +
 +            if (hypervisorType.equalsIgnoreCase(HypervisorType.VMware.toString())) {
 +                // VMware only allows adding a host to an existing cluster. Since the
 +                // cluster object already holds most of the required information, we
 +                // construct the necessary connection details here to simplify user input.
 +                final Map<String, String> clusterDetails = _clusterDetailsDao.findDetails(clusterId);
 +                username = clusterDetails.get("username");
 +                assert username != null;
 +
 +                password = clusterDetails.get("password");
 +                assert password != null;
 +
 +                try {
 +                    uri = new URI(UriUtils.encodeURIComponent(url));
 +
 +                    url = clusterDetails.get("url") + "/" + uri.getHost();
 +                } catch (final URISyntaxException e) {
 +                    throw new InvalidParameterValueException(url + " is not a valid uri");
 +                }
 +            }
 +        }
 +
 +        if ((hypervisorType.equalsIgnoreCase(HypervisorType.BareMetal.toString()))) {
 +            if (hostTags.isEmpty()) {
 +                throw new InvalidParameterValueException("A host tag is mandatory when adding a host of type BareMetal");
 +            }
 +        }
 +
 +        if (clusterName != null) {
 +            final HostPodVO pod = _podDao.findById(podId);
 +            if (pod == null) {
 +                throw new InvalidParameterValueException("Can't find pod by id " + podId);
 +            }
 +            ClusterVO cluster = new ClusterVO(dcId, podId, clusterName);
 +            cluster.setHypervisorType(hypervisorType);
 +            try {
 +                cluster = _clusterDao.persist(cluster);
 +            } catch (final Exception e) {
 +                cluster = _clusterDao.findBy(clusterName, podId);
 +                if (cluster == null) {
 +                    final CloudRuntimeException ex =
 +                            new CloudRuntimeException("Unable to create cluster " + clusterName + " in pod with specified podId and data center with specified dcID", e);
 +                    ex.addProxyObject(pod.getUuid(), "podId");
 +                    ex.addProxyObject(zone.getUuid(), "dcId");
 +                    throw ex;
 +                }
 +            }
 +            clusterId = cluster.getId();
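 +            // Make sure the cluster (newly created or pre-existing) has default overcommit ratio details.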
 +            if (_clusterDetailsDao.findDetail(clusterId, "cpuOvercommitRatio") == null) {
 +                final ClusterDetailsVO cluster_cpu_detail = new ClusterDetailsVO(clusterId, "cpuOvercommitRatio", "1");
 +                final ClusterDetailsVO cluster_memory_detail = new ClusterDetailsVO(clusterId, "memoryOvercommitRatio", "1");
 +                _clusterDetailsDao.persist(cluster_cpu_detail);
 +                _clusterDetailsDao.persist(cluster_memory_detail);
 +            }
 +
 +        }
 +
 +        try {
 +            uri = new URI(UriUtils.encodeURIComponent(url));
 +            if (uri.getScheme() == null) {
 +                throw new InvalidParameterValueException("uri.scheme is null " + url + ", add nfs:// (or cifs://) as a prefix");
 +            } else if (uri.getScheme().equalsIgnoreCase("nfs")) {
 +                if (uri.getHost() == null || uri.getHost().equalsIgnoreCase("") || uri.getPath() == null || uri.getPath().equalsIgnoreCase("")) {
 +                    throw new InvalidParameterValueException("Your host and/or path is wrong.  Make sure it's of the format nfs://hostname/path");
 +                }
 +            } else if (uri.getScheme().equalsIgnoreCase("cifs")) {
 +                // Don't validate against a URI encoded URI.
 +                final URI cifsUri = new URI(url);
 +                final String warnMsg = UriUtils.getCifsUriParametersProblems(cifsUri);
 +                if (warnMsg != null) {
 +                    throw new InvalidParameterValueException(warnMsg);
 +                }
 +            }
 +        } catch (final URISyntaxException e) {
 +            throw new InvalidParameterValueException(url + " is not a valid uri");
 +        }
 +
 +        final List<HostVO> hosts = new ArrayList<HostVO>();
 +        s_logger.info("Trying to add a new host at " + url + " in data center " + dcId);
 +        boolean isHypervisorTypeSupported = false;
 +        for (final Discoverer discoverer : _discoverers) {
 +            if (params != null) {
 +                discoverer.putParam(params);
 +            }
 +
 +            if (!discoverer.matchHypervisor(hypervisorType)) {
 +                continue;
 +            }
 +            isHypervisorTypeSupported = true;
 +            Map<? extends ServerResource, Map<String, String>> resources = null;
 +
 +            processResourceEvent(ResourceListener.EVENT_DISCOVER_BEFORE, dcId, podId, clusterId, uri, username, password, hostTags);
 +            try {
 +                resources = discoverer.find(dcId, podId, clusterId, uri, username, password, hostTags);
 +            } catch (final DiscoveryException e) {
 +                throw e;
 +            } catch (final Exception e) {
 +                s_logger.info("Exception in host discovery process with discoverer: " + discoverer.getName() + ", skipping to the next discoverer if there is one", e);
 +            }
 +            processResourceEvent(ResourceListener.EVENT_DISCOVER_AFTER, resources);
 +
 +            if (resources != null) {
 +                for (final Map.Entry<? extends ServerResource, Map<String, String>> entry : resources.entrySet()) {
 +                    final ServerResource resource = entry.getKey();
 +                    /*
 +                     * For KVM, reaching this point means the KVM agent is
 +                     * already connected to the management server.
 +                     */
 +                    if (resource instanceof KvmDummyResourceBase) {
 +                        final Map<String, String> details = entry.getValue();
 +                        final String guid = details.get("guid");
 +                        final List<HostVO> kvmHosts = listAllUpAndEnabledHosts(Host.Type.Routing, clusterId, podId, dcId);
 +                        for (final HostVO host : kvmHosts) {
 +                            if (host.getGuid().equalsIgnoreCase(guid)) {
 +                                if (hostTags != null) {
 +                                    if (s_logger.isTraceEnabled()) {
 +                                        s_logger.trace("Adding host tags for KVM host: " + hostTags);
 +                                    }
 +                                    _hostTagsDao.persist(host.getId(), hostTags);
 +                                }
 +                                hosts.add(host);
 +
 +                                _agentMgr.notifyMonitorsOfNewlyAddedHost(host.getId());
 +
 +                                return hosts;
 +                            }
 +                        }
 +                        return null;
 +                    }
 +
 +                    HostVO host = null;
 +                    if (deferAgentCreation) {
 +                        host = (HostVO)createHostAndAgentDeferred(resource, entry.getValue(), true, hostTags, false);
 +                    } else {
 +                        host = (HostVO)createHostAndAgent(resource, entry.getValue(), true, hostTags, false);
 +                    }
 +                    if (host != null) {
 +                        hosts.add(host);
 +                    }
 +                    discoverer.postDiscovery(hosts, _nodeId);
 +
 +                }
 +                s_logger.info("server resources successfully discovered by " + discoverer.getName());
 +                return hosts;
 +            }
 +        }
 +        if (!isHypervisorTypeSupported) {
 +            final String msg = "Unsupported hypervisor type " + hypervisorType + " for " + url;
 +            s_logger.warn(msg);
 +            throw new DiscoveryException(msg);
 +        }
 +        s_logger.warn("Unable to find the server resources at " + url);
 +        throw new DiscoveryException("Unable to add the host");
 +    }
 +
 +    @Override
 +    public Host getHost(final long hostId) {
 +        return _hostDao.findById(hostId);
 +    }
 +
 +    @DB
 +    protected boolean doDeleteHost(final long hostId, final boolean isForced, final boolean isForceDeleteStorage) {
 +        _accountMgr.getActiveUser(CallContext.current().getCallingUserId());
 +        // Verify that host exists
 +        final HostVO host = _hostDao.findById(hostId);
 +        if (host == null) {
 +            throw new InvalidParameterValueException("Host with id " + hostId + " doesn't exist");
 +        }
 +        _accountMgr.checkAccessAndSpecifyAuthority(CallContext.current().getCallingAccount(), host.getDataCenterId());
 +
 +        if (!isForced && host.getResourceState() != ResourceState.Maintenance) {
 +            throw new CloudRuntimeException("Host " + host.getUuid() +
 +                    " cannot be deleted as it is not in maintenance mode. Either put the host into maintenance or perform a forced deletion.");
 +        }
 +        // Get storage pool host mappings here because they can be removed as a
 +        // part of handleDisconnect later
 +        final List<StoragePoolHostVO> pools = _storagePoolHostDao.listByHostIdIncludingRemoved(hostId);
 +
 +        final ResourceStateAdapter.DeleteHostAnswer answer =
 +                (ResourceStateAdapter.DeleteHostAnswer)dispatchToStateAdapters(ResourceStateAdapter.Event.DELETE_HOST, false, host, isForced,
 +                        isForceDeleteStorage);
 +
 +        if (answer == null) {
 +            throw new CloudRuntimeException("No resource adapter responds to DELETE_HOST event for " + host.getName() + " id = " + hostId + ", hypervisorType is " +
 +                    host.getHypervisorType() + ", host type is " + host.getType());
 +        }
 +
 +        if (answer.getIsException()) {
 +            return false;
 +        }
 +
 +        if (!answer.getIsContinue()) {
 +            return true;
 +        }
 +
 +        Long clusterId = host.getClusterId();
 +
 +        _agentMgr.notifyMonitorsOfHostAboutToBeRemoved(host.getId());
 +
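 +        // Remove the host and all of its related records (details, GPU entries, tags,
 +        // VM assignments, local storage, capacity) in a single database transaction.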
 +        Transaction.execute(new TransactionCallbackNoReturn() {
 +            @Override
 +            public void doInTransactionWithoutResult(final TransactionStatus status) {
 +                _dcDao.releasePrivateIpAddress(host.getPrivateIpAddress(), host.getDataCenterId(), null);
 +                _agentMgr.disconnectWithoutInvestigation(hostId, Status.Event.Remove);
 +
 +                // delete host details
 +                _hostDetailsDao.deleteDetails(hostId);
 +
 +                // if host is GPU enabled, delete GPU entries
 +                _hostGpuGroupsDao.deleteGpuEntries(hostId);
 +
 +                // delete host tags
 +                _hostTagsDao.deleteTags(hostId);
 +
 +                host.setGuid(null);
 +                final Long clusterId = host.getClusterId();
 +                host.setClusterId(null);
 +                _hostDao.update(host.getId(), host);
 +
 +                _hostDao.remove(hostId);
 +                if (clusterId != null) {
 +                    final List<HostVO> hosts = listAllHostsInCluster(clusterId);
 +                    if (hosts.size() == 0) {
 +                        final ClusterVO cluster = _clusterDao.findById(clusterId);
 +                        cluster.setGuid(null);
 +                        _clusterDao.update(clusterId, cluster);
 +                    }
 +                }
 +
 +                try {
 +                    resourceStateTransitTo(host, ResourceState.Event.DeleteHost, _nodeId);
 +                } catch (final NoTransitionException e) {
 +                    s_logger.debug("Cannot transit host " + host.getId() + " on DeleteHost event", e);
 +                }
 +
 +                // Delete the associated entries in host ref table
 +                _storagePoolHostDao.deletePrimaryRecordsForHost(hostId);
 +
 +                // Make sure any VMs that were marked as being on this host are cleaned up
 +                final List<VMInstanceVO> vms = _vmDao.listByHostId(hostId);
 +                for (final VMInstanceVO vm : vms) {
 +                    // this is how VirtualMachineManagerImpl does it when it syncs VM states
 +                    vm.setState(State.Stopped);
 +                    vm.setHostId(null);
 +                    _vmDao.persist(vm);
 +                }
 +
 +                // For the storage pools mapped to this host, remove local storage
 +                // pools when forced storage deletion was requested
 +                for (final StoragePoolHostVO pool : pools) {
 +                    final Long poolId = pool.getPoolId();
 +                    final StoragePoolVO storagePool = _storagePoolDao.findById(poolId);
 +                    if (storagePool.isLocal() && isForceDeleteStorage) {
 +                        storagePool.setUuid(null);
 +                        storagePool.setClusterId(null);
 +                        _storagePoolDao.update(poolId, storagePool);
 +                        _storagePoolDao.remove(poolId);
 +                        s_logger.debug("Local storage id=" + poolId + " is removed as a part of host removal id=" + hostId);
 +                    }
 +                }
 +
 +                // delete the op_host_capacity entry
 +                final Object[] capacityTypes = {Capacity.CAPACITY_TYPE_CPU, Capacity.CAPACITY_TYPE_MEMORY};
 +                final SearchCriteria<CapacityVO> hostCapacitySC = _capacityDao.createSearchCriteria();
 +                hostCapacitySC.addAnd("hostOrPoolId", SearchCriteria.Op.EQ, hostId);
 +                hostCapacitySC.addAnd("capacityType", SearchCriteria.Op.IN, capacityTypes);
 +                _capacityDao.remove(hostCapacitySC);
 +                // remove from dedicated resources
 +                final DedicatedResourceVO dr = _dedicatedDao.findByHostId(hostId);
 +                if (dr != null) {
 +                    _dedicatedDao.remove(dr.getId());
 +                }
 +            }
 +        });
 +
 +        if (clusterId != null) {
 +            _agentMgr.notifyMonitorsOfRemovedHost(host.getId(), clusterId);
 +        }
 +
 +        return true;
 +    }
 +
 +    @Override
 +    public boolean deleteHost(final long hostId, final boolean isForced, final boolean isForceDeleteStorage) {
 +        try {
 +            final Boolean result = propagateResourceEvent(hostId, ResourceState.Event.DeleteHost);
 +            if (result != null) {
 +                return result;
 +            }
 +        } catch (final AgentUnavailableException e) {
 +            return false;
 +        }
 +
 +        return doDeleteHost(hostId, isForced, isForceDeleteStorage);
 +    }
 +
 +    @Override
 +    @DB
 +    public boolean deleteCluster(final DeleteClusterCmd cmd) {
 +        try {
 +            Transaction.execute(new TransactionCallbackNoReturn() {
 +                @Override
 +                public void doInTransactionWithoutResult(final TransactionStatus status) {
 +                    final ClusterVO cluster = _clusterDao.lockRow(cmd.getId(), true);
 +                    if (cluster == null) {
 +                        if (s_logger.isDebugEnabled()) {
 +                            s_logger.debug("Cluster: " + cmd.getId() + " does not even exist.  Delete call is ignored.");
 +                        }
 +                        throw new CloudRuntimeException("Cluster: " + cmd.getId() + " does not exist");
 +                    }
 +
 +                    final Hypervisor.HypervisorType hypervisorType = cluster.getHypervisorType();
 +
 +                    final List<HostVO> hosts = listAllHostsInCluster(cmd.getId());
 +                    if (hosts.size() > 0) {
 +                        if (s_logger.isDebugEnabled()) {
 +                            s_logger.debug("Cluster: " + cmd.getId() + " still has hosts, can't remove");
 +                        }
 +                        throw new CloudRuntimeException("Cluster: " + cmd.getId() + " cannot be removed. Cluster still has hosts");
 +                    }
 +
 +                    // don't allow to remove the cluster if it has non-removed storage
 +                    // pools
 +                    final List<StoragePoolVO> storagePools = _storagePoolDao.listPoolsByCluster(cmd.getId());
 +                    if (storagePools.size() > 0) {
 +                        if (s_logger.isDebugEnabled()) {
 +                            s_logger.debug("Cluster: " + cmd.getId() + " still has storage pools, can't remove");
 +                        }
 +                        throw new CloudRuntimeException("Cluster: " + cmd.getId() + " cannot be removed. Cluster still has storage pools");
 +                    }
 +
 +                    if (_clusterDao.remove(cmd.getId())) {
 +                        _capacityDao.removeBy(null, null, null, cluster.getId(), null);
 +                        // If this cluster is of type vmware, and if the nexus vswitch
 +                        // global parameter setting is turned
 +                        // on, remove the row in cluster_vsm_map for this cluster id.
 +                        if (hypervisorType == HypervisorType.VMware && Boolean.parseBoolean(_configDao.getValue(Config.VmwareUseNexusVSwitch.toString()))) {
 +                            _clusterVSMMapDao.removeByClusterId(cmd.getId());
 +                        }
 +                        // remove from dedicated resources
 +                        final DedicatedResourceVO dr = _dedicatedDao.findByClusterId(cluster.getId());
 +                        if (dr != null) {
 +                            _dedicatedDao.remove(dr.getId());
 +                        }
 +                    }
 +
 +                }
 +            });
 +            return true;
 +        } catch (final CloudRuntimeException e) {
 +            throw e;
 +        } catch (final Throwable t) {
 +            s_logger.error("Unable to delete cluster: " + cmd.getId(), t);
 +            return false;
 +        }
 +    }
 +
 +    @Override
 +    @DB
 +    public Cluster updateCluster(final Cluster clusterToUpdate, final String clusterType, final String hypervisor, final String allocationState, final String managedstate) {
 +
 +        final ClusterVO cluster = (ClusterVO)clusterToUpdate;
 +        // Verify cluster information and update the cluster if needed
 +        boolean doUpdate = false;
 +
 +        if (hypervisor != null && !hypervisor.isEmpty()) {
 +            final Hypervisor.HypervisorType hypervisorType = Hypervisor.HypervisorType.getType(hypervisor);
 +            if (hypervisorType == null) {
 +                s_logger.error("Unable to resolve " + hypervisor + " to a valid supported hypervisor type");
 +                throw new InvalidParameterValueException("Unable to resolve " + hypervisor + " to a supported type");
 +            } else {
 +                cluster.setHypervisorType(hypervisor);
 +                doUpdate = true;
 +            }
 +        }
 +
 +        Cluster.ClusterType newClusterType = null;
 +        if (clusterType != null && !clusterType.isEmpty()) {
 +            try {
 +                newClusterType = Cluster.ClusterType.valueOf(clusterType);
 +            } catch (final IllegalArgumentException ex) {
 +                throw new InvalidParameterValueException("Unable to resolve " + clusterType + " to a supported type");
 +            }
 +            if (newClusterType == null) {
 +                s_logger.error("Unable to resolve " + clusterType + " to a valid supported cluster type");
 +                throw new InvalidParameterValueException("Unable to resolve " + clusterType + " to a supported type");
 +            } else {
 +                cluster.setClusterType(newClusterType);
 +                doUpdate = true;
 +            }
 +        }
 +
 +        Grouping.AllocationState newAllocationState = null;
 +        if (allocationState != null && !allocationState.isEmpty()) {
 +            try {
 +                newAllocationState = Grouping.AllocationState.valueOf(allocationState);
 +            } catch (final IllegalArgumentException ex) {
 +                throw new InvalidParameterValueException("Unable to resolve Allocation State '" + allocationState + "' to a supported state");
 +            }
 +            if (newAllocationState == null) {
 +                s_logger.error("Unable to resolve " + allocationState + " to a valid supported allocation State");
 +                throw new InvalidParameterValueException("Unable to resolve " + allocationState + " to a supported state");
 +            } else {
 +                cluster.setAllocationState(newAllocationState);
 +                doUpdate = true;
 +            }
 +        }
 +
 +        Managed.ManagedState newManagedState = null;
 +        final Managed.ManagedState oldManagedState = cluster.getManagedState();
 +        if (managedstate != null && !managedstate.isEmpty()) {
 +            try {
 +                newManagedState = Managed.ManagedState.valueOf(managedstate);
 +            } catch (final IllegalArgumentException ex) {
 +                throw new InvalidParameterValueException("Unable to resolve Managed State '" + managedstate + "' to a supported state");
 +            }
 +            if (newManagedState == null) {
 +                s_logger.error("Unable to resolve Managed State '" + managedstate + "' to a supported state");
 +                throw new InvalidParameterValueException("Unable to resolve Managed State '" + managedstate + "' to a supported state");
 +            } else {
 +                doUpdate = true;
 +            }
 +        }
 +
 +        if (doUpdate) {
 +            _clusterDao.update(cluster.getId(), cluster);
 +        }
 +
 +        if (newManagedState != null && !newManagedState.equals(oldManagedState)) {
 +            if (newManagedState.equals(Managed.ManagedState.Unmanaged)) {
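 +                // Unmanaging the cluster: mark it PrepareUnmanaged, disconnect every Up host,
 +                // then wait until no host in the cluster is left in the Up state.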
 +                boolean success = false;
 +                try {
 +                    cluster.setManagedState(Managed.ManagedState.PrepareUnmanaged);
 +                    _clusterDao.update(cluster.getId(), cluster);
 +                    List<HostVO> hosts = listAllHosts(Host.Type.Routing, cluster.getId(), cluster.getPodId(), cluster.getDataCenterId());
 +                    for (final HostVO host : hosts) {
 +                        if (host.getType().equals(Host.Type.Routing) && !host.getStatus().equals(Status.Down) && !host.getStatus().equals(Status.Disconnected) &&
 +                                !host.getStatus().equals(Status.Up) && !host.getStatus().equals(Status.Alert)) {
 +                            final String msg = "host " + host.getPrivateIpAddress() + " should not be in " + host.getStatus().toString() + " status";
 +                            throw new CloudRuntimeException("PrepareUnmanaged Failed due to " + msg);
 +                        }
 +                    }
 +
 +                    for (final HostVO host : hosts) {
 +                        if (host.getStatus().equals(Status.Up)) {
 +                            umanageHost(host.getId());
 +                        }
 +                    }
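 +                    // Poll up to 40 times, 5 seconds apart, until every host is Down, Disconnected or Alert.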
 +                    final int retry = 40;
 +                    boolean lsuccess = true;
 +                    for (int i = 0; i < retry; i++) {
 +                        lsuccess = true;
 +                        try {
 +                            Thread.sleep(5 * 1000);
 +                        } catch (final Exception e) {
 +                            // ignore the interruption and keep waiting
 +                        }
 +                        hosts = listAllUpAndEnabledHosts(Host.Type.Routing, cluster.getId(), cluster.getPodId(), cluster.getDataCenterId());
 +                        for (final HostVO host : hosts) {
 +                            if (!host.getStatus().equals(Status.Down) && !host.getStatus().equals(Status.Disconnected) && !host.getStatus().equals(Status.Alert)) {
 +                                lsuccess = false;
 +                                break;
 +                            }
 +                        }
 +                        if (lsuccess) {
 +                            success = true;
 +                            break;
 +                        }
 +                    }
 +                    if (!success) {
 +                        throw new CloudRuntimeException("PrepareUnmanaged failed because some hosts are still in Up status after the wait period, please try again later");
 +                    }
 +                } finally {
 +                    cluster.setManagedState(success ? Managed.ManagedState.Unmanaged : Managed.ManagedState.PrepareUnmanagedError);
 +                    _clusterDao.update(cluster.getId(), cluster);
 +                }
 +            } else if (newManagedState.equals(Managed.ManagedState.Managed)) {
 +                cluster.setManagedState(Managed.ManagedState.Managed);
 +                _clusterDao.update(cluster.getId(), cluster);
 +            }
 +
 +        }
 +
 +        return cluster;
 +    }
 +
 +    @Override
 +    public Host cancelMaintenance(final CancelMaintenanceCmd cmd) {
 +        final Long hostId = cmd.getId();
 +
 +        // verify input parameters
 +        final HostVO host = _hostDao.findById(hostId);
 +        if (host == null || host.getRemoved() != null) {
 +            throw new InvalidParameterValueException("Host with id " + hostId.toString() + " doesn't exist");
 +        }
 +
 +        processResourceEvent(ResourceListener.EVENT_CANCEL_MAINTENANCE_BEFORE, hostId);
 +        final boolean success = cancelMaintenance(hostId);
 +        processResourceEvent(ResourceListener.EVENT_CANCEL_MAINTENANCE_AFTER, hostId);
 +        if (!success) {
 +            throw new CloudRuntimeException("Internal error cancelling maintenance.");
 +        }
 +        return host;
 +    }
 +
 +    @Override
 +    public Host reconnectHost(ReconnectHostCmd cmd) throws AgentUnavailableException {
 +        Long hostId = cmd.getId();
 +
 +        HostVO host = _hostDao.findById(hostId);
 +        if (host == null) {
 +            throw new InvalidParameterValueException("Host with id " + hostId.toString() + " doesn't exist");
 +        }
 +        _agentMgr.reconnect(hostId);
 +        return host;
 +    }
 +
 +    @Override
 +    public boolean resourceStateTransitTo(final Host host, final ResourceState.Event event, final long msId) throws NoTransitionException {
 +        final ResourceState currentState = host.getResourceState();
 +        final ResourceState nextState = currentState.getNextState(event);
 +        if (nextState == null) {
 +            throw new NoTransitionException("No next resource state found for current state = " + currentState + " event = " + event);
 +        }
 +
 +        // TODO: Make it more granular and have better conversion into capacity type
 +        if(host.getType() == Type.Routing){
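 +            // Keep the host's capacity rows in sync with its resource state: only Enabled keeps capacity enabled.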
 +            final CapacityState capacityState =  nextState == ResourceState.Enabled ? CapacityState.Enabled : CapacityState.Disabled;
 +            final short[] capacityTypes = {Capacity.CAPACITY_TYPE_CPU, Capacity.CAPACITY_TYPE_MEMORY};
 +            _capacityDao.updateCapacityState(null, null, null, host.getId(), capacityState.toString(), capacityTypes);
 +
 +            final StoragePoolVO storagePool = _storageMgr.findLocalStorageOnHost(host.getId());
 +
 +            if(storagePool != null){
 +                final short[] capacityTypesLocalStorage = {Capacity.CAPACITY_TYPE_LOCAL_STORAGE};
 +                _capacityDao.updateCapacityState(null, null, null, storagePool.getId(), capacityState.toString(), capacityTypesLocalStorage);
 +            }
 +        }
 +        return _hostDao.updateResourceState(currentState, event, nextState, host);
 +    }
 +
 +    private boolean doMaintain(final long hostId) {
 +        final HostVO host = _hostDao.findById(hostId);
 +        final MaintainAnswer answer = (MaintainAnswer)_agentMgr.easySend(hostId, new MaintainCommand());
 +        if (answer == null || !answer.getResult()) {
 +            s_logger.warn("Unable to send MaintainCommand to host: " + hostId);
 +            return false;
 +        }
 +
 +        try {
 +            resourceStateTransitTo(host, ResourceState.Event.AdminAskMaintenace, _nodeId);
 +        } catch (final NoTransitionException e) {
 +            final String err = "Cannot transit resource state of host " + host.getId() + " to " + ResourceState.Maintenance;
 +            s_logger.debug(err, e);
 +            throw new CloudRuntimeException(err + ": " + e.getMessage());
 +        }
 +
 +        ActionEventUtils.onStartedActionEvent(CallContext.current().getCallingUserId(), CallContext.current().getCallingAccountId(), EventTypes.EVENT_MAINTENANCE_PREPARE, "starting maintenance for host " + hostId, true, 0);
 +        _agentMgr.pullAgentToMaintenance(hostId);
 +        setHostMaintenanceRetries(host);
 +
 +        /* TODO: move below to listener */
 +        if (host.getType() == Host.Type.Routing) {
 +
 +            final List<VMInstanceVO> vms = _vmDao.listByHostId(hostId);
 +            if (vms.size() == 0) {
 +                return true;
 +            }
 +
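 +            // Evacuate the host: schedule a migration for each VM, or a stop/restart when migration is not possible.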
 +            final List<HostVO> hosts = listAllUpAndEnabledHosts(Host.Type.Routing, host.getClusterId(), host.getPodId(), host.getDataCenterId());
 +            for (final VMInstanceVO vm : vms) {
 +                if (hosts == null || hosts.isEmpty() || !answer.getMigrate()
 +                        || _serviceOfferingDetailsDao.findDetail(vm.getServiceOfferingId(), GPU.Keys.vgpuType.toString()) != null) {
 +                    // Stop the VM when there is no host to migrate to, the host cannot
 +                    // migrate, or the VM uses a vGPU (vGPU VMs cannot be migrated).
 +                    _haMgr.scheduleStop(vm, hostId, WorkType.ForceStop);
 +                } else if (HypervisorType.LXC.equals(host.getHypervisorType()) && VirtualMachine.Type.User.equals(vm.getType())){
 +                    //Migration is not supported for LXC Vms. Schedule restart instead.
 +                    _haMgr.scheduleRestart(vm, false);
 +                } else {
 +                    _haMgr.scheduleMigration(vm);
 +                }
 +            }
 +        }
 +        return true;
 +    }
 +
 +    /**
 +     * Set retries for transiting the host into Maintenance
 +     */
 +    protected void setHostMaintenanceRetries(HostVO host) {
 +        Integer retries = HostMaintenanceRetries.valueIn(host.getClusterId());
 +        retryHostMaintenance.put(host.getId(), retries);
 +        s_logger.debug(String.format("Setting the host %s (%s) retries for Maintenance mode: %s",
 +                host.getId(), host.getName(), retries));
 +    }
 +
 +    @Override
 +    public boolean maintain(final long hostId) throws AgentUnavailableException {
 +        final Boolean result = propagateResourceEvent(hostId, ResourceState.Event.AdminAskMaintenace);
 +        if (result != null) {
 +            return result;
 +        }
 +
 +        return doMaintain(hostId);
 +    }
 +
 +    @Override
 +    public Host maintain(final PrepareForMaintenanceCmd cmd) {
 +        final Long hostId = cmd.getId();
 +        final HostVO host = _hostDao.findById(hostId);
 +
 +        if (host == null) {
 +            s_logger.debug("Unable to find host " + hostId);
 +            throw new InvalidParameterValueException("Unable to find host with ID: " + hostId + ". Please specify a valid host ID.");
 +        }
 +
 +        if (_hostDao.countBy(host.getClusterId(), ResourceState.PrepareForMaintenance, ResourceState.ErrorInMaintenance) > 0) {
 +            throw new InvalidParameterValueException("There are other hosts in PrepareForMaintenance or ErrorInMaintenance state in cluster " + host.getClusterId());
 +        }
 +
 +        if (_storageMgr.isLocalStorageActiveOnHost(host.getId())) {
 +            throw new InvalidParameterValueException("There are active VMs using the host's local storage pool. Please stop all VMs on this host that use local storage.");
 +        }
 +
 +        try {
 +            processResourceEvent(ResourceListener.EVENT_PREPARE_MAINTENANCE_BEFORE, hostId);
 +            if (maintain(hostId)) {
 +                processResourceEvent(ResourceListener.EVENT_PREPARE_MAINTENANCE_AFTER, hostId);
 +                return _hostDao.findById(hostId);
 +            } else {
 +                throw new CloudRuntimeException("Unable to prepare host " + hostId + " for maintenance");
 +            }
 +        } catch (final AgentUnavailableException e) {
 +            throw new CloudRuntimeException("Unable to prepare host " + hostId + " for maintenance");
 +        }
 +    }
 +
 +    /**
 +     * Add VNC details as user VM details for each VM in 'vms' (KVM hosts only)
 +     */
 +    protected void setKVMVncAccess(long hostId, List<VMInstanceVO> vms) {
 +        for (VMInstanceVO vm : vms) {
 +            GetVncPortAnswer vmVncPortAnswer = (GetVncPortAnswer) _agentMgr.easySend(hostId, new GetVncPortCommand(vm.getId(), vm.getInstanceName()));
 +            if (vmVncPortAnswer != null) {
 +                userVmDetailsDao.addDetail(vm.getId(), "kvm.vnc.address", vmVncPortAnswer.getAddress(), true);
 +                userVmDetailsDao.addDetail(vm.getId(), "kvm.vnc.port", String.valueOf(vmVncPortAnswer.getPort()), true);
 +            }
 +        }
 +    }
 +
 +    /**
 +     * Configure VNC access for host VMs which have failed migrating to another host while trying to enter Maintenance mode
 +     */
 +    protected void configureVncAccessForKVMHostFailedMigrations(HostVO host, List<VMInstanceVO> failedMigrations) {
 +        if (host.getHypervisorType().equals(HypervisorType.KVM)) {
 +            _agentMgr.pullAgentOutMaintenance(host.getId());
 +            setKVMVncAccess(host.getId(), failedMigrations);
 +            _agentMgr.pullAgentToMaintenance(host.getId());
 +        }
 +    }
 +
 +    /**
 +     * Set host into ErrorInMaintenance state, as errors occurred during VM migrations. Do the following:
 +     * - Cancel scheduled migrations for those which have already failed
 +     * - Configure VNC access for VMs (KVM hosts only)
 +     */
 +    protected boolean setHostIntoErrorInMaintenance(HostVO host, List<VMInstanceVO> failedMigrations) throws NoTransitionException {
 +        s_logger.debug("Unable to migrate " + failedMigrations.size() + " VM(s) from host " + host.getUuid());
 +        _haMgr.cancelScheduledMigrations(host);
 +        configureVncAccessForKVMHostFailedMigrations(host, failedMigrations);
 +        resourceStateTransitTo(host, ResourceState.Event.UnableToMigrate, _nodeId);
 +        return false;
 +    }
 +
 +    /**
 +     * Safely transit host into Maintenance mode
 +     */
 +    protected boolean setHostIntoMaintenance(HostVO host) throws NoTransitionException {
 +        s_logger.debug("Host " + host.getUuid() + " entering in Maintenance");
 +        resourceStateTransitTo(host, ResourceState.Event.InternalEnterMaintenance, _nodeId);
 +        ActionEventUtils.onCompletedActionEvent(CallContext.current().getCallingUserId(), CallContext.current().getCallingAccountId(),
 +                EventVO.LEVEL_INFO, EventTypes.EVENT_MAINTENANCE_PREPARE,
 +                "completed maintenance for host " + host.getId(), 0);
 +        return true;
 +    }
 +
 +    /**
 +     * Return true if host goes into Maintenance mode, only when:
 +     * - No Running, Migrating or Failed migrations (host_id = last_host_id) for the host
 +     */
 +    protected boolean isHostInMaintenance(HostVO host, List<VMInstanceVO> runningVms, List<VMInstanceVO> migratingVms, List<VMInstanceVO> failedMigrations) throws NoTransitionException {
 +        if (CollectionUtils.isEmpty(runningVms) && CollectionUtils.isEmpty(migratingVms)) {
 +            return CollectionUtils.isEmpty(failedMigrations) ?
 +                    setHostIntoMaintenance(host) :
 +                    setHostIntoErrorInMaintenance(host, failedMigrations);
 +        } else if (retryHostMaintenance.containsKey(host.getId())) {
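 +            // VMs are still running or migrating: consume one maintenance retry; when the
 +            // counter reaches zero the host is moved to ErrorInMaintenance instead.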
 +            Integer retriesLeft = retryHostMaintenance.get(host.getId());
 +            if (retriesLeft != null) {
 +                if (retriesLeft <= 0) {
 +                    retryHostMaintenance.remove(host.getId());
 +                    s_logger.debug(String.format("No retries left while preparing KVM host %s (%s) for Maintenance, " +
 +                                    "please investigate this connection.",
 +                            host.getId(), host.getName()));
 +                    return setHostIntoErrorInMaintenance(host, failedMigrations);
 +                }
 +                retriesLeft--;
 +                retryHostMaintenance.put(host.getId(), retriesLeft);
 +                s_logger.debug(String.format("Retries left preparing KVM host %s (%s) for Maintenance: %s",
 +                        host.getId(), host.getName(), retriesLeft));
 +            }
 +        }
 +
 +        return false;
 +    }
 +
 +    @Override
 +    public boolean checkAndMaintain(final long hostId) {
 +        boolean hostInMaintenance = false;
 +        final HostVO host = _hostDao.findById(hostId);
 +
 +        try {
 +            if (host.getType() != Host.Type.Storage) {
 +                final List<VMInstanceVO> vos = _vmDao.listByHostId(hostId);
 +                final List<VMInstanceVO> vosMigrating = _vmDao.listVmsMigratingFromHost(hostId);
 +                final List<VMInstanceVO> failedVmMigrations = _vmDao.listNonMigratingVmsByHostEqualsLastHost(hostId);
 +
 +                hostInMaintenance = isHostInMaintenance(host, vos, vosMigrating, failedVmMigrations);
 +            }
 +        } catch (final NoTransitionException e) {
 +            s_logger.debug("Cannot transit host " + host.getId() + " to Maintenance state", e);
 +        }
 +        return hostInMaintenance;
 +    }
 +
 +    @Override
 +    public Host updateHost(final UpdateHostCmd cmd) throws NoTransitionException {
 +        final Long hostId = cmd.getId();
 +        final Long guestOSCategoryId = cmd.getOsCategoryId();
 +
 +        // Verify that the host exists
 +        final HostVO host = _hostDao.findById(hostId);
 +        if (host == null) {
 +            throw new InvalidParameterValueException("Host with id " + hostId + " doesn't exist");
 +        }
 +
 +        if (cmd.getAllocationState() != null) {
 +            final ResourceState.Event resourceEvent = ResourceState.Event.toEvent(cmd.getAllocationState());
 +            if (resourceEvent != ResourceState.Event.Enable && resourceEvent != ResourceState.Event.Disable) {
 +                throw new CloudRuntimeException("Invalid allocation state: " + cmd.getAllocationState() + ", only Enable/Disable are allowed");
 +            }
 +
 +            resourceStateTransitTo(host, resourceEvent, _nodeId);
 +        }
 +
 +        if (guestOSCategoryId != null) {
 +            // Verify that the guest OS Category exists
 +            if (!(guestOSCategoryId > 0) || _guestOSCategoryDao.findById(guestOSCategoryId) == null) {
 +                throw new InvalidParameterValueException("Please specify a valid guest OS category.");
 +            }
 +
 +            final GuestOSCategoryVO guestOSCategory = _guestOSCategoryDao.findById(guestOSCategoryId);
 +            final DetailVO guestOSDetail = _hostDetailsDao.findDetail(hostId, "guest.os.category.id");
 +
 +            if (guestOSCategory != null && !GuestOSCategoryVO.CATEGORY_NONE.equalsIgnoreCase(guestOSCategory.getName())) {
 +                // Create/Update an entry for guest.os.category.id
 +                if (guestOSDetail != null) {
 +                    guestOSDetail.setValue(String.valueOf(guestOSCategory.getId()));
 +                    _hostDetailsDao.update(guestOSDetail.getId(), guestOSDetail);
 +                } else {
 +                    final Map<String, String> detail = new HashMap<String, String>();
 +                    detail.put("guest.os.category.id", String.valueOf(guestOSCategory.getId()));
 +                    _hostDetailsDao.persist(hostId, detail);
 +                }
 +            } else {
 +                // Delete any existing entry for guest.os.category.id
 +                if (guestOSDetail != null) {
 +                    _hostDetailsDao.remove(guestOSDetail.getId());
 +                }
 +            }
 +        }
 +
 +        final List<String> hostTags = cmd.getHostTags();
 +        if (hostTags != null) {
 +            if (s_logger.isDebugEnabled()) {
 +                s_logger.debug("Updating host tags to: " + hostTags);
 +            }
 +            _hostTagsDao.persist(hostId, hostTags);
 +        }
 +
 +        final String url = cmd.getUrl();
 +        if (url != null) {
 +            _storageMgr.updateSecondaryStorage(cmd.getId(), cmd.getUrl());
 +        }
 +
 +        final HostVO updatedHost = _hostDao.findById(hostId);
 +        return updatedHost;
 +    }
 +
 +    @Override
 +    public Cluster getCluster(final Long clusterId) {
 +        return _clusterDao.findById(clusterId);
 +    }
 +
 +    @Override
 +    public DataCenter getZone(Long zoneId) {
 +        return _dcDao.findById(zoneId);
 +    }
 +
 +    @Override
 +    public boolean configure(final String name, final Map<String, Object> params) throws ConfigurationException {
 +        _defaultSystemVMHypervisor = HypervisorType.getType(_configDao.getValue(Config.SystemVMDefaultHypervisor.toString()));
 +        _gson = GsonHelper.getGson();
 +
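 +        // Search builder listing the distinct hypervisor types of hosts in a data center.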
 +        _hypervisorsInDC = _hostDao.createSearchBuilder(String.class);
 +        _hypervisorsInDC.select(null, Func.DISTINCT, _hypervisorsInDC.entity().getHypervisorType());
 +        _hypervisorsInDC.and("hypervisorType", _hypervisorsInDC.entity().getHypervisorType(), SearchCriteria.Op.NNULL);
 +        _hypervisorsInDC.and("dataCenter", _hypervisorsInDC.entity().getDataCenterId(), SearchCriteria.Op.EQ);
 +        _hypervisorsInDC.and("id", _hypervisorsInDC.entity().getId(), SearchCriteria.Op.NEQ);
 +        _hypervisorsInDC.and("type", _hypervisorsInDC.entity().getType(), SearchCriteria.Op.EQ);
 +        _hypervisorsInDC.done();
 +
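 +        // Search builder finding GPU groups on a host that still have remaining capacity for a given vGPU type.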
 +        _gpuAvailability = _hostGpuGroupsDao.createSearchBuilder();
 +        _gpuAvailability.and("hostId", _gpuAvailability.entity().getHostId(), Op.EQ);
 +        _gpuAvailability.and("groupName", _gpuAvailability.entity().getGroupName(), Op.EQ);
 +        final SearchBuilder<VGPUTypesVO> join1 = _vgpuTypesDao.createSearchBuilder();
 +        join1.and("vgpuType", join1.entity().getVgpuType(), Op.EQ);
 +        join1.and("remainingCapacity", join1.entity().getRemainingCapacity(), Op.GT);
 +        _gpuAvailability.join("groupId", join1, _gpuAvailability.entity().getId(), join1.entity().getGpuGroupId(), JoinBuilder.JoinType.INNER);
 +        _gpuAvailability.done();
 +
 +        return true;
 +    }
 +
 +    @Override
 +    public List<HypervisorType> getSupportedHypervisorTypes(final long zoneId, final boolean forVirtualRouter, final Long podId) {
 +        final List<HypervisorType> hypervisorTypes = new ArrayList<HypervisorType>();
 +
 +        List<ClusterVO> clustersForZone = new ArrayList<ClusterVO>();
 +        if (podId != null) {
 +            clustersForZone = _clusterDao.listByPodId(podId);
 +        } else {
 +            clustersForZone = _clusterDao.listByZoneId(zoneId);
 +        }
 +
 +        for (final ClusterVO cluster : clustersForZone) {
 +            final HypervisorType hType = cluster.getHypervisorType();
 +            if (!forVirtualRouter || forVirtualRouter && hType != HypervisorType.BareMetal && hType != HypervisorType.Ovm) {
 +                hypervisorTypes.add(hType);
 +            }
 +        }
 +
 +        return hypervisorTypes;
 +    }
 +
 +    @Override
 +    public HypervisorType getDefaultHypervisor(final long zoneId) {
 +        HypervisorType defaultHyper = HypervisorType.None;
 +        if (_defaultSystemVMHypervisor != HypervisorType.None) {
 +            defaultHyper = _defaultSystemVMHypervisor;
 +        }
 +
 +        final DataCenterVO dc = _dcDao.findById(zoneId);
 +        if (dc == null) {
 +            return HypervisorType.None;
 +        }
 +        _dcDao.loadDetails(dc);
 +        final String defaultHypervisorInZone = dc.getDetail("defaultSystemVMHypervisorType");
 +        if (defaultHypervisorInZone != null) {
 +            defaultHyper = HypervisorType.getType(defaultHypervisorInZone);
 +        }
 +
 +        final List<VMTemplateVO> systemTemplates = _templateDao.listAllSystemVMTemplates();
 +        boolean isValid = false;
 +        for (final VMTemplateVO template : systemTemplates) {
 +            if (template.getHypervisorType() == defaultHyper) {
 +                isValid = true;
 +                break;
 +            }
 +        }
 +
 +        if (isValid) {
 +            final List<ClusterVO> clusters = _clusterDao.listByDcHyType(zoneId, defaultHyper.toString());
 +            if (clusters.size() <= 0) {
 +                isValid = false;
 +            }
 +        }
 +
 +        if (isValid) {
 +            return defaultHyper;
 +        } else {
 +            return HypervisorType.None;
 +        }
 +    }
 +
 +    @Override
 +    public HypervisorType getAvailableHypervisor(final long zoneId) {
 +        HypervisorType defaultHype = getDefaultHypervisor(zoneId);
 +        if (defaultHype == HypervisorType.None) {
 +            final List<HypervisorType> supportedHypes = getSupportedHypervisorTypes(zoneId, false, null);
 +            if (supportedHypes.size() > 0) {
 +                Collections.shuffle(supportedHypes);
 +                defaultHype = supportedHypes.get(0);
 +            }
 +        }
 +
 +        if (defaultHype == HypervisorType.None) {
 +            defaultHype = HypervisorType.Any;
 +        }
 +        return defaultHype;
 +    }
 +
 +    @Override
 +    public void registerResourceStateAdapter(final String name, final ResourceStateAdapter adapter) {
 +        synchronized (_resourceStateAdapters) {
 +            if (_resourceStateAdapters.get(name) != null) {
 +                throw new CloudRuntimeException(name + " is already registered");
 +            }
 +            _resourceStateAdapters.put(name, adapter);
 +        }
 +    }
 +
 +    @Override
 +    public void unregisterResourceStateAdapter(final String name) {
 +        synchronized (_resourceStateAdapters) {
 +            _resourceStateAdapters.remove(name);
 +        }
 +    }
 +
 +    private Object dispatchToStateAdapters(final ResourceStateAdapter.Event event, final boolean singleTaker, final Object... args) {
 +        synchronized (_resourceStateAdapters) {
 +            final Iterator<Map.Entry<String, ResourceStateAdapter>> it = _resourceStateAdapters.entrySet().iterator();
 +            Object result = null;
 +            while (it.hasNext()) {
 +                final Map.Entry<String, ResourceStateAdapter> item = it.next();
 +                final ResourceStateAdapter adapter = item.getValue();
 +
 +                final String msg = "Dispatching resource state event " + event + " to " + item.getKey();
 +                s_logger.debug(msg);
 +
 +                if (event == ResourceStateAdapter.Event.CREATE_HOST_VO_FOR_CONNECTED) {
 +                    result = adapter.createHostVOForConnectedAgent((HostVO)args[0], (StartupCommand[])args[1]);
 +                    if (result != null && singleTaker) {
 +                        break;
 +                    }
 +                } else if (event == ResourceStateAdapter.Event.CREATE_HOST_VO_FOR_DIRECT_CONNECT) {
 +                    result =
 +                            adapter.createHostVOForDirectConnectAgent((HostVO)args[0], (StartupCommand[])args[1], (ServerResource)args[2], (Map<String, String>)args[3],
 +                                    (List<String>)args[4]);
 +                    if (result != null && singleTaker) {
 +                        break;
 +                    }
 +                } else if (event == ResourceStateAdapter.Event.DELETE_HOST) {
 +                    try {
 +                        result = adapter.deleteHost((HostVO)args[0], (Boolean)args[1], (Boolean)args[2]);
 +                        if (result != null) {
 +                            break;
 +                        }
 +                    } catch (final UnableDeleteHostException e) {
 +                        s_logger.debug("Adapter " + adapter.getName() + " says unable to delete host", e);
 +                        result = new ResourceStateAdapter.DeleteHostAnswer(false, true);
 +                    }
 +                } else {
 +                    throw new CloudRuntimeException("Unknown resource state event:" + event);
 +                }
 +            }
 +
 +            return result;
 +        }
 +    }
 +
 +    @Override
 +    public void checkCIDR(final HostPodVO pod, final DataCenterVO dc, final String serverPrivateIP, final String serverPrivateNetmask) throws IllegalArgumentException {
 +        if (serverPrivateIP == null) {
 +            return;
 +        }
 +        // Get the CIDR address and CIDR size
 +        final String cidrAddress = pod.getCidrAddress();
 +        final long cidrSize = pod.getCidrSize();
 +
 +        // If the server's private IP address is not in the same subnet as the
 +        // pod's CIDR, return false
 +        final String cidrSubnet = NetUtils.getCidrSubNet(cidrAddress, cidrSize);
 +        final String serverSubnet = NetUtils.getSubNet(serverPrivateIP, serverPrivateNetmask);
 +        if (!cidrSubnet.equals(serverSubnet)) {
 +            s_logger.warn("The private ip address of the server (" + serverPrivateIP + ") is not compatible with the CIDR of pod: " + pod.getName() + " and zone: " +
 +                    dc.getName());
 +            throw new IllegalArgumentException("The private ip address of the server (" + serverPrivateIP + ") is not compatible with the CIDR of pod: " + pod.getName() +
 +                    " and zone: " + dc.getName());
 +        }
 +
 +        // If the server's private netmask is less inclusive than the pod's CIDR
 +        // netmask, return false
 +        final String cidrNetmask = NetUtils.getCidrSubNet("255.255.255.255", cidrSize);
 +        final long cidrNetmaskNumeric = NetUtils.ip2Long(cidrNetmask);
 +        final long serverNetmaskNumeric = NetUtils.ip2Long(serverPrivateNetmask);
 +        if (serverNetmaskNumeric > cidrNetmaskNumeric) {
 +            throw new IllegalArgumentException("The private ip address of the server (" + serverPrivateIP + ") is not compatible with the CIDR of pod: " + pod.getName() +
 +                    " and zone: " + dc.getName());
 +        }
 +
 +    }
 +
 +    private boolean checkCIDR(final HostPodVO pod, final String serverPrivateIP, final String serverPrivateNetmask) {
 +        if (serverPrivateIP == null) {
 +            return true;
 +        }
 +        // Get the CIDR address and CIDR size
 +        final String cidrAddress = pod.getCidrAddress();
 +        final long cidrSize = pod.getCidrSize();
 +
 +        // If the server's private IP address is not in the same subnet as the
 +        // pod's CIDR, return false
 +        final String cidrSubnet = NetUtils.getCidrSubNet(cidrAddress, cidrSize);
 +        final String serverSubnet = NetUtils.getSubNet(serverPrivateIP, serverPrivateNetmask);
 +        if (!cidrSubnet.equals(serverSubnet)) {
 +            return false;
 +        }
 +
 +        // If the server's private netmask is less inclusive than the pod's CIDR
 +        // netmask, return false
 +        final String cidrNetmask = NetUtils.getCidrSubNet("255.255.255.255", cidrSize);
 +        final long cidrNetmaskNumeric = NetUtils.ip2Long(cidrNetmask);
 +        final long serverNetmaskNumeric = NetUtils.ip2Long(serverPrivateNetmask);
 +        if (serverNetmaskNumeric > cidrNetmaskNumeric) {
 +            return false;
 +        }
 +        return true;
 +    }
 +
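 +    /**
 +     * Looks up an existing HostVO by the first startup command's GUID, then by the GUID without the
 +     * resource suffix; returns null when the connecting host is not yet known.
 +     */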
 +    private HostVO getNewHost(StartupCommand[] startupCommands) {
 +        StartupCommand startupCommand = startupCommands[0];
 +
 +        HostVO host = findHostByGuid(startupCommand.getGuid());
 +
 +        if (host != null) {
 +            return host;
 +        }
 +
 +        host = findHostByGuid(startupCommand.getGuidWithoutResource());
 +
 +        if (host != null) {
 +            return host;
 +        }
 +
 +        return null;
 +    }
 +
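 +    /**
 +     * Builds (or updates) a HostVO from the first StartupCommand: resolves the zone, pod and cluster
 +     * by name or id, merges implicit host tags from a StartupRoutingCommand, lets the resource state
 +     * adapters fill in the host, persists it and transitions it to Connecting (or to Alert/Error when
 +     * the transition fails).
 +     */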
 +    protected HostVO createHostVO(final StartupCommand[] cmds, final ServerResource resource, final Map<String, String> details, List<String> hostTags,
 +            final ResourceStateAdapter.Event stateEvent) {
 +        boolean newHost = false;
 +        StartupCommand startup = cmds[0];
 +
 +        HostVO host = getNewHost(cmds);
 +
 +        if (host == null) {
 +            host = new HostVO(startup.getGuid());
 +
 +            newHost = true;
 +        }
 +
 +        String dataCenter = startup.getDataCenter();
 +        String pod = startup.getPod();
 +        final String cluster = startup.getCluster();
 +
 +        if (pod != null && dataCenter != null && pod.equalsIgnoreCase("default") && dataCenter.equalsIgnoreCase("default")) {
 +            final List<HostPodVO> pods = _podDao.listAllIncludingRemoved();
 +            for (final HostPodVO hpv : pods) {
 +                if (checkCIDR(hpv, startup.getPrivateIpAddress(), startup.getPrivateNetmask())) {
 +                    pod = hpv.getName();
 +                    dataCenter = _dcDao.findById(hpv.getDataCenterId()).getName();
 +                    break;
 +                }
 +            }
 +        }
 +
 +        long dcId = -1;
 +        DataCenterVO dc = _dcDao.findByName(dataCenter);
 +        if (dc == null) {
 +            try {
 +                dcId = Long.parseLong(dataCenter);
 +                dc = _dcDao.findById(dcId);
 +            } catch (final NumberFormatException e) {
 +                s_logger.debug("Cannot parse " + dataCenter + " into Long.");
 +            }
 +        }
 +        if (dc == null) {
 +            throw new IllegalArgumentException("Host " + startup.getPrivateIpAddress() + " sent incorrect data center: " + dataCenter);
 +        }
 +        dcId = dc.getId();
 +
 +        HostPodVO p = _podDao.findByName(pod, dcId);
 +        if (p == null) {
 +            try {
 +                final long podId = Long.parseLong(pod);
 +                p = _podDao.findById(podId);
 +            } catch (final NumberFormatException e) {
 +                s_logger.debug("Cannot parse " + pod + " into Long.");
 +            }
 +        }
 +        /*
 +         * ResourceStateAdapter is responsible for throwing an Exception if the Pod is
 +         * null and a non-null Pod is required, for example by XcpServerDiscoverer.
 +         * Others, like PxeServer and ExternalFirewall, do not require a Pod.
 +         */
 +        final Long podId = p == null ? null : p.getId();
 +
 +        Long clusterId = null;
 +        if (cluster != null) {
 +            try {
 +                clusterId = Long.valueOf(cluster);
 +            } catch (final NumberFormatException e) {
 +                if (podId != null) {
 +                    ClusterVO c = _clusterDao.findBy(cluster, podId.longValue());
 +                    if (c == null) {
 +                        c = new ClusterVO(dcId, podId.longValue(), cluster);
 +                        c = _clusterDao.persist(c);
 +                    }
 +                    clusterId = c.getId();
 +                }
 +            }
 +        }
 +
 +        if (startup instanceof StartupRoutingCommand) {
 +            final StartupRoutingCommand ssCmd = (StartupRoutingCommand)startup;
 +            final List<String> implicitHostTags = ssCmd.getHostTags();
 +            if (!implicitHostTags.isEmpty()) {
 +                if (hostTags == null) {
 +                    hostTags = _hostTagsDao.gethostTags(host.getId());
 +                }
 +                if (hostTags != null) {
 +                    implicitHostTags.removeAll(hostTags);
 +                    hostTags.addAll(implicitHostTags);
 +                } else {
 +                    hostTags = implicitHostTags;
 +                }
 +            }
 +        }
 +
 +        host.setDataCenterId(dc.getId());
 +        host.setPodId(podId);
 +        host.setClusterId(clusterId);
 +        host.setPrivateIpAddress(startup.getPrivateIpAddress());
 +        host.setPrivateNetmask(startup.getPrivateNetmask());
 +        host.setPrivateMacAddress(startup.getPrivateMacAddress());
 +        host.setPublicIpAddress(startup.getPublicIpAddress());
 +        host.setPublicMacAddress(startup.getPublicMacAddress());
 +        host.setPublicNetmask(startup.getPublicNetmask());
 +        host.setStorageIpAddress(startup.getStorageIpAddress());
 +        host.setStorageMacAddress(startup.getStorageMacAddress());
 +        host.setStorageNetmask(startup.getStorageNetmask());
 +        host.setVersion(startup.getVersion());
 +        host.setName(startup.getName());
 +        host.setManagementServerId(_nodeId);
 +        host.setStorageUrl(startup.getIqn());
 +        host.setLastPinged(System.currentTimeMillis() >> 10);
 +        host.setHostTags(hostTags);
 +        host.setDetails(details);
 +        if (startup.getStorageIpAddressDeux() != null) {
 +            host.setStorageIpAddressDeux(startup.getStorageIpAddressDeux());
 +            host.setStorageMacAddressDeux(startup.getStorageMacAddressDeux());
 +            host.setStorageNetmaskDeux(startup.getStorageNetmaskDeux());
 +        }
 +        if (resource != null) {
 +            /* null when agent is connected agent */
 +            host.setResource(resource.getClass().getName());
 +        }
 +
 +        host = (HostVO)dispatchToStateAdapters(stateEvent, true, host, cmds, resource, details, hostTags);
 +        if (host == null) {
 +            throw new CloudRuntimeException("No resource state adapter response");
 +        }
 +
 +        if (newHost) {
 +            host = _hostDao.persist(host);
 +        } else {
 +            _hostDao.update(host.getId(), host);
 +        }
 +
 +        if (startup instanceof StartupRoutingCommand) {
 +            final StartupRoutingCommand ssCmd = (StartupRoutingCommand)startup;
 +
 +            updateSupportsClonedVolumes(host, ssCmd.getSupportsClonedVolumes());
 +        }
 +
 +        try {
 +            resourceStateTransitTo(host, ResourceState.Event.InternalCreated, _nodeId);
 +            /* Agent goes to Connecting status */
 +            _agentMgr.agentStatusTransitTo(host, Status.Event.AgentConnected, _nodeId);
 +        } catch (final Exception e) {
 +            s_logger.debug("Cannot transmit host " + host.getId() + " to Creating state", e);
 +            _agentMgr.agentStatusTransitTo(host, Status.Event.Error, _nodeId);
 +            try {
 +                resourceStateTransitTo(host, ResourceState.Event.Error, _nodeId);
 +            } catch (final NoTransitionException e1) {
 +                s_logger.debug("Cannot transmit host " + host.getId() + "to Error state", e);
 +            }
 +        }
 +
 +        return host;
 +    }
 +
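 +    /**
 +     * Maintains the per-host "supportsResign" detail reported at startup and recomputes the
 +     * cluster-level flag, which is only kept when every host in the cluster supports resigning.
 +     */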
 +    private void updateSupportsClonedVolumes(HostVO host, boolean supportsClonedVolumes) {
 +        final String name = "supportsResign";
 +
 +        DetailVO hostDetail = _hostDetailsDao.findDetail(host.getId(), name);
 +
 +        if (hostDetail != null) {
 +            if (supportsClonedVolumes) {
 +                hostDetail.setValue(Boolean.TRUE.toString());
 +
 +                _hostDetailsDao.update(hostDetail.getId(), hostDetail);
 +            }
 +            else {
 +                _hostDetailsDao.remove(hostDetail.getId());
 +            }
 +        }
 +        else {
 +            if (supportsClonedVolumes) {
 +                hostDetail = new DetailVO(host.getId(), name, Boolean.TRUE.toString());
 +
 +                _hostDetailsDao.persist(hostDetail);
 +            }
 +        }
 +
 +        boolean clusterSupportsResigning = true;
 +
 +        List<HostVO> hostVOs = _hostDao.findByClusterId(host.getClusterId());
 +
 +        for (HostVO hostVO : hostVOs) {
 +            DetailVO hostDetailVO = _hostDetailsDao.findDetail(hostVO.getId(), name);
 +
 +            if (hostDetailVO == null || !Boolean.parseBoolean(hostDetailVO.getValue())) {
 +                clusterSupportsResigning = false;
 +
 +                break;
 +            }
 +        }
 +
 +        ClusterDetailsVO clusterDetailsVO = _clusterDetailsDao.findDetail(host.getClusterId(), name);
 +
 +        if (clusterDetailsVO != null) {
 +            if (clusterSupportsResigning) {
 +                clusterDetailsVO.setValue(Boolean.TRUE.toString());
 +
 +                _clusterDetailsDao.update(clusterDetailsVO.getId(), clusterDetailsVO);
 +            }
 +            else {
 +                _clusterDetailsDao.remove(clusterDetailsVO.getId());
 +            }
 +        }
 +        else {
 +            if (clusterSupportsResigning) {
 +                clusterDetailsVO = new ClusterDetailsVO(host.getClusterId(), name, Boolean.TRUE.toString());
 +
 +                _clusterDetailsDao.persist(clusterDetailsVO);
 +            }
 +        }
 +    }
 +
 +    private boolean isFirstHostInCluster(final HostVO host) {
 +        boolean isFirstHost = true;
 +        if (host.getClusterId() != null) {
 +            final SearchBuilder<HostVO> sb = _hostDao.createSearchBuilder();
 +            sb.and("removed", sb.entity().getRemoved(), SearchCriteria.Op.NULL);
 +            sb.and("cluster", sb.entity().getClusterId(), SearchCriteria.Op.EQ);
 +            sb.done();
 +            final SearchCriteria<HostVO> sc = sb.create();
 +            sc.setParameters("cluster", host.getClusterId());
 +
 +            final List<HostVO> hosts = _hostDao.search(sc, null);
 +            if (hosts != null && hosts.size() > 1) {
 +                isFirstHost = false;
 +            }
 +        }
 +        return isFirstHost;
 +    }
 +
 +    private void markHostAsDisconnected(HostVO host, final StartupCommand[] cmds) {
 +        if (host == null) { // in case host is null due to some errors, try
 +            // reloading the host from db
 +            if (cmds != null) {
 +                final StartupCommand firstCmd = cmds[0];
 +                host = findHostByGuid(firstCmd.getGuid());
 +                if (host == null) {
 +                    host = findHostByGuid(firstCmd.getGuidWithoutResource());
 +                }
 +            }
 +        }
 +
 +        if (host != null) {
 +            // Change agent status to Alert, so that host is considered for
 +            // reconnection next time
 +            _agentMgr.agentStatusTransitTo(host, Status.Event.AgentDisconnected, _nodeId);
 +        }
 +    }
 +
 +    private Host createHostAndAgent(final ServerResource resource, final Map<String, String> details, final boolean old, final List<String> hostTags, final boolean forRebalance) {
 +        HostVO host = null;
 +        StartupCommand[] cmds = null;
 +        boolean hostExists = false;
 +        boolean created = false;
 +
 +        try {
 +            cmds = resource.initialize();
 +            if (cmds == null) {
 +                s_logger.info("Unable to fully initialize the agent because no StartupCommands are returned");
 +                return null;
 +            }
 +
 +            /* Generate a random version in a dev setup situation */
 +            if (this.getClass().getPackage().getImplementationVersion() == null) {
 +                for (final StartupCommand cmd : cmds) {
 +                    if (cmd.getVersion() == null) {
 +                        cmd.setVersion(Long.toString(System.currentTimeMillis()));
 +                    }
 +                }
 +            }
 +
 +            if (s_logger.isDebugEnabled()) {
 +                new Request(-1l, -1l, cmds, true, false).logD("Startup request from directly connected host: ", true);
 +            }
 +
 +            if (old) {
 +                final StartupCommand firstCmd = cmds[0];
 +                host = findHostByGuid(firstCmd.getGuid());
 +                if (host == null) {
 +                    host = findHostByGuid(firstCmd.getGuidWithoutResource());
 +                }
 +                if (host != null && host.getRemoved() == null) { // host already added, no need to add again
 +                    s_logger.debug("Found the host " + host.getId() + " by guid: " + firstCmd.getGuid() + ", old host reconnected as new");
 +                    hostExists = true; // ensures that host status is left unchanged in case of adding same one again
 +                    return null;
 +                }
 +            }
 +
 +            // find out if the host we want to connect to is new (so we can send an event)
 +            boolean newHost = getNewHost(cmds) == null;
 +
 +            host = createHostVO(cmds, resource, details, hostTags, ResourceStateAdapter.Event.CREATE_HOST_VO_FOR_DIRECT_CONNECT);
 +
 +            if (host != null) {
 +                created = _agentMgr.handleDirectConnectAgent(host, cmds, resource, forRebalance, newHost);
 +                /* reload myself from database */
 +                host = _hostDao.findById(host.getId());
 +            }
 +        } catch (final Exception e) {
 +            s_logger.warn("Unable to connect due to ", e);
 +        } finally {
 +            if (hostExists) {
 +                if (cmds != null) {
 +                    resource.disconnected();
 +                }
 +            } else {
 +                if (!created) {
 +                    if (cmds != null) {
 +                        resource.disconnected();
 +                    }
 +                    markHostAsDisconnected(host, cmds);
 +                }
 +            }
 +        }
 +
 +        return host;
 +    }
 +
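 +    /**
 +     * Variant of createHostAndAgent that serializes host creation behind the global "AddHostLock".
 +     * Only the first host in a cluster has its agent connected immediately; for the others the agent
 +     * creation is deferred by forcing the host into Alert with lastPinged=0 so the next scan task
 +     * picks it up.
 +     */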
 +    private Host createHostAndAgentDeferred(final ServerResource resource, final Map<String, String> details, final boolean old, final List<String> hostTags, final boolean forRebalance) {
 +        HostVO host = null;
 +        StartupCommand[] cmds = null;
 +        boolean hostExists = false;
 +        boolean deferAgentCreation = true;
 +        boolean created = false;
 +
 +        try {
 +            cmds = resource.initialize();
 +            if (cmds == null) {
 +                s_logger.info("Unable to fully initialize the agent because no StartupCommands are returned");
 +                return null;
 +            }
 +
 +            /* Generate a random version in a dev setup situation */
 +            if (this.getClass().getPackage().getImplementationVersion() == null) {
 +                for (final StartupCommand cmd : cmds) {
 +                    if (cmd.getVersion() == null) {
 +                        cmd.setVersion(Long.toString(System.currentTimeMillis()));
 +                    }
 +                }
 +            }
 +
 +            if (s_logger.isDebugEnabled()) {
 +                new Request(-1l, -1l, cmds, true, false).logD("Startup request from directly connected host: ", true);
 +            }
 +
 +            if (old) {
 +                final StartupCommand firstCmd = cmds[0];
 +                host = findHostByGuid(firstCmd.getGuid());
 +                if (host == null) {
 +                    host = findHostByGuid(firstCmd.getGuidWithoutResource());
 +                }
 +                if (host != null && host.getRemoved() == null) { // host already added, no need to add again
 +                    s_logger.debug("Found the host " + host.getId() + " by guid: " + firstCmd.getGuid() + ", old host reconnected as new");
 +                    hostExists = true; // ensures that host status is left unchanged in case of adding same one again
 +                    return null;
 +                }
 +            }
 +
 +            host = null;
 +            boolean newHost = false;
 +
 +            final GlobalLock addHostLock = GlobalLock.getInternLock("AddHostLock");
 +
 +            try {
 +                if (addHostLock.lock(ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_COOPERATION)) {
 +                    // to safely determine first host in cluster in multi-MS scenario
 +                    try {
 +                        // find out if the host we want to connect to is new (so we can send an event)
 +                        newHost = getNewHost(cmds) == null;
 +
 +                        host = createHostVO(cmds, resource, details, hostTags, ResourceStateAdapter.Event.CREATE_HOST_VO_FOR_DIRECT_CONNECT);
 +
 +                        if (host != null) {
 +                            // if first host in cluster no need to defer agent creation
 +                            deferAgentCreation = !isFirstHostInCluster(host);
 +                        }
 +                    } finally {
 +                        addHostLock.unlock();
 +                    }
 +                }
 +            } finally {
 +                addHostLock.releaseRef();
 +            }
 +
 +            if (host != null) {
 +                if (!deferAgentCreation) { // if first host in cluster then
 +                    created = _agentMgr.handleDirectConnectAgent(host, cmds, resource, forRebalance, newHost);
 +                    host = _hostDao.findById(host.getId()); // reload
 +                } else {
 +                    host = _hostDao.findById(host.getId()); // reload
 +                    // force host status to 'Alert' so that it is loaded for
 +                    // connection during next scan task
 +                    _agentMgr.agentStatusTransitTo(host, Status.Event.AgentDisconnected, _nodeId);
 +
 +                    host = _hostDao.findById(host.getId()); // reload
 +                    host.setLastPinged(0); // so that scan task can pick it up
 +                    _hostDao.update(host.getId(), host);
 +
 +                }
 +            }
 +        } catch (final Exception e) {
 +            s_logger.warn("Unable to connect due to ", e);
 +        } finally {
 +            if (hostExists) {
 +                if (cmds != null) {
 +                    resource.disconnected();
 +                }
 +            } else {
 +                if (!deferAgentCreation && !created) {
 +                    if (cmds != null) {
 +                        resource.disconnected();
 +                    }
 +                    markHostAsDisconnected(host, cmds);
 +                }
 +            }
 +        }
 +
 +        return host;
 +    }
 +
 +    @Override
 +    public Host createHostAndAgent(final Long hostId, final ServerResource resource, final Map<String, String> details, final boolean old, final List<String> hostTags, final boolean forRebalance) {
 +        final Host host = createHostAndAgent(resource, details, old, hostTags, forRebalance);
 +        return host;
 +    }
 +
 +    @Override
 +    public Host addHost(final long zoneId, final ServerResource resource, final Type hostType, final Map<String, String> hostDetails) {
 +        // Check if the zone exists in the system
 +        if (_dcDao.findById(zoneId) == null) {
 +            throw new InvalidParameterValueException("Can't find zone with id " + zoneId);
 +        }
 +
 +        final Map<String, String> details = hostDetails;
 +        final String guid = details.get("guid");
 +        final List<HostVO> currentHosts = listAllUpAndEnabledHostsInOneZoneByType(hostType, zoneId);
 +        for (final HostVO currentHost : currentHosts) {
 +            if (currentHost.getGuid().equals(guid)) {
 +                return currentHost;
 +            }
 +        }
 +
 +        return createHostAndAgent(resource, hostDetails, true, null, false);
 +    }
 +
 +    @Override
 +    public HostVO createHostVOForConnectedAgent(final StartupCommand[] cmds) {
 +        return createHostVO(cmds, null, null, null, ResourceStateAdapter.Event.CREATE_HOST_VO_FOR_CONNECTED);
 +    }
 +
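 +    /**
 +     * Marks the host's private and public IP addresses in the corresponding address tables and
 +     * rejects addresses that are already allocated in the pod or zone.
 +     */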
 +    private void checkIPConflicts(final HostPodVO pod, final DataCenterVO dc, final String serverPrivateIP, final String serverPrivateNetmask, final String serverPublicIP, final String serverPublicNetmask) {
 +        // If the server's private IP is the same as its public IP, this host has
 +        // a host-only private network. Don't check for conflicts with the
 +        // private IP address table.
 +        if (!ObjectUtils.equals(serverPrivateIP, serverPublicIP)) {
 +            if (!_privateIPAddressDao.mark(dc.getId(), pod.getId(), serverPrivateIP)) {
 +                // If the server's private IP address is already in the
 +                // database, return false
 +                final List<DataCenterIpAddressVO> existingPrivateIPs = _privateIPAddressDao.listByPodIdDcIdIpAddress(pod.getId(), dc.getId(), serverPrivateIP);
 +
 +                assert existingPrivateIPs.size() <= 1 : " How can we get more than one ip address with " + serverPrivateIP;
 +                if (existingPrivateIPs.size() > 1) {
 +                    throw new IllegalArgumentException("The private ip address of the server (" + serverPrivateIP + ") is already in use in pod: " + pod.getName() +
 +                            " and zone: " + dc.getName());
 +                }
 +                if (existingPrivateIPs.size() == 1) {
 +                    final DataCenterIpAddressVO vo = existingPrivateIPs.get(0);
 +                    if (vo.getInstanceId() != null) {
 +                        throw new IllegalArgumentException("The private ip address of the server (" + serverPrivateIP + ") is already in use in pod: " + pod.getName() +
 +                                " and zone: " + dc.getName());
 +                    }
 +                }
 +            }
 +        }
 +
 +        if (serverPublicIP != null && !_publicIPAddressDao.mark(dc.getId(), new Ip(serverPublicIP))) {
 +            // If the server's public IP address is already in the database,
 +            // return false
 +            final List<IPAddressVO> existingPublicIPs = _publicIPAddressDao.listByDcIdIpAddress(dc.getId(), serverPublicIP);
 +            if (existingPublicIPs.size() > 0) {
 +                throw new IllegalArgumentException("The public ip address of the server (" + serverPublicIP + ") is already in use in zone: " + dc.getName());
 +            }
 +        }
 +    }
 +
 +    @Override
 +    public HostVO fillRoutingHostVO(final HostVO host, final StartupRoutingCommand ssCmd, final HypervisorType hyType, Map<String, String> details, final List<String> hostTags) {
 +        if (host.getPodId() == null) {
 +            s_logger.error("Host " + ssCmd.getPrivateIpAddress() + " sent incorrect pod, pod id is null");
 +            throw new IllegalArgumentException("Host " + ssCmd.getPrivateIpAddress() + " sent incorrect pod, pod id is null");
 +        }
 +
 +        final ClusterVO clusterVO = _clusterDao.findById(host.getClusterId());
 +        if (clusterVO.getHypervisorType() != hyType) {
 +            throw new IllegalArgumentException("Can't add host whose hypervisor type is: " + hyType + " into cluster: " + clusterVO.getId() +
 +                    " whose hypervisor type is: " + clusterVO.getHypervisorType());
 +        }
 +
 +        final Map<String, String> hostDetails = ssCmd.getHostDetails();
 +        if (hostDetails != null) {
 +            if (details != null) {
 +                details.putAll(hostDetails);
 +            } else {
 +                details = hostDetails;
 +            }
 +        }
 +
 +        final HostPodVO pod = _podDao.findById(host.getPodId());
 +        final DataCenterVO dc = _dcDao.findById(host.getDataCenterId());
 +        checkIPConflicts(pod, dc, ssCmd.getPrivateIpAddress(), ssCmd.getPrivateNetmask(), ssCmd.getPublicIpAddress(), ssCmd.getPublicNetmask());
 +        host.setType(com.cloud.host.Host.Type.Routing);
 +        host.setDetails(details);
 +        host.setCaps(ssCmd.getCapabilities());
 +        host.setCpuSockets(ssCmd.getCpuSockets());
 +        host.setCpus(ssCmd.getCpus());
 +        host.setTotalMemory(ssCmd.getMemory());
 +        host.setSpeed(ssCmd.getSpeed());
 +        host.setHypervisorType(hyType);
 +        host.setHypervisorVersion(ssCmd.getHypervisorVersion());
 +        host.setGpuGroups(ssCmd.getGpuGroupDetails());
 +        return host;
 +    }
 +
 +    @Override
 +    public void deleteRoutingHost(final HostVO host, final boolean isForced, final boolean forceDestroyStorage) throws UnableDeleteHostException {
 +        if (host.getType() != Host.Type.Routing) {
 +            throw new CloudRuntimeException("Non-Routing host gets in deleteRoutingHost, id is " + host.getId());
 +        }
 +
 +        if (s_logger.isDebugEnabled()) {
 +            s_logger.debug("Deleting Host: " + host.getId() + " Guid:" + host.getGuid());
 +        }
 +
 +        if (forceDestroyStorage) {
 +            // put local storage into maintenance mode; this will set all the VMs on
 +            // this local storage into the Stopped state
 +            final StoragePoolVO storagePool = _storageMgr.findLocalStorageOnHost(host.getId());
 +            if (storagePool != null) {
 +                if (storagePool.getStatus() == StoragePoolStatus.Up || storagePool.getStatus() == StoragePoolStatus.ErrorInMaintenance) {
 +                    try {
 +                        final StoragePool pool = _storageSvr.preparePrimaryStorageForMaintenance(storagePool.getId());
 +                        if (pool == null) {
 +                            s_logger.debug("Failed to set primary storage into maintenance mode");
 +
 +                            throw new UnableDeleteHostException("Failed to set primary storage into maintenance mode");
 +                        }
 +                    } catch (final Exception e) {
 +                        s_logger.debug("Failed to set primary storage into maintenance mode, due to: " + e.toString());
 +                        throw new UnableDeleteHostException("Failed to set primary storage into maintenance mode, due to: " + e.toString());
 +                    }
 +                }
 +
 +                final List<VMInstanceVO> vmsOnLocalStorage = _storageMgr.listByStoragePool(storagePool.getId());
 +                for (final VMInstanceVO vm : vmsOnLocalStorage) {
 +                    try {
 +                        _vmMgr.destroy(vm.getUuid(), false);
 +                    } catch (final Exception e) {
 +                        final String errorMsg = "There was an error Destory the vm: " + vm + " as a part of hostDelete id=" + host.getId();
 +                        s_logger.debug(errorMsg, e);
 +                        throw new UnableDeleteHostException(errorMsg + "," + e.getMessage());
 +                    }
 +                }
 +            }
 +        } else {
 +            // Check if there are vms running/starting/stopping on this host
 +            final List<VMInstanceVO> vms = _vmDao.listByHostId(host.getId());
 +            if (!vms.isEmpty()) {
 +                if (isForced) {
 +                    // Stop HA disabled vms and HA enabled vms in Stopping state
 +                    // Restart HA enabled vms
 +                    for (final VMInstanceVO vm : vms) {
 +                        if (!vm.isHaEnabled() || vm.getState() == State.Stopping) {
 +                            s_logger.debug("Stopping vm: " + vm + " as a part of deleteHost id=" + host.getId());
 +                            try {
 +                                _vmMgr.advanceStop(vm.getUuid(), false);
 +                            } catch (final Exception e) {
 +                                final String errorMsg = "There was an error stopping the vm: " + vm + " as a part of hostDelete id=" + host.getId();
 +                                s_logger.debug(errorMsg, e);
 +                                throw new UnableDeleteHostException(errorMsg + "," + e.getMessage());
 +                            }
 +                        } else if (vm.isHaEnabled() && (vm.getState() == State.Running || vm.getState() == State.Starting)) {
 +                            s_logger.debug("Scheduling restart for vm: " + vm + " " + vm.getState() + " on the host id=" + host.getId());
 +                            _haMgr.scheduleRestart(vm, false);
 +                        }
 +                    }
 +                } else {
 +                    throw new UnableDeleteHostException("Unable to delete the host as there are vms in " + vms.get(0).getState() +
 +                            " state using this host and isForced=false specified");
 +                }
 +            }
 +        }
 +    }
 +
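 +    /**
 +     * Cancels maintenance for a host: verifies the host is in a maintenance-related resource state,
 +     * cancels scheduled HA migrations, makes sure the agent is connected (see
 +     * handleAgentIfNotConnected) and then transitions the host back to Enabled while pulling the
 +     * agent out of maintenance.
 +     */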
 +    private boolean doCancelMaintenance(final long hostId) {
 +        HostVO host;
 +        host = _hostDao.findById(hostId);
 +        if (host == null || host.getRemoved() != null) {
 +            s_logger.warn("Unable to find host " + hostId);
 +            return true;
 +        }
 +
 +        /*
 +         * TODO: think twice about returning true versus throwing an exception; an
 +         * exception is preferable because it always exposes bugs
 +         */
 +        if (host.getResourceState() != ResourceState.PrepareForMaintenance && host.getResourceState() != ResourceState.Maintenance &&
 +                host.getResourceState() != ResourceState.ErrorInMaintenance) {
 +            throw new CloudRuntimeException("Cannot perform cancelMaintenance when resource state is " + host.getResourceState() + ", hostId = " + hostId);
 +        }
 +
 +        /* TODO: move to listener */
 +        _haMgr.cancelScheduledMigrations(host);
 +
 +        boolean vms_migrating = false;
 +        final List<VMInstanceVO> vms = _haMgr.findTakenMigrationWork();
 +        for (final VMInstanceVO vm : vms) {
 +            if (vm.getHostId() != null && vm.getHostId() == hostId) {
 +                s_logger.warn("Unable to cancel migration because the vm is being migrated: " + vm + ", hostId = " + hostId);
 +                vms_migrating = true;
 +            }
 +        }
 +
++        handleAgentIfNotConnected(host, vms_migrating);
++
 +        try {
 +            resourceStateTransitTo(host, ResourceState.Event.AdminCancelMaintenance, _nodeId);
 +            _agentMgr.pullAgentOutMaintenance(hostId);
 +            retryHostMaintenance.remove(hostId);
++        } catch (final NoTransitionException e) {
++            s_logger.debug("Cannot transmit host " + host.getId() + "to Enabled state", e);
++            return false;
++        }
 +
-             // for kvm, need to log into kvm host, restart cloudstack-agent
-             if ((host.getHypervisorType() == HypervisorType.KVM && !vms_migrating) || host.getHypervisorType() == HypervisorType.LXC) {
++        return true;
 +
-                 final boolean sshToAgent = Boolean.parseBoolean(_configDao.getValue(Config.KvmSshToAgentEnabled.key()));
-                 if (!sshToAgent) {
-                     s_logger.info("Configuration tells us not to SSH into Agents. Please restart the Agent (" + hostId + ")  manually");
-                     return true;
-                 }
++    }
 +
-                 _hostDao.loadDetails(host);
-                 final String password = host.getDetail("password");
-                 final String username = host.getDetail("username");
-                 if (password == null || username == null) {
-                     s_logger.debug("Can't find password/username");
-                     return false;
-                 }
-                 final com.trilead.ssh2.Connection connection = SSHCmdHelper.acquireAuthorizedConnection(host.getPrivateIpAddress(), 22, username, password);
-                 if (connection == null) {
-                     s_logger.debug("Failed to connect to host: " + host.getPrivateIpAddress());
-                     return false;
-                 }
++    /**
++     * Handle the agent (if present on the host) when it is not connected, before cancelling maintenance.
++     * The agent must be connected before maintenance can be cancelled.
++     * If the host status is not Up:
++     * - If kvm.ssh.to.agent is true, then SSH into the host and restart the agent.
++     * - If kvm.ssh.to.agent is false, then fail to cancel maintenance.
++     */
++    protected void handleAgentIfNotConnected(HostVO host, boolean vmsMigrating) {
++        final boolean isAgentOnHost = host.getHypervisorType() == HypervisorType.KVM ||
++                host.getHypervisorType() == HypervisorType.LXC;
++        if (!isAgentOnHost || vmsMigrating || host.getStatus() == Status.Up) {
++            return;
++        }
++        final boolean sshToAgent = Boolean.parseBoolean(_configDao.getValue(KvmSshToAgentEnabled.key()));
++        if (sshToAgent) {
++            Pair<String, String> credentials = getHostCredentials(host);
++            connectAndRestartAgentOnHost(host, credentials.first(), credentials.second());
++        } else {
++            throw new CloudRuntimeException("SSH access is disabled, cannot cancel maintenance mode as " +
++                    "host agent is not connected");
++        }
++    }
 +
-                 try {
-                     SSHCmdHelper.SSHCmdResult result = SSHCmdHelper.sshExecuteCmdOneShot(connection, "service cloudstack-agent restart");
-                     s_logger.debug("cloudstack-agent restart result: " + result.toString());
-                 } catch (final SshException e) {
-                     return false;
-                 }
-             }
++    /**
++     * Get host credentials
++     * @throws CloudRuntimeException if the username or password is not found
++     */
++    protected Pair<String, String> getHostCredentials(HostVO host) {
++        _hostDao.loadDetails(host);
++        final String password = host.getDetail("password");
++        final String username = host.getDetail("username");
++        if (password == null || username == null) {
++            throw new CloudRuntimeException("SSH to agent is enabled, but username/password credentials are not found");
++        }
++        return new Pair<>(username, password);
++    }
 +
-             return true;
-         } catch (final NoTransitionException e) {
-             s_logger.debug("Cannot transmit host " + host.getId() + "to Enabled state", e);
-             return false;
++    /**
++     * Restarts the agent on the host via SSH. Assumes kvm.ssh.to.agent = true and the host status is not Up;
++     * throws CloudRuntimeException if the connection or the agent restart fails.
++     */
++    protected void connectAndRestartAgentOnHost(HostVO host, String username, String password) {
++        final com.trilead.ssh2.Connection connection = SSHCmdHelper.acquireAuthorizedConnection(
++                host.getPrivateIpAddress(), 22, username, password);
++        if (connection == null) {
++            throw new CloudRuntimeException("SSH to agent is enabled, but failed to connect to host: " + host.getPrivateIpAddress());
++        }
++        try {
++            SSHCmdHelper.SSHCmdResult result = SSHCmdHelper.sshExecuteCmdOneShot(
++                    connection, "service cloudstack-agent restart");
++            if (result.getReturnCode() != 0) {
++                throw new CloudRuntimeException("Could not restart agent on host " + host.getId() + " due to: " + result.getStdErr());
++            }
++            s_logger.debug("cloudstack-agent restart result: " + result.toString());
++        } catch (final SshException e) {
++            throw new CloudRuntimeException("SSH to agent is enabled, but agent restart failed", e);
 +        }
 +    }
 +
 +    private boolean cancelMaintenance(final long hostId) {
 +        try {
 +            final Boolean result = propagateResourceEvent(hostId, ResourceState.Event.AdminCancelMaintenance);
 +
 +            if (result != null) {
 +                return result;
 +            }
 +        } catch (final AgentUnavailableException e) {
 +            return false;
 +        }
 +
 +        return doCancelMaintenance(hostId);
 +    }
 +
 +    @Override
 +    public boolean executeUserRequest(final long hostId, final ResourceState.Event event) throws AgentUnavailableException {
 +        if (event == ResourceState.Event.AdminAskMaintenace) {
 +            return doMaintain(hostId);
 +        } else if (event == ResourceState.Event.AdminCancelMaintenance) {
 +            return doCancelMaintenance(hostId);
 +        } else if (event == ResourceState.Event.DeleteHost) {
 +            return doDeleteHost(hostId, false, false);
 +        } else if (event == ResourceState.Event.Unmanaged) {
 +            return doUmanageHost(hostId);
 +        } else if (event == ResourceState.Event.UpdatePassword) {
 +            return doUpdateHostPassword(hostId);
 +        } else {
 +            throw new CloudRuntimeException("Received an resource event we are not handling now, " + event);
 +        }
 +    }
 +
 +    private boolean doUmanageHost(final long hostId) {
 +        final HostVO host = _hostDao.findById(hostId);
 +        if (host == null) {
 +            s_logger.debug("Cannot find host " + hostId + ", assuming it has been deleted, skip umanage");
 +            return true;
 +        }
 +
 +        if (host.getHypervisorType() == HypervisorType.KVM || host.getHypervisorType() == HypervisorType.LXC) {
 +            _agentMgr.easySend(hostId, new MaintainCommand());
 +        }
 +
 +        _agentMgr.disconnectWithoutInvestigation(hostId, Event.ShutdownRequested);
 +        return true;
 +    }
 +
 +    @Override
 +    public boolean umanageHost(final long hostId) {
 +        try {
 +            final Boolean result = propagateResourceEvent(hostId, ResourceState.Event.Unmanaged);
 +
 +            if (result != null) {
 +                return result;
 +            }
 +        } catch (final AgentUnavailableException e) {
 +            return false;
 +        }
 +
 +        return doUmanageHost(hostId);
 +    }
 +
 +    private boolean doUpdateHostPassword(final long hostId) {
 +        if (!_agentMgr.isAgentAttached(hostId)) {
 +            return false;
 +        }
 +
 +        DetailVO nv = _hostDetailsDao.findDetail(hostId, ApiConstants.USERNAME);
 +        final String username = nv.getValue();
 +        nv = _hostDetailsDao.findDetail(hostId, ApiConstants.PASSWORD);
 +        final String password = nv.getValue();
 +
 +
 +        final HostVO host = _hostDao.findById(hostId);
 +        final String hostIpAddress = host.getPrivateIpAddress();
 +
 +        final UpdateHostPasswordCommand cmd = new UpdateHostPasswordCommand(username, password, hostIpAddress);
 +        final Answer answer = _agentMgr.easySend(hostId, cmd);
 +
 +        s_logger.info("Result returned from update host password ==> " + answer.getDetails());
 +        return answer.getResult();
 +    }
 +
 +    @Override
 +    public boolean updateClusterPassword(final UpdateHostPasswordCmd command) {
 +        final boolean shouldUpdateHostPasswd = command.getUpdatePasswdOnHost();
 +        // get agents for the cluster
 +        final List<HostVO> hosts = listAllHostsInCluster(command.getClusterId());
 +        for (final HostVO host : hosts) {
 +            try {
 +                final Boolean result = propagateResourceEvent(host.getId(), ResourceState.Event.UpdatePassword);
 +                if (result != null) {
 +                    return result;
 +                }
 +            } catch (final AgentUnavailableException e) {
 +                s_logger.error("Agent is not availbale!", e);
 +            }
 +
 +            if (shouldUpdateHostPasswd) {
 +                final boolean isUpdated = doUpdateHostPassword(host.getId());
 +                if (!isUpdated) {
 +                    throw new CloudRuntimeException("CloudStack failed to update the password of the Host with UUID / ID ==> " + host.getUuid() + " / " + host.getId() + ". Please make sure you are still able to connect to your hosts.");
 +                }
 +            }
 +        }
 +
 +        return true;
 +    }
 +
 +    @Override
 +    public boolean updateHostPassword(final UpdateHostPasswordCmd command) {
 +        // update agent attache password
 +        try {
 +            final Boolean result = propagateResourceEvent(command.getHostId(), ResourceState.Event.UpdatePassword);
 +            if (result != null) {
 +                return result;
 +            }
 +        } catch (final AgentUnavailableException e) {
 +            s_logger.error("Agent is not availbale!", e);
 +        }
 +
 +        final boolean shouldUpdateHostPasswd = command.getUpdatePasswdOnHost();
 +        // If shouldUpdateHostPasswd has been set to false, the method doUpdateHostPassword() won't be called.
 +        return shouldUpdateHostPasswd && doUpdateHostPassword(command.getHostId());
 +    }
 +
 +    public String getPeerName(final long agentHostId) {
 +
 +        final HostVO host = _hostDao.findById(agentHostId);
 +        if (host != null && host.getManagementServerId() != null) {
 +            if (_clusterMgr.getSelfPeerName().equals(Long.toString(host.getManagementServerId()))) {
 +                return null;
 +            }
 +
 +            return Long.toString(host.getManagementServerId());
 +        }
 +        return null;
 +    }
 +
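 +    /**
 +     * Forwards a resource state event to the management server peer that owns the agent and returns
 +     * the peer's answer; returns null when the agent is owned by this management server.
 +     */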
 +    public Boolean propagateResourceEvent(final long agentId, final ResourceState.Event event) throws AgentUnavailableException {
 +        final String msPeer = getPeerName(agentId);
 +        if (msPeer == null) {
 +            return null;
 +        }
 +
 +            s_logger.debug("Propagating resource request event:" + event.toString() + " to agent:" + agentId);
 +        final Command[] cmds = new Command[1];
 +        cmds[0] = new PropagateResourceEventCommand(agentId, event);
 +
 +        final String ansStr = _clusterMgr.execute(msPeer, agentId, _gson.toJson(cmds), true);
 +        if (ansStr == null) {
 +            throw new AgentUnavailableException(agentId);
 +        }
 +
 +        final Answer[] answers = _gson.fromJson(ansStr, Answer[].class);
 +
 +        if (s_logger.isDebugEnabled()) {
 +            s_logger.debug("Result for agent change is " + answers[0].getResult());
 +        }
 +
 +        return answers[0].getResult();
 +    }
 +
 +    @Override
 +    public boolean maintenanceFailed(final long hostId) {
 +        final HostVO host = _hostDao.findById(hostId);
 +        if (host == null) {
 +            if (s_logger.isDebugEnabled()) {
 +                s_logger.debug("Cant not find host " + hostId);
 +            }
 +            return false;
 +        } else {
 +            try {
 +                return resourceStateTransitTo(host, ResourceState.Event.UnableToMigrate, _nodeId);
 +            } catch (final NoTransitionException e) {
 +                s_logger.debug("No next resource state for host " + host.getId() + " while current state is " + host.getResourceState() + " with event " +
 +                        ResourceState.Event.UnableToMigrate, e);
 +                return false;
 +            }
 +        }
 +    }
 +
 +    @Override
 +    public List<HostVO> findDirectlyConnectedHosts() {
 +        /* The resource column is not null for direct connected resource */
 +        final QueryBuilder<HostVO> sc = QueryBuilder.create(HostVO.class);
 +        sc.and(sc.entity().getResource(), Op.NNULL);
 +        sc.and(sc.entity().getResourceState(), Op.NIN, ResourceState.Disabled);
 +        return sc.list();
 +    }
 +
 +    @Override
 +    public List<HostVO> listAllUpAndEnabledHosts(final Type type, final Long clusterId, final Long podId, final long dcId) {
 +        final QueryBuilder<HostVO> sc = QueryBuilder.create(HostVO.class);
 +        if (type != null) {
 +            sc.and(sc.entity().getType(), Op.EQ, type);
 +        }
 +        if (clusterId != null) {
 +            sc.and(sc.entity().getClusterId(), Op.EQ, clusterId);
 +        }
 +        if (podId != null) {
 +            sc.and(sc.entity().getPodId(), Op.EQ, podId);
 +        }
 +        sc.and(sc.entity().getDataCenterId(), Op.EQ, dcId);
 +        sc.and(sc.entity().getStatus(), Op.EQ, Status.Up);
 +        sc.and(sc.entity().getResourceState(), Op.EQ, ResourceState.Enabled);
 +        return sc.list();
 +    }
 +
 +    @Override
 +    public List<HostVO> listAllHosts(final Type type, final Long clusterId, final Long podId, final long dcId) {
 +        final QueryBuilder<HostVO> sc = QueryBuilder.create(HostVO.class);
 +        if (type != null) {
 +            sc.and(sc.entity().getType(), Op.EQ, type);
 +        }
 +        if (clusterId != null) {
 +            sc.and(sc.entity().getClusterId(), Op.EQ, clusterId);
 +        }
 +        if (podId != null) {
 +            sc.and(sc.entity().getPodId(), Op.EQ, podId);
 +        }
 +        sc.and(sc.entity().getDataCenterId(), Op.EQ, dcId);
 +        return sc.list();
 +    }
 +
 +    @Override
 +    public List<HostVO> listAllUpHosts(Type type, Long clusterId, Long podId, long dcId) {
 +        final QueryBuilder<HostVO> sc = QueryBuilder.create(HostVO.class);
 +        if (type != null) {
 +            sc.and(sc.entity().getType(), Op.EQ, type);
 +        }
 +        if (clusterId != null) {
 +            sc.and(sc.entity().getClusterId(), Op.EQ, clusterId);
 +        }
 +        if (podId != null) {
 +            sc.and(sc.entity().getPodId(), Op.EQ, podId);
 +        }
 +        sc.and(sc.entity().getDataCenterId(), Op.EQ, dcId);
 +        sc.and(sc.entity().getStatus(), Op.EQ, Status.Up);
 +        return sc.list();
 +    }
 +
 +    @Override
 +    public List<HostVO> listAllUpAndEnabledNonHAHosts(final Type type, final Long clusterId, final Long podId, final long dcId) {
 +        final String haTag = _haMgr.getHaTag();
 +        return _hostDao.listAllUpAndEnabledNonHAHosts(type, clusterId, podId, dcId, haTag);
 +    }
 +
 +    @Override
 +    public List<HostVO> findHostByGuid(final long dcId, final String guid) {
 +        final QueryBuilder<HostVO> sc = QueryBuilder.create(HostVO.class);
 +        sc.and(sc.entity().getDataCenterId(), Op.EQ, dcId);
 +        sc.and(sc.entity().getGuid(), Op.EQ, guid);
 +        return sc.list();
 +    }
 +
 +    @Override
 +    public List<HostVO> listAllHostsInCluster(final long clusterId) {
 +        final QueryBuilder<HostVO> sc = QueryBuilder.create(HostVO.class);
 +        sc.and(sc.entity().getClusterId(), Op.EQ, clusterId);
 +        return sc.list();
 +    }
 +
 +    @Override
 +    public List<HostVO> listHostsInClusterByStatus(final long clusterId, final Status status) {
 +        final QueryBuilder<HostVO> sc = QueryBuilder.create(HostVO.class);
 +        sc.and(sc.entity().getClusterId(), Op.EQ, clusterId);
 +        sc.and(sc.entity().getStatus(), Op.EQ, status);
 +        return sc.list();
 +    }
 +
 +    @Override
 +    public List<HostVO> listAllUpAndEnabledHostsInOneZoneByType(final Type type, final long dcId) {
 +        final QueryBuilder<HostVO> sc = QueryBuilder.create(HostVO.class);
 +        sc.and(sc.entity().getType(), Op.EQ, type);
 +        sc.and(sc.entity().getDataCenterId(), Op.EQ, dcId);
 +        sc.and(sc.entity().getStatus(), Op.EQ, Status.Up);
 +        sc.and(sc.entity().getResourceState(), Op.EQ, ResourceState.Enabled);
 +        return sc.list();
 +    }
 +
 +    @Override
 +    public List<HostVO> listAllNotInMaintenanceHostsInOneZone(final Type type, final Long dcId) {
 +        final QueryBuilder<HostVO> sc = QueryBuilder.create(HostVO.class);
 +        if (dcId != null) {
 +            sc.and(sc.entity().getDataCenterId(), Op.EQ, dcId);
 +        }
 +        sc.and(sc.entity().getType(), Op.EQ, type);
 +        sc.and(sc.entity().getResourceState(), Op.NIN, ResourceState.Maintenance, ResourceState.ErrorInMaintenance, ResourceState.PrepareForMaintenance,
 +                ResourceState.Error);
 +        return sc.list();
 +    }
 +
 +    @Override
 +    public List<HostVO> listAllHostsInOneZoneByType(final Type type, final long dcId) {
 +        final QueryBuilder<HostVO> sc = QueryBuilder.create(HostVO.class);
 +        sc.and(sc.entity().getType(), Op.EQ, type);
 +        sc.and(sc.entity().getDataCenterId(), Op.EQ, dcId);
 +        return sc.list();
 +    }
 +
 +    @Override
 +    public List<HostVO> listAllHostsInAllZonesByType(final Type type) {
 +        final QueryBuilder<HostVO> sc = QueryBuilder.create(HostVO.class);
 +        sc.and(sc.entity().getType(), Op.EQ, type);
 +        return sc.list();
 +    }
 +
 +    @Override
 +    public List<HypervisorType> listAvailHypervisorInZone(final Long hostId, final Long zoneId) {
 +        final SearchCriteria<String> sc = _hypervisorsInDC.create();
 +        if (zoneId != null) {
 +            sc.setParameters("dataCenter", zoneId);
 +        }
 +        if (hostId != null) {
 +            // exclude the given host, since we want to check what hypervisor is already handled
 +            // in adding this new host
 +            sc.setParameters("id", hostId);
 +        }
 +        sc.setParameters("type", Host.Type.Routing);
 +
 +        // The search is not able to return list of enums, so getting
 +        // list of hypervisors as strings and then converting them to enum
 +        final List<String> hvs = _hostDao.customSearch(sc, null);
 +        final List<HypervisorType> hypervisors = new ArrayList<HypervisorType>();
 +        for (final String hv : hvs) {
 +            hypervisors.add(HypervisorType.getType(hv));
 +        }
 +        return hypervisors;
 +    }
 +
 +    @Override
 +    public HostVO findHostByGuid(final String guid) {
 +        final QueryBuilder<HostVO> sc = QueryBuilder.create(HostVO.class);
 +        sc.and(sc.entity().getGuid(), Op.EQ, guid);
 +        return sc.find();
 +    }
 +
 +    @Override
 +    public HostVO findHostByName(final String name) {
 +        final QueryBuilder<HostVO> sc = QueryBuilder.create(HostVO.class);
 +        sc.and(sc.entity().getName(), Op.EQ, name);
 +        return sc.find();
 +    }
 +
 +    @Override
 +    public HostStats getHostStatistics(final long hostId) {
 +        final Answer answer = _agentMgr.easySend(hostId, new GetHostStatsCommand(_hostDao.findById(hostId).getGuid(), _hostDao.findById(hostId).getName(), hostId));
 +
 +        if (answer instanceof UnsupportedAnswer) {
 +            return null;
 +        }
 +
 +        if (answer == null || !answer.getResult()) {
 +            final String msg = "Unable to obtain host " + hostId + " statistics. ";
 +            s_logger.warn(msg);
 +            return null;
 +        } else {
 +
 +            // now construct the result object
 +            if (answer instanceof GetHostStatsAnswer) {
 +                return ((GetHostStatsAnswer)answer).getHostStats();
 +            }
 +        }
 +        return null;
 +    }
 +
 +    @Override
 +    public Long getGuestOSCategoryId(final long hostId) {
 +        final HostVO host = _hostDao.findById(hostId);
 +        if (host == null) {
 +            return null;
 +        } else {
 +            _hostDao.loadDetails(host);
 +            final DetailVO detail = _hostDetailsDao.findDetail(hostId, "guest.os.category.id");
 +            if (detail == null) {
 +                return null;
 +            } else {
 +                return Long.parseLong(detail.getValue());
 +            }
 +        }
 +    }
 +
 +    @Override
 +    public String getHostTags(final long hostId) {
 +        final List<String> hostTags = _hostTagsDao.gethostTags(hostId);
 +        if (hostTags == null) {
 +            return null;
 +        } else {
 +            return StringUtils.listToCsvTags(hostTags);
 +        }
 +    }
 +
 +    @Override
 +    public List<PodCluster> listByDataCenter(final long dcId) {
 +        final List<HostPodVO> pods = _podDao.listByDataCenterId(dcId);
 +        final ArrayList<PodCluster> pcs = new ArrayList<PodCluster>();
 +        for (final HostPodVO pod : pods) {
 +            final List<ClusterVO> clusters = _clusterDao.listByPodId(pod.getId());
 +            if (clusters.size() == 0) {
 +                pcs.add(new PodCluster(pod, null));
 +            } else {
 +                for (final ClusterVO cluster : clusters) {
 +                    pcs.add(new PodCluster(pod, cluster));
 +                }
 +            }
 +        }
 +        return pcs;
 +    }
 +
 +    @Override
 +    public List<HostVO> listAllUpAndEnabledHostsInOneZoneByHypervisor(final HypervisorType type, final long dcId) {
 +        final QueryBuilder<HostVO> sc = QueryBuilder.create(HostVO.class);
 +        sc.and(sc.entity().getHypervisorType(), Op.EQ, type);
 +        sc.and(sc.entity().getDataCenterId(), Op.EQ, dcId);
 +        sc.and(sc.entity().getStatus(), Op.EQ, Status.Up);
 +        sc.and(sc.entity().getResourceState(), Op.EQ, ResourceState.Enabled);
 +        return sc.list();
 +    }
 +
 +    @Override
 +    public List<HostVO> listAllUpAndEnabledHostsInOneZone(final long dcId) {
 +        final QueryBuilder<HostVO> sc = QueryBuilder.create(HostVO.class);
 +
 +        sc.and(sc.entity().getDataCenterId(), Op.EQ, dcId);
 +        sc.and(sc.entity().getStatus(), Op.EQ, Status.Up);
 +        sc.and(sc.entity().getResourceState(), Op.EQ, ResourceState.Enabled);
 +
 +        return sc.list();
 +    }
 +
 +    @Override
 +    public boolean isHostGpuEnabled(final long hostId) {
 +        final SearchCriteria<HostGpuGroupsVO> sc = _gpuAvailability.create();
 +        sc.setParameters("hostId", hostId);
 +        return !_hostGpuGroupsDao.customSearch(sc, null).isEmpty();
 +    }
 +
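 +    // Lists the host's GPU groups that still have remaining capacity for the
 +    // requested vGPU type, ordered by remaining capacity.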
 +    @Override
 +    public List<HostGpuGroupsVO> listAvailableGPUDevice(final long hostId, final String groupName, final String vgpuType) {
 +        final Filter searchFilter = new Filter(VGPUTypesVO.class, "remainingCapacity", false, null, null);
 +        final SearchCriteria<HostGpuGroupsVO> sc = _gpuAvailability.create();
 +        sc.setParameters("hostId", hostId);
 +        sc.setParameters("groupName", groupName);
 +        sc.setJoinParameters("groupId", "vgpuType", vgpuType);
 +        sc.setJoinParameters("groupId", "remainingCapacity", 0);
 +        return _hostGpuGroupsDao.customSearch(sc, searchFilter);
 +    }
 +
 +    @Override
 +    public boolean isGPUDeviceAvailable(final long hostId, final String groupName, final String vgpuType) {
 +        if (!listAvailableGPUDevice(hostId, groupName, vgpuType).isEmpty()) {
 +            return true;
 +        } else {
 +            if (s_logger.isDebugEnabled()) {
 +                s_logger.debug("Host ID: " + hostId + " does not have a GPU device available");
 +            }
 +            return false;
 +        }
 +    }
 +
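 +    // Returns a transfer object for the first GPU group on the host that can serve
 +    // the requested vGPU type; throws a CloudRuntimeException when none is available.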
 +    @Override
 +    public GPUDeviceTO getGPUDevice(final long hostId, final String groupName, final String vgpuType) {
 +        final List<HostGpuGroupsVO> gpuDeviceList = listAvailableGPUDevice(hostId, groupName, vgpuType);
 +
 +        if (CollectionUtils.isEmpty(gpuDeviceList)) {
 +            final String errorMsg = "Host " + hostId + " does not have required GPU device or out of capacity. GPU group: " + groupName + ", vGPU Type: " + vgpuType;
 +            s_logger.error(errorMsg);
 +            throw new CloudRuntimeException(errorMsg);
 +        }
 +
 +        return new GPUDeviceTO(gpuDeviceList.get(0).getGroupName(), vgpuType, null);
 +    }
 +
 +    @Override
 +    public void updateGPUDetails(final long hostId, final HashMap<String, HashMap<String, VgpuTypesInfo>> groupDetails) {
 +        // Update GPU group capacity
 +        final TransactionLegacy txn = TransactionLegacy.currentTxn();
 +        txn.start();
 +        _hostGpuGroupsDao.persist(hostId, new ArrayList<String>(groupDetails.keySet()));
 +        _vgpuTypesDao.persist(hostId, groupDetails);
 +        txn.commit();
 +    }
 +
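 +    // Asks the host's agent for its current GPU group details; returns null when
 +    // the command is unsupported or the call fails.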
 +    @Override
 +    public HashMap<String, HashMap<String, VgpuTypesInfo>> getGPUStatistics(final HostVO host) {
 +        final Answer answer = _agentMgr.easySend(host.getId(), new GetGPUStatsCommand(host.getGuid(), host.getName()));
 +        if (answer instanceof UnsupportedAnswer) {
 +            return null;
 +        }
 +        if (answer == null || !answer.getResult()) {
 +            s_logger.warn("Unable to obtain GPU stats for host " + host.getName());
 +            return null;
 +        }
 +        // now construct the result object
 +        if (answer instanceof GetGPUStatsAnswer) {
 +            return ((GetGPUStatsAnswer) answer).getGroupDetails();
 +        }
 +        return null;
 +    }
 +
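 +    // Picks one random routing host of the given hypervisor type that is Up,
 +    // Enabled and not removed; returns null when no such host exists.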
 +    @Override
 +    public HostVO findOneRandomRunningHostByHypervisor(HypervisorType type) {
 +        final QueryBuilder<HostVO> sc = QueryBuilder.create(HostVO.class);
 +        sc.and(sc.entity().getHypervisorType(), Op.EQ, type);
 +        sc.and(sc.entity().getType(),Op.EQ, Type.Routing);
 +        sc.and(sc.entity().getStatus(), Op.EQ, Status.Up);
 +        sc.and(sc.entity().getResourceState(), Op.EQ, ResourceState.Enabled);
 +        sc.and(sc.entity().getRemoved(), Op.NULL);
 +        List<HostVO> hosts = sc.list();
 +        if (CollectionUtils.isEmpty(hosts)) {
 +            return null;
 +        } else {
 +            Collections.shuffle(hosts);
 +            return hosts.get(0);
 +        }
 +    }
 +
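 +    // Clears the planner's resource usage reservation for the host inside a
 +    // transaction; returns false when no reservation exists or the update fails.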
 +    @Override
 +    @DB
 +    @ActionEvent(eventType = EventTypes.EVENT_HOST_RESERVATION_RELEASE, eventDescription = "releasing host reservation", async = true)
 +    public boolean releaseHostReservation(final Long hostId) {
 +        try {
 +            return Transaction.execute(new TransactionCallback<Boolean>() {
 +                @Override
 +                public Boolean doInTransaction(final TransactionStatus status) {
 +                    final PlannerHostReservationVO reservationEntry = _plannerHostReserveDao.findByHostId(hostId);
 +                    if (reservationEntry != null) {
 +                        final long id = reservationEntry.getId();
 +                        final PlannerHostReservationVO hostReservation = _plannerHostReserveDao.lockRow(id, true);
 +                        if (hostReservation == null) {
 +                            if (s_logger.isDebugEnabled()) {
 +                                s_logger.debug("Host reservation for host: " + hostId + " does not exist; the release reservation call is ignored.");
 +                            }
 +                            return false;
 +                        }
 +                        hostReservation.setResourceUsage(null);
 +                        _plannerHostReserveDao.persist(hostReservation);
 +                        return true;
 +                    }
 +
 +                    if (s_logger.isDebugEnabled()) {
 +                        s_logger.debug("Host reservation for host: " + hostId + " does not exist; the release reservation call is ignored.");
 +                    }
 +
 +                    return false;
 +                }
 +            });
 +        } catch (final CloudRuntimeException e) {
 +            throw e;
 +        } catch (final Throwable t) {
 +            s_logger.error("Unable to release host reservation for host: " + hostId, t);
 +            return false;
 +        }
 +    }
 +
 +    @Override
 +    public String getConfigComponentName() {
 +        return ResourceManagerImpl.class.getSimpleName();
 +    }
 +
 +    @Override
 +    public ConfigKey<?>[] getConfigKeys() {
 +        return new ConfigKey<?>[] {HostMaintenanceRetries};
 +    }
 +}