Posted to commits@cloudstack.apache.org by da...@apache.org on 2020/03/06 07:51:40 UTC

[cloudstack] branch master updated: CloudStack Kubernetes Service (#3680)

This is an automated email from the ASF dual-hosted git repository.

dahn pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/cloudstack.git


The following commit(s) were added to refs/heads/master by this push:
     new 8cc70c7  CloudStack Kubernetes Service (#3680)
8cc70c7 is described below

commit 8cc70c7d8725319ba5455c5fe17ede275e372f1c
Author: Abhishek Kumar <ab...@gmail.com>
AuthorDate: Fri Mar 6 13:21:23 2020 +0530

    CloudStack Kubernetes Service (#3680)
---
 .../org/apache/cloudstack/api/ApiConstants.java    |   19 +-
 .../command/user/config/ListCapabilitiesCmd.java   |    2 +
 .../api/command/user/iso/DeleteIsoCmd.java         |    4 +
 .../api/command/user/iso/RegisterIsoCmd.java       |   36 +
 .../api/response/CapabilitiesResponse.java         |   16 +
 client/pom.xml                                     |    5 +
 .../java/com/cloud/network/IpAddressManager.java   |    3 +
 .../resources/META-INF/db/schema-41300to41400.sql  |   71 +
 plugins/integrations/kubernetes-service/pom.xml    |  135 ++
 .../kubernetes/cluster/KubernetesCluster.java      |  134 ++
 .../cluster/KubernetesClusterDetailsVO.java        |   84 ++
 .../cluster/KubernetesClusterEventTypes.java       |   29 +-
 .../cluster/KubernetesClusterManagerImpl.java      | 1500 +++++++++++++++++++
 .../cluster/KubernetesClusterService.java          |  108 ++
 .../kubernetes/cluster/KubernetesClusterVO.java    |  340 +++++
 .../kubernetes/cluster/KubernetesClusterVmMap.java |   21 +-
 .../cluster/KubernetesClusterVmMapVO.java          |   76 +
 .../KubernetesClusterActionWorker.java             |  380 +++++
 .../KubernetesClusterDestroyWorker.java            |  243 +++
 ...ernetesClusterResourceModifierActionWorker.java |  513 +++++++
 .../KubernetesClusterScaleWorker.java              |  431 ++++++
 .../KubernetesClusterStartWorker.java              |  640 ++++++++
 .../actionworkers/KubernetesClusterStopWorker.java |   62 +
 .../KubernetesClusterUpgradeWorker.java            |  169 +++
 .../cluster/dao/KubernetesClusterDao.java          |   25 +-
 .../cluster/dao/KubernetesClusterDaoImpl.java      |  112 ++
 .../cluster/dao/KubernetesClusterDetailsDao.java   |   19 +-
 .../dao/KubernetesClusterDetailsDaoImpl.java       |   23 +-
 .../cluster/dao/KubernetesClusterVmMapDao.java     |   17 +-
 .../cluster/dao/KubernetesClusterVmMapDaoImpl.java |   46 +
 .../cluster/utils/KubernetesClusterUtil.java       |  311 ++++
 .../version/KubernetesSupportedVersion.java        |   50 +-
 .../version/KubernetesSupportedVersionVO.java      |  168 +++
 .../version/KubernetesVersionEventTypes.java       |   15 +-
 .../version/KubernetesVersionManagerImpl.java      |  388 +++++
 .../version/KubernetesVersionService.java          |   36 +
 .../version/dao/KubernetesSupportedVersionDao.java |   18 +-
 .../dao/KubernetesSupportedVersionDaoImpl.java     |   42 +
 .../version/AddKubernetesSupportedVersionCmd.java  |  153 ++
 .../DeleteKubernetesSupportedVersionCmd.java       |  102 +-
 .../UpdateKubernetesSupportedVersionCmd.java       |  103 ++
 .../cluster/CreateKubernetesClusterCmd.java        |  297 ++++
 .../cluster/DeleteKubernetesClusterCmd.java        |  101 +-
 .../cluster/GetKubernetesClusterConfigCmd.java     |   98 ++
 .../cluster/ListKubernetesClustersCmd.java         |  100 ++
 .../cluster/ScaleKubernetesClusterCmd.java         |  128 ++
 .../cluster/StartKubernetesClusterCmd.java         |  120 ++
 .../cluster/StopKubernetesClusterCmd.java          |  108 ++
 .../cluster/UpgradeKubernetesClusterCmd.java       |  118 ++
 .../ListKubernetesSupportedVersionsCmd.java        |  109 ++
 .../response/KubernetesClusterConfigResponse.java  |   61 +
 .../api/response/KubernetesClusterResponse.java    |  329 ++++
 .../KubernetesSupportedVersionResponse.java        |  174 +++
 .../kubernetes-service/module.properties           |   18 +
 .../spring-kubernetes-service-context.xml          |   37 +
 .../src/main/resources/conf/k8s-master-add.yml     |  237 +++
 .../src/main/resources/conf/k8s-master.yml         |  294 ++++
 .../src/main/resources/conf/k8s-node.yml           |  237 +++
 .../main/resources/script/upgrade-kubernetes.sh    |  133 ++
 .../version/KubernetesVersionServiceTest.java      |  253 ++++
 plugins/pom.xml                                    |    1 +
 scripts/util/create-kubernetes-binaries-iso.sh     |  106 ++
 .../api/query/dao/NetworkOfferingJoinDao.java      |   24 +-
 .../api/query/dao/NetworkOfferingJoinDaoImpl.java  |   10 +-
 .../com/cloud/network/IpAddressManagerImpl.java    |  141 +-
 .../com/cloud/server/ManagementServerImpl.java     |    5 +
 test/integration/smoke/test_kubernetes_clusters.py |  729 +++++++++
 .../smoke/test_kubernetes_supported_versions.py    |  278 ++++
 tools/apidoc/gen_toc.py                            |    4 +-
 ui/l10n/en.js                                      |   35 +
 ui/plugins/cks/cks.css                             |   43 +
 ui/plugins/cks/cks.js                              | 1581 ++++++++++++++++++++
 ui/plugins/{plugins.js => cks/config.js}           |   16 +-
 ui/plugins/cks/icon.png                            |  Bin 0 -> 1208 bytes
 ui/plugins/plugins.js                              |    3 +-
 ui/scripts/sharedFunctions.js                      |   25 +-
 76 files changed, 12317 insertions(+), 285 deletions(-)

diff --git a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java
index 1c6fd8b..384293f 100644
--- a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java
+++ b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java
@@ -225,8 +225,8 @@ public class ApiConstants {
     public static final String LOCK = "lock";
     public static final String LUN = "lun";
     public static final String LBID = "lbruleid";
-    public static final String MAX = "max";
     public static final String MAC_ADDRESS = "macaddress";
+    public static final String MAX = "max";
     public static final String MAX_SNAPS = "maxsnaps";
     public static final String MAX_CPU_NUMBER = "maxcpunumber";
     public static final String MAX_MEMORY = "maxmemory";
@@ -782,6 +782,23 @@ public class ApiConstants {
     public static final String LAST_UPDATED = "lastupdated";
     public static final String PERFORM_FRESH_CHECKS = "performfreshchecks";
 
+    public static final String CONSOLE_END_POINT = "consoleendpoint";
+    public static final String EXTERNAL_LOAD_BALANCER_IP_ADDRESS = "externalloadbalanceripaddress";
+    public static final String DOCKER_REGISTRY_USER_NAME = "dockerregistryusername";
+    public static final String DOCKER_REGISTRY_PASSWORD = "dockerregistrypassword";
+    public static final String DOCKER_REGISTRY_URL = "dockerregistryurl";
+    public static final String DOCKER_REGISTRY_EMAIL = "dockerregistryemail";
+    public static final String ISO_NAME = "isoname";
+    public static final String ISO_STATE = "isostate";
+    public static final String SEMANTIC_VERSION = "semanticversion";
+    public static final String KUBERNETES_VERSION_ID = "kubernetesversionid";
+    public static final String KUBERNETES_VERSION_NAME = "kubernetesversionname";
+    public static final String MASTER_NODES = "masternodes";
+    public static final String MIN_SEMANTIC_VERSION = "minimumsemanticversion";
+    public static final String MIN_KUBERNETES_VERSION_ID = "minimumkubernetesversionid";
+    public static final String NODE_ROOT_DISK_SIZE = "noderootdisksize";
+    public static final String SUPPORTS_HA = "supportsha";
+
     public enum HostDetails {
         all, capacity, events, stats, min;
     }
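
These constants become the request parameter names of the new Kubernetes Service APIs. As a rough illustration (not part of this commit), a field in a command class such as CreateKubernetesClusterCmd would typically bind to one of them through CloudStack's @Parameter annotation; the exact declarations and descriptions in the real command classes are not shown in this excerpt and may differ.

    // Illustrative sketch only; the real fields live in CreateKubernetesClusterCmd (not shown here).
    @Parameter(name = ApiConstants.KUBERNETES_VERSION_ID, type = CommandType.UUID,
            entityType = KubernetesSupportedVersionResponse.class, required = true,
            description = "Kubernetes version for the cluster (assumed wording)")
    private Long kubernetesVersionId;

    @Parameter(name = ApiConstants.MASTER_NODES, type = CommandType.LONG,
            description = "number of master nodes in the cluster (assumed wording)")
    private Long masterNodes;

    @Parameter(name = ApiConstants.NODE_ROOT_DISK_SIZE, type = CommandType.LONG,
            description = "root disk size in GB for each cluster node (assumed wording)")
    private Long nodeRootDiskSize;
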
diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/config/ListCapabilitiesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/config/ListCapabilitiesCmd.java
index 3f1b9a2..566be64 100644
--- a/api/src/main/java/org/apache/cloudstack/api/command/user/config/ListCapabilitiesCmd.java
+++ b/api/src/main/java/org/apache/cloudstack/api/command/user/config/ListCapabilitiesCmd.java
@@ -61,6 +61,8 @@ public class ListCapabilitiesCmd extends BaseCmd {
         response.setAllowUserExpungeRecoverVM((Boolean)capabilities.get("allowUserExpungeRecoverVM"));
         response.setAllowUserExpungeRecoverVolume((Boolean)capabilities.get("allowUserExpungeRecoverVolume"));
         response.setAllowUserViewAllDomainAccounts((Boolean)capabilities.get("allowUserViewAllDomainAccounts"));
+        response.setKubernetesServiceEnabled((Boolean)capabilities.get("kubernetesServiceEnabled"));
+        response.setKubernetesClusterExperimentalFeaturesEnabled((Boolean)capabilities.get("kubernetesClusterExperimentalFeaturesEnabled"));
         if (capabilities.containsKey("apiLimitInterval")) {
             response.setApiLimitInterval((Integer)capabilities.get("apiLimitInterval"));
         }
diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/DeleteIsoCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/DeleteIsoCmd.java
index 103e922..b38a24f 100644
--- a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/DeleteIsoCmd.java
+++ b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/DeleteIsoCmd.java
@@ -61,6 +61,10 @@ public class DeleteIsoCmd extends BaseAsyncCmd {
         return id;
     }
 
+    public void setId(Long id) {
+        this.id = id;
+    }
+
     public Long getZoneId() {
         return zoneId;
     }
diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/RegisterIsoCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/RegisterIsoCmd.java
index a06b54f..1c1a767 100644
--- a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/RegisterIsoCmd.java
+++ b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/RegisterIsoCmd.java
@@ -127,10 +127,18 @@ public class RegisterIsoCmd extends BaseCmd implements UserCmd {
         return bootable;
     }
 
+    public void setBootable(Boolean bootable) {
+        this.bootable = bootable;
+    }
+
     public String getDisplayText() {
         return displayText;
     }
 
+    public void setDisplayText(String displayText) {
+        this.displayText = displayText;
+    }
+
     public Boolean isFeatured() {
         return featured;
     }
@@ -139,6 +147,10 @@ public class RegisterIsoCmd extends BaseCmd implements UserCmd {
         return publicIso;
     }
 
+    public void setPublic(Boolean publicIso) {
+        this.publicIso = publicIso;
+    }
+
     public Boolean isExtractable() {
         return extractable;
     }
@@ -147,6 +159,10 @@ public class RegisterIsoCmd extends BaseCmd implements UserCmd {
         return isoName;
     }
 
+    public void setIsoName(String isoName) {
+        this.isoName = isoName;
+    }
+
     public Long getOsTypeId() {
         return osTypeId;
     }
@@ -155,22 +171,42 @@ public class RegisterIsoCmd extends BaseCmd implements UserCmd {
         return url;
     }
 
+    public void setUrl(String url) {
+        this.url = url;
+    }
+
     public Long getZoneId() {
         return zoneId;
     }
 
+    public void setZoneId(Long zoneId) {
+        this.zoneId = zoneId;
+    }
+
     public Long getDomainId() {
         return domainId;
     }
 
+    public void setDomainId(Long domainId) {
+        this.domainId = domainId;
+    }
+
     public String getAccountName() {
         return accountName;
     }
 
+    public void setAccountName(String accountName) {
+        this.accountName = accountName;
+    }
+
     public String getChecksum() {
         return checksum;
     }
 
+    public void setChecksum(String checksum) {
+        this.checksum = checksum;
+    }
+
     public String getImageStoreUuid() {
         return imageStoreUuid;
     }
diff --git a/api/src/main/java/org/apache/cloudstack/api/response/CapabilitiesResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/CapabilitiesResponse.java
index 352f559..26b3fd5 100644
--- a/api/src/main/java/org/apache/cloudstack/api/response/CapabilitiesResponse.java
+++ b/api/src/main/java/org/apache/cloudstack/api/response/CapabilitiesResponse.java
@@ -92,6 +92,14 @@ public class CapabilitiesResponse extends BaseResponse {
     @Param(description = "true if users can see all accounts within the same domain, false otherwise")
     private boolean allowUserViewAllDomainAccounts;
 
+    @SerializedName("kubernetesserviceenabled")
+    @Param(description = "true if Kubernetes Service plugin is enabled, false otherwise")
+    private boolean kubernetesServiceEnabled;
+
+    @SerializedName("kubernetesclusterexperimentalfeaturesenabled")
+    @Param(description = "true if experimental features for Kubernetes cluster such as Docker private registry are enabled, false otherwise")
+    private boolean kubernetesClusterExperimentalFeaturesEnabled;
+
     public void setSecurityGroupsEnabled(boolean securityGroupsEnabled) {
         this.securityGroupsEnabled = securityGroupsEnabled;
     }
@@ -159,4 +167,12 @@ public class CapabilitiesResponse extends BaseResponse {
     public void setAllowUserViewAllDomainAccounts(boolean allowUserViewAllDomainAccounts) {
         this.allowUserViewAllDomainAccounts = allowUserViewAllDomainAccounts;
     }
+
+    public void setKubernetesServiceEnabled(boolean kubernetesServiceEnabled) {
+        this.kubernetesServiceEnabled = kubernetesServiceEnabled;
+    }
+
+    public void setKubernetesClusterExperimentalFeaturesEnabled(boolean kubernetesClusterExperimentalFeaturesEnabled) {
+        this.kubernetesClusterExperimentalFeaturesEnabled = kubernetesClusterExperimentalFeaturesEnabled;
+    }
 }
diff --git a/client/pom.xml b/client/pom.xml
index 29ecdec..bd58b05 100644
--- a/client/pom.xml
+++ b/client/pom.xml
@@ -483,6 +483,11 @@
             <artifactId>cloud-plugin-backup-dummy</artifactId>
             <version>${project.version}</version>
         </dependency>
+        <dependency>
+            <groupId>org.apache.cloudstack</groupId>
+            <artifactId>cloud-plugin-integrations-kubernetes-service</artifactId>
+            <version>${project.version}</version>
+        </dependency>
     </dependencies>
     <build>
         <plugins>
diff --git a/engine/components-api/src/main/java/com/cloud/network/IpAddressManager.java b/engine/components-api/src/main/java/com/cloud/network/IpAddressManager.java
index 229248a..d4c5bf6 100644
--- a/engine/components-api/src/main/java/com/cloud/network/IpAddressManager.java
+++ b/engine/components-api/src/main/java/com/cloud/network/IpAddressManager.java
@@ -175,6 +175,9 @@ public interface IpAddressManager {
     PublicIp assignPublicIpAddressFromVlans(long dcId, Long podId, Account owner, VlanType type, List<Long> vlanDbIds, Long networkId, String requestedIp, boolean isSystem)
             throws InsufficientAddressCapacityException;
 
+    PublicIp getAvailablePublicIpAddressFromVlans(long dcId, Long podId, Account owner, VlanType type, List<Long> vlanDbIds, Long networkId, String requestedIp, boolean isSystem)
+            throws InsufficientAddressCapacityException;
+
     @DB
     void allocateNicValues(NicProfile nic, DataCenter dc, VirtualMachineProfile vm, Network network, String requestedIpv4, String requestedIpv6)
             throws InsufficientVirtualNetworkCapacityException, InsufficientAddressCapacityException;
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41300to41400.sql b/engine/schema/src/main/resources/META-INF/db/schema-41300to41400.sql
index db7482b..2c9c7ab 100644
--- a/engine/schema/src/main/resources/META-INF/db/schema-41300to41400.sql
+++ b/engine/schema/src/main/resources/META-INF/db/schema-41300to41400.sql
@@ -304,3 +304,74 @@ CREATE TABLE  `cloud`.`router_health_check` (
   UNIQUE `i_router_health_checks__router_id__check_name__check_type`(`router_id`, `check_name`, `check_type`),
   INDEX `i_router_health_checks__router_id`(`router_id`)
 ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
+
+-- Kubernetes service
+CREATE TABLE IF NOT EXISTS `cloud`.`kubernetes_supported_version` (
+    `id` bigint unsigned NOT NULL auto_increment,
+    `uuid` varchar(40) DEFAULT NULL,
+    `name` varchar(255) NOT NULL COMMENT 'the name of this Kubernetes version',
+    `semantic_version` varchar(32) NOT NULL COMMENT 'the semantic version for this Kubernetes version',
+    `iso_id` bigint unsigned NOT NULL COMMENT 'the ID of the binaries ISO for this Kubernetes version',
+    `zone_id` bigint unsigned DEFAULT NULL COMMENT 'the ID of the zone for which this Kubernetes version is made available',
+    `state` char(32) DEFAULT NULL COMMENT 'the enabled or disabled state for this Kubernetes version',
+    `min_cpu` int(10) unsigned NOT NULL COMMENT 'the minimum CPU needed by cluster nodes for using this Kubernetes version',
+    `min_ram_size` bigint(20) unsigned NOT NULL COMMENT 'the minimum RAM in MB needed by cluster nodes for this Kubernetes version',
+    `created` datetime NOT NULL COMMENT 'date created',
+    `removed` datetime COMMENT 'date removed or null, if still present',
+
+    PRIMARY KEY(`id`),
+    CONSTRAINT `fk_kubernetes_supported_version__iso_id` FOREIGN KEY `fk_kubernetes_supported_version__iso_id`(`iso_id`) REFERENCES `vm_template`(`id`) ON DELETE CASCADE,
+    CONSTRAINT `fk_kubernetes_supported_version__zone_id` FOREIGN KEY `fk_kubernetes_supported_version__zone_id`(`zone_id`) REFERENCES `data_center` (`id`) ON DELETE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+CREATE TABLE IF NOT EXISTS `cloud`.`kubernetes_cluster` (
+    `id` bigint unsigned NOT NULL auto_increment,
+    `uuid` varchar(40) DEFAULT NULL,
+    `name` varchar(255) NOT NULL,
+    `description` varchar(4096) COMMENT 'display text for this Kubernetes cluster',
+    `zone_id` bigint unsigned NOT NULL COMMENT 'the ID of the zone in which this Kubernetes cluster is deployed',
+    `kubernetes_version_id` bigint unsigned NOT NULL COMMENT 'the ID of the Kubernetes version of this Kubernetes cluster',
+    `service_offering_id` bigint unsigned COMMENT 'service offering id for the cluster VM',
+    `template_id` bigint unsigned COMMENT 'the ID of the template used by this Kubernetes cluster',
+    `network_id` bigint unsigned COMMENT 'the ID of the network used by this Kubernetes cluster',
+    `master_node_count` bigint NOT NULL default '0' COMMENT 'the number of the master nodes deployed for this Kubernetes cluster',
+    `node_count` bigint NOT NULL default '0' COMMENT 'the number of the worker nodes deployed for this Kubernetes cluster',
+    `account_id` bigint unsigned NOT NULL COMMENT 'the ID of owner account of this Kubernetes cluster',
+    `domain_id` bigint unsigned NOT NULL COMMENT 'the ID of the domain of this cluster',
+    `state` char(32) NOT NULL COMMENT 'the current state of this Kubernetes cluster',
+    `key_pair` varchar(40),
+    `cores` bigint unsigned NOT NULL COMMENT 'total number of CPU cores used by this Kubernetes cluster',
+    `memory` bigint unsigned NOT NULL COMMENT 'total memory used by this Kubernetes cluster',
+    `node_root_disk_size` bigint(20) unsigned DEFAULT 0 COMMENT 'root disk size for each node',
+    `endpoint` varchar(255) COMMENT 'URL endpoint for API access to the Kubernetes cluster manager',
+    `created` datetime NOT NULL COMMENT 'date created',
+    `removed` datetime COMMENT 'date removed or null, if still present',
+    `gc` tinyint unsigned NOT NULL DEFAULT 1 COMMENT 'gc this Kubernetes cluster or not',
+
+    PRIMARY KEY(`id`),
+    CONSTRAINT `fk_cluster__zone_id` FOREIGN KEY `fk_cluster__zone_id`(`zone_id`) REFERENCES `data_center` (`id`) ON DELETE CASCADE,
+    CONSTRAINT `fk_cluster__kubernetes_version_id` FOREIGN KEY `fk_cluster__kubernetes_version_id`(`kubernetes_version_id`) REFERENCES `kubernetes_supported_version` (`id`) ON DELETE CASCADE,
+    CONSTRAINT `fk_cluster__service_offering_id` FOREIGN KEY `fk_cluster__service_offering_id`(`service_offering_id`) REFERENCES `service_offering`(`id`) ON DELETE CASCADE,
+    CONSTRAINT `fk_cluster__template_id` FOREIGN KEY `fk_cluster__template_id`(`template_id`) REFERENCES `vm_template`(`id`) ON DELETE CASCADE,
+    CONSTRAINT `fk_cluster__network_id` FOREIGN KEY `fk_cluster__network_id`(`network_id`) REFERENCES `networks`(`id`) ON DELETE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+CREATE TABLE IF NOT EXISTS `cloud`.`kubernetes_cluster_vm_map` (
+    `id` bigint unsigned NOT NULL auto_increment,
+    `cluster_id` bigint unsigned NOT NULL COMMENT 'the ID of the Kubernetes cluster',
+    `vm_id` bigint unsigned NOT NULL COMMENT 'the ID of the VM',
+
+    PRIMARY KEY(`id`),
+    CONSTRAINT `fk_kubernetes_cluster_vm_map__cluster_id` FOREIGN KEY `fk_kubernetes_cluster_vm_map__cluster_id`(`cluster_id`) REFERENCES `kubernetes_cluster`(`id`) ON DELETE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+CREATE TABLE IF NOT EXISTS `cloud`.`kubernetes_cluster_details` (
+    `id` bigint unsigned NOT NULL auto_increment,
+    `cluster_id` bigint unsigned NOT NULL COMMENT 'the ID of the Kubernetes cluster',
+    `name` varchar(255) NOT NULL,
+    `value` varchar(10240) NOT NULL,
+    `display` tinyint(1) NOT NULL DEFAULT '1' COMMENT 'True if the detail can be displayed to the end user else false',
+
+    PRIMARY KEY(`id`),
+    CONSTRAINT `fk_kubernetes_cluster_details__cluster_id` FOREIGN KEY `fk_kubernetes_cluster_details__cluster_id`(`cluster_id`) REFERENCES `kubernetes_cluster`(`id`) ON DELETE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
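
For orientation, the new tables relate through the foreign keys above: kubernetes_cluster_vm_map and kubernetes_cluster_details both hang off kubernetes_cluster, which in turn references kubernetes_supported_version. The query below is only a sketch written against the schema defined here (not part of the commit), listing live clusters with the number of VMs mapped to them:

    -- Sketch only, against the schema defined above.
    SELECT c.uuid, c.name, c.state, c.master_node_count, c.node_count,
           COUNT(m.id) AS mapped_vms
    FROM cloud.kubernetes_cluster c
    LEFT JOIN cloud.kubernetes_cluster_vm_map m ON m.cluster_id = c.id
    WHERE c.removed IS NULL
    GROUP BY c.id;
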
diff --git a/plugins/integrations/kubernetes-service/pom.xml b/plugins/integrations/kubernetes-service/pom.xml
new file mode 100644
index 0000000..9fb2a43
--- /dev/null
+++ b/plugins/integrations/kubernetes-service/pom.xml
@@ -0,0 +1,135 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+         http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+    <modelVersion>4.0.0</modelVersion>
+    <artifactId>cloud-plugin-integrations-kubernetes-service</artifactId>
+    <name>Apache CloudStack Plugin - Kubernetes Service</name>
+    <parent>
+        <groupId>org.apache.cloudstack</groupId>
+        <artifactId>cloudstack-plugins</artifactId>
+        <version>4.14.0.0-SNAPSHOT</version>
+        <relativePath>../../pom.xml</relativePath>
+    </parent>
+    <dependencies>
+        <dependency>
+            <groupId>org.apache.cloudstack</groupId>
+            <artifactId>cloud-core</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.cloudstack</groupId>
+            <artifactId>cloud-framework-db</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.cloudstack</groupId>
+            <artifactId>cloud-framework-ca</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.cloudstack</groupId>
+            <artifactId>cloud-framework-security</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.cloudstack</groupId>
+            <artifactId>cloud-engine-schema</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.cloudstack</groupId>
+            <artifactId>cloud-engine-api</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.cloudstack</groupId>
+            <artifactId>cloud-engine-components-api</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.cloudstack</groupId>
+            <artifactId>cloud-framework-managed-context</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.eclipse.persistence</groupId>
+            <artifactId>javax.persistence</artifactId>
+            <version>${cs.jpa.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>com.google.code.gson</groupId>
+            <artifactId>gson</artifactId>
+            <version>${cs.gson.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>com.google.guava</groupId>
+            <artifactId>guava</artifactId>
+            <version>${cs.guava.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>log4j</groupId>
+            <artifactId>log4j</artifactId>
+            <version>${cs.log4j.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.springframework</groupId>
+            <artifactId>spring-context</artifactId>
+            <version>${org.springframework.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.springframework</groupId>
+            <artifactId>spring-aop</artifactId>
+            <version>${org.springframework.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.springframework</groupId>
+            <artifactId>spring-beans</artifactId>
+            <version>${org.springframework.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.springframework</groupId>
+            <artifactId>spring-test</artifactId>
+            <version>${org.springframework.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>commons-codec</groupId>
+            <artifactId>commons-codec</artifactId>
+            <version>${cs.codec.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.hamcrest</groupId>
+            <artifactId>hamcrest-library</artifactId>
+            <version>${cs.hamcrest.version}</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.bouncycastle</groupId>
+            <artifactId>bcprov-jdk15on</artifactId>
+            <version>${cs.bcprov.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>joda-time</groupId>
+            <artifactId>joda-time</artifactId>
+            <version>${cs.joda-time.version}</version>
+        </dependency>
+    </dependencies>
+</project>
diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesCluster.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesCluster.java
new file mode 100644
index 0000000..aef304a
--- /dev/null
+++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesCluster.java
@@ -0,0 +1,134 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.kubernetes.cluster;
+
+import java.util.Date;
+
+import org.apache.cloudstack.acl.ControlledEntity;
+import org.apache.cloudstack.api.Displayable;
+import org.apache.cloudstack.api.Identity;
+import org.apache.cloudstack.api.InternalIdentity;
+
+import com.cloud.utils.fsm.StateMachine2;
+
+/**
+ * KubernetesCluster describes the properties of a Kubernetes cluster.
+ * A StateMachine maintains its states.
+ *
+ */
+public interface KubernetesCluster extends ControlledEntity, com.cloud.utils.fsm.StateObject<KubernetesCluster.State>, Identity, InternalIdentity, Displayable {
+
+    enum Event {
+        StartRequested,
+        StopRequested,
+        DestroyRequested,
+        RecoveryRequested,
+        ScaleUpRequested,
+        ScaleDownRequested,
+        UpgradeRequested,
+        OperationSucceeded,
+        OperationFailed,
+        CreateFailed,
+        FaultsDetected;
+    }
+
+    enum State {
+        Created("Initial state of the Kubernetes cluster. At this state it is just a logical/DB entry with no resources consumed"),
+        Starting("Resources needed for Kubernetes cluster are being provisioned"),
+        Running("Necessary resources are provisioned and the Kubernetes cluster is operationally ready to launch Kubernetes"),
+        Stopping("Resources for the Kubernetes cluster are being destroyed"),
+        Stopped("All resources for the Kubernetes cluster are destroyed; the cluster may still have ephemeral resources such as persistent volumes provisioned"),
+        Scaling("Transient state in which cluster resources are being scaled up or down"),
+        Upgrading("Transient state in which the cluster is being upgraded"),
+        Alert("State representing Kubernetes clusters that are not in the expected desired state (control plane not operationally active, stopped cluster VMs, etc.)"),
+        Recovering("State in which the Kubernetes cluster is recovering from the Alert state"),
+        Destroyed("End state of the Kubernetes cluster in which all resources are destroyed; the cluster is no longer usable"),
+        Destroying("State in which resources for the Kubernetes cluster are being cleaned up or are yet to be cleaned up by the garbage collector"),
+        Error("State of Kubernetes clusters that failed to be created");
+
+        protected static final StateMachine2<State, KubernetesCluster.Event, KubernetesCluster> s_fsm = new StateMachine2<State, KubernetesCluster.Event, KubernetesCluster>();
+
+        public static StateMachine2<State, KubernetesCluster.Event, KubernetesCluster> getStateMachine() { return s_fsm; }
+
+        static {
+            s_fsm.addTransition(State.Created, Event.StartRequested, State.Starting);
+
+            s_fsm.addTransition(State.Starting, Event.OperationSucceeded, State.Running);
+            s_fsm.addTransition(State.Starting, Event.OperationFailed, State.Alert);
+            s_fsm.addTransition(State.Starting, Event.CreateFailed, State.Error);
+            s_fsm.addTransition(State.Starting, Event.StopRequested, State.Stopping);
+
+            s_fsm.addTransition(State.Running, Event.StopRequested, State.Stopping);
+            s_fsm.addTransition(State.Alert, Event.StopRequested, State.Stopping);
+            s_fsm.addTransition(State.Stopping, Event.OperationSucceeded, State.Stopped);
+            s_fsm.addTransition(State.Stopping, Event.OperationFailed, State.Alert);
+
+            s_fsm.addTransition(State.Stopped, Event.StartRequested, State.Starting);
+
+            s_fsm.addTransition(State.Running, Event.FaultsDetected, State.Alert);
+
+            s_fsm.addTransition(State.Running, Event.ScaleUpRequested, State.Scaling);
+            s_fsm.addTransition(State.Running, Event.ScaleDownRequested, State.Scaling);
+            s_fsm.addTransition(State.Scaling, Event.OperationSucceeded, State.Running);
+            s_fsm.addTransition(State.Scaling, Event.OperationFailed, State.Alert);
+
+            s_fsm.addTransition(State.Running, Event.UpgradeRequested, State.Upgrading);
+            s_fsm.addTransition(State.Upgrading, Event.OperationSucceeded, State.Running);
+            s_fsm.addTransition(State.Upgrading, Event.OperationFailed, State.Alert);
+
+            s_fsm.addTransition(State.Alert, Event.RecoveryRequested, State.Recovering);
+            s_fsm.addTransition(State.Recovering, Event.OperationSucceeded, State.Running);
+            s_fsm.addTransition(State.Recovering, Event.OperationFailed, State.Alert);
+
+            s_fsm.addTransition(State.Running, Event.DestroyRequested, State.Destroying);
+            s_fsm.addTransition(State.Stopped, Event.DestroyRequested, State.Destroying);
+            s_fsm.addTransition(State.Alert, Event.DestroyRequested, State.Destroying);
+            s_fsm.addTransition(State.Error, Event.DestroyRequested, State.Destroying);
+
+            s_fsm.addTransition(State.Destroying, Event.OperationSucceeded, State.Destroyed);
+
+        }
+        String _description;
+
+        State(String description) {
+             _description = description;
+        }
+    }
+
+    long getId();
+    String getName();
+    String getDescription();
+    long getZoneId();
+    long getKubernetesVersionId();
+    long getServiceOfferingId();
+    long getTemplateId();
+    long getNetworkId();
+    long getDomainId();
+    long getAccountId();
+    long getMasterNodeCount();
+    long getNodeCount();
+    long getTotalNodeCount();
+    String getKeyPair();
+    long getCores();
+    long getMemory();
+    long getNodeRootDiskSize();
+    String getEndpoint();
+    boolean isCheckForGc();
+    @Override
+    State getState();
+    Date getCreated();
+}
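
The transitions registered in the static block above define the full cluster lifecycle; for example, Created moves to Starting on StartRequested, and Starting moves to Running, Alert or Error depending on the outcome. Below is a minimal sketch of resolving the next state through the shared state machine, assuming StateMachine2 exposes getNextState(currentState, event) throwing NoTransitionException; the manager class later in this commit drives transitions through its own stateTransitTo helper rather than like this.

    // Sketch only: look up the next lifecycle state for an event using the FSM defined above.
    import com.cloud.kubernetes.cluster.KubernetesCluster;
    import com.cloud.utils.fsm.NoTransitionException;
    import com.cloud.utils.fsm.StateMachine2;

    public class KubernetesClusterFsmExample {
        public static void main(String[] args) throws NoTransitionException {
            StateMachine2<KubernetesCluster.State, KubernetesCluster.Event, KubernetesCluster> fsm =
                    KubernetesCluster.State.getStateMachine();
            // Created --StartRequested--> Starting, per the registered transitions.
            KubernetesCluster.State next =
                    fsm.getNextState(KubernetesCluster.State.Created, KubernetesCluster.Event.StartRequested);
            System.out.println(next); // expected: Starting (getNextState signature assumed)
        }
    }
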
diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterDetailsVO.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterDetailsVO.java
new file mode 100644
index 0000000..30b2864
--- /dev/null
+++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterDetailsVO.java
@@ -0,0 +1,84 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.kubernetes.cluster;
+
+
+import javax.persistence.Column;
+
+import javax.persistence.Entity;
+import javax.persistence.Id;
+import javax.persistence.Table;
+import javax.persistence.GeneratedValue;
+import javax.persistence.GenerationType;
+
+import org.apache.cloudstack.api.ResourceDetail;
+
+@Entity
+@Table(name = "kubernetes_cluster_details")
+public class KubernetesClusterDetailsVO implements ResourceDetail {
+    @Id
+    @GeneratedValue(strategy = GenerationType.IDENTITY)
+    @Column(name = "id")
+    private long id;
+
+    @Column(name = "cluster_id")
+    private long resourceId;
+
+    @Column(name = "name")
+    private String name;
+
+    @Column(name = "value", length = 10240)
+    private String value;
+
+    @Column(name = "display")
+    private boolean display;
+
+    public KubernetesClusterDetailsVO() {
+    }
+
+    public KubernetesClusterDetailsVO(long id, String name, String value, boolean display) {
+        this.resourceId = id;
+        this.name = name;
+        this.value = value;
+        this.display = display;
+    }
+
+    @Override
+    public long getId() {
+        return id;
+    }
+
+    @Override
+    public String getName() {
+        return name;
+    }
+
+    @Override
+    public String getValue() {
+        return value;
+    }
+
+    @Override
+    public long getResourceId() {
+        return resourceId;
+    }
+
+    @Override
+    public boolean isDisplay() {
+        return display;
+    }
+}
diff --git a/server/src/main/java/com/cloud/api/query/dao/NetworkOfferingJoinDao.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterEventTypes.java
old mode 100644
new mode 100755
similarity index 55%
copy from server/src/main/java/com/cloud/api/query/dao/NetworkOfferingJoinDao.java
copy to plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterEventTypes.java
index 362cabb..a947e42
--- a/server/src/main/java/com/cloud/api/query/dao/NetworkOfferingJoinDao.java
+++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterEventTypes.java
@@ -14,24 +14,13 @@
 // KIND, either express or implied.  See the License for the
 // specific language governing permissions and limitations
 // under the License.
-
-package com.cloud.api.query.dao;
-
-import java.util.List;
-
-import org.apache.cloudstack.api.response.NetworkOfferingResponse;
-
-import com.cloud.api.query.vo.NetworkOfferingJoinVO;
-import com.cloud.offering.NetworkOffering;
-import com.cloud.utils.db.GenericDao;
-
-public interface NetworkOfferingJoinDao extends GenericDao<NetworkOfferingJoinVO, Long> {
-
-    List<NetworkOfferingJoinVO> findByDomainId(long domainId);
-
-    List<NetworkOfferingJoinVO> findByZoneId(long zoneId);
-
-    NetworkOfferingResponse newNetworkOfferingResponse(NetworkOffering nof);
-
-    NetworkOfferingJoinVO newNetworkOfferingView(NetworkOffering nof);
+package com.cloud.kubernetes.cluster;
+
+public class KubernetesClusterEventTypes {
+    public static final String EVENT_KUBERNETES_CLUSTER_CREATE = "KUBERNETES.CLUSTER.CREATE";
+    public static final String EVENT_KUBERNETES_CLUSTER_DELETE = "KUBERNETES.CLUSTER.DELETE";
+    public static final String EVENT_KUBERNETES_CLUSTER_START = "KUBERNETES.CLUSTER.START";
+    public static final String EVENT_KUBERNETES_CLUSTER_STOP = "KUBERNETES.CLUSTER.STOP";
+    public static final String EVENT_KUBERNETES_CLUSTER_SCALE = "KUBERNETES.CLUSTER.SCALE";
+    public static final String EVENT_KUBERNETES_CLUSTER_UPGRADE = "KUBERNETES.CLUSTER.UPGRADE";
 }
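
These event type strings are the kind of constants CloudStack async commands normally reference through the @ActionEvent annotation so that usage and audit events get recorded. The fragment below is only an assumed illustration of that pattern; the actual annotations sit in the Kubernetes cluster command classes added elsewhere in this commit and may differ.

    // Illustrative fragment only; real usage is in the kubernetes.cluster API command classes.
    @ActionEvent(eventType = KubernetesClusterEventTypes.EVENT_KUBERNETES_CLUSTER_CREATE,
            eventDescription = "creating Kubernetes cluster", create = true)
    public void create() throws ResourceAllocationException {
        // delegate to the Kubernetes cluster service (exact method name not shown in this excerpt)
    }
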
diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java
new file mode 100644
index 0000000..358fa03
--- /dev/null
+++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java
@@ -0,0 +1,1500 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.kubernetes.cluster;
+
+import java.math.BigInteger;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.security.SecureRandom;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import javax.inject.Inject;
+import javax.naming.ConfigurationException;
+
+import org.apache.cloudstack.acl.ControlledEntity;
+import org.apache.cloudstack.acl.SecurityChecker;
+import org.apache.cloudstack.api.ApiConstants;
+import org.apache.cloudstack.api.command.user.kubernetes.cluster.CreateKubernetesClusterCmd;
+import org.apache.cloudstack.api.command.user.kubernetes.cluster.DeleteKubernetesClusterCmd;
+import org.apache.cloudstack.api.command.user.kubernetes.cluster.GetKubernetesClusterConfigCmd;
+import org.apache.cloudstack.api.command.user.kubernetes.cluster.ListKubernetesClustersCmd;
+import org.apache.cloudstack.api.command.user.kubernetes.cluster.ScaleKubernetesClusterCmd;
+import org.apache.cloudstack.api.command.user.kubernetes.cluster.StartKubernetesClusterCmd;
+import org.apache.cloudstack.api.command.user.kubernetes.cluster.StopKubernetesClusterCmd;
+import org.apache.cloudstack.api.command.user.kubernetes.cluster.UpgradeKubernetesClusterCmd;
+import org.apache.cloudstack.api.response.KubernetesClusterConfigResponse;
+import org.apache.cloudstack.api.response.KubernetesClusterResponse;
+import org.apache.cloudstack.api.response.ListResponse;
+import org.apache.cloudstack.context.CallContext;
+import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService;
+import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine;
+import org.apache.cloudstack.framework.config.ConfigKey;
+import org.apache.cloudstack.managed.context.ManagedContextRunnable;
+import org.apache.commons.codec.binary.Base64;
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+
+import com.cloud.api.ApiDBUtils;
+import com.cloud.api.query.dao.NetworkOfferingJoinDao;
+import com.cloud.api.query.dao.TemplateJoinDao;
+import com.cloud.api.query.vo.NetworkOfferingJoinVO;
+import com.cloud.api.query.vo.TemplateJoinVO;
+import com.cloud.capacity.CapacityManager;
+import com.cloud.dc.ClusterDetailsDao;
+import com.cloud.dc.ClusterDetailsVO;
+import com.cloud.dc.ClusterVO;
+import com.cloud.dc.DataCenter;
+import com.cloud.dc.DataCenterVO;
+import com.cloud.dc.dao.ClusterDao;
+import com.cloud.dc.dao.DataCenterDao;
+import com.cloud.deploy.DeployDestination;
+import com.cloud.domain.Domain;
+import com.cloud.exception.ConcurrentOperationException;
+import com.cloud.exception.InsufficientCapacityException;
+import com.cloud.exception.InsufficientServerCapacityException;
+import com.cloud.exception.InvalidParameterValueException;
+import com.cloud.exception.PermissionDeniedException;
+import com.cloud.exception.ResourceAllocationException;
+import com.cloud.host.Host.Type;
+import com.cloud.host.HostVO;
+import com.cloud.hypervisor.Hypervisor;
+import com.cloud.kubernetes.cluster.actionworkers.KubernetesClusterActionWorker;
+import com.cloud.kubernetes.cluster.actionworkers.KubernetesClusterDestroyWorker;
+import com.cloud.kubernetes.cluster.actionworkers.KubernetesClusterScaleWorker;
+import com.cloud.kubernetes.cluster.actionworkers.KubernetesClusterStartWorker;
+import com.cloud.kubernetes.cluster.actionworkers.KubernetesClusterStopWorker;
+import com.cloud.kubernetes.cluster.actionworkers.KubernetesClusterUpgradeWorker;
+import com.cloud.kubernetes.cluster.dao.KubernetesClusterDao;
+import com.cloud.kubernetes.cluster.dao.KubernetesClusterDetailsDao;
+import com.cloud.kubernetes.cluster.dao.KubernetesClusterVmMapDao;
+import com.cloud.kubernetes.version.KubernetesSupportedVersion;
+import com.cloud.kubernetes.version.KubernetesSupportedVersionVO;
+import com.cloud.kubernetes.version.KubernetesVersionManagerImpl;
+import com.cloud.kubernetes.version.dao.KubernetesSupportedVersionDao;
+import com.cloud.network.IpAddress;
+import com.cloud.network.Network;
+import com.cloud.network.Network.Service;
+import com.cloud.network.NetworkModel;
+import com.cloud.network.NetworkService;
+import com.cloud.network.Networks;
+import com.cloud.network.PhysicalNetwork;
+import com.cloud.network.dao.FirewallRulesDao;
+import com.cloud.network.dao.NetworkDao;
+import com.cloud.network.dao.NetworkVO;
+import com.cloud.network.dao.PhysicalNetworkDao;
+import com.cloud.network.rules.FirewallRule;
+import com.cloud.network.rules.FirewallRuleVO;
+import com.cloud.offering.NetworkOffering;
+import com.cloud.offering.ServiceOffering;
+import com.cloud.offerings.NetworkOfferingServiceMapVO;
+import com.cloud.offerings.NetworkOfferingVO;
+import com.cloud.offerings.dao.NetworkOfferingDao;
+import com.cloud.offerings.dao.NetworkOfferingServiceMapDao;
+import com.cloud.org.Cluster;
+import com.cloud.org.Grouping;
+import com.cloud.projects.Project;
+import com.cloud.resource.ResourceManager;
+import com.cloud.service.ServiceOfferingVO;
+import com.cloud.service.dao.ServiceOfferingDao;
+import com.cloud.storage.VMTemplateVO;
+import com.cloud.storage.VMTemplateZoneVO;
+import com.cloud.storage.dao.VMTemplateDao;
+import com.cloud.storage.dao.VMTemplateZoneDao;
+import com.cloud.user.Account;
+import com.cloud.user.AccountManager;
+import com.cloud.user.AccountService;
+import com.cloud.user.SSHKeyPairVO;
+import com.cloud.user.dao.SSHKeyPairDao;
+import com.cloud.utils.Pair;
+import com.cloud.utils.Ternary;
+import com.cloud.utils.component.ComponentContext;
+import com.cloud.utils.component.ManagerBase;
+import com.cloud.utils.concurrency.NamedThreadFactory;
+import com.cloud.utils.db.Filter;
+import com.cloud.utils.db.GlobalLock;
+import com.cloud.utils.db.SearchBuilder;
+import com.cloud.utils.db.SearchCriteria;
+import com.cloud.utils.db.Transaction;
+import com.cloud.utils.db.TransactionCallback;
+import com.cloud.utils.db.TransactionCallbackNoReturn;
+import com.cloud.utils.db.TransactionStatus;
+import com.cloud.utils.exception.CloudRuntimeException;
+import com.cloud.utils.fsm.NoTransitionException;
+import com.cloud.utils.fsm.StateMachine2;
+import com.cloud.utils.net.NetUtils;
+import com.cloud.vm.UserVmVO;
+import com.cloud.vm.VMInstanceVO;
+import com.cloud.vm.VirtualMachine;
+import com.cloud.vm.dao.UserVmDao;
+import com.cloud.vm.dao.VMInstanceDao;
+import com.google.common.base.Strings;
+
+public class KubernetesClusterManagerImpl extends ManagerBase implements KubernetesClusterService {
+
+    private static final Logger LOGGER = Logger.getLogger(KubernetesClusterManagerImpl.class);
+    private static final String DEFAULT_NETWORK_OFFERING_FOR_KUBERNETES_SERVICE_NAME = "DefaultNetworkOfferingforKubernetesService";
+
+    protected StateMachine2<KubernetesCluster.State, KubernetesCluster.Event, KubernetesCluster> _stateMachine = KubernetesCluster.State.getStateMachine();
+
+    ScheduledExecutorService _gcExecutor;
+    ScheduledExecutorService _stateScanner;
+
+    @Inject
+    public KubernetesClusterDao kubernetesClusterDao;
+    @Inject
+    public KubernetesClusterVmMapDao kubernetesClusterVmMapDao;
+    @Inject
+    public KubernetesClusterDetailsDao kubernetesClusterDetailsDao;
+    @Inject
+    public KubernetesSupportedVersionDao kubernetesSupportedVersionDao;
+    @Inject
+    protected SSHKeyPairDao sshKeyPairDao;
+    @Inject
+    protected DataCenterDao dataCenterDao;
+    @Inject
+    protected ClusterDao clusterDao;
+    @Inject
+    protected ClusterDetailsDao clusterDetailsDao;
+    @Inject
+    protected ServiceOfferingDao serviceOfferingDao;
+    @Inject
+    protected VMTemplateDao templateDao;
+    @Inject
+    protected VMTemplateZoneDao templateZoneDao;
+    @Inject
+    protected TemplateJoinDao templateJoinDao;
+    @Inject
+    protected AccountService accountService;
+    @Inject
+    protected AccountManager accountManager;
+    @Inject
+    protected VMInstanceDao vmInstanceDao;
+    @Inject
+    protected UserVmDao userVmDao;
+    @Inject
+    protected NetworkOfferingDao networkOfferingDao;
+    @Inject
+    protected NetworkOfferingJoinDao networkOfferingJoinDao;
+    @Inject
+    protected NetworkOfferingServiceMapDao networkOfferingServiceMapDao;
+    @Inject
+    protected NetworkService networkService;
+    @Inject
+    protected NetworkModel networkModel;
+    @Inject
+    protected PhysicalNetworkDao physicalNetworkDao;
+    @Inject
+    protected NetworkOrchestrationService networkMgr;
+    @Inject
+    protected NetworkDao networkDao;
+    @Inject
+    protected CapacityManager capacityManager;
+    @Inject
+    protected ResourceManager resourceManager;
+    @Inject
+    protected FirewallRulesDao firewallRulesDao;
+
+    private void logMessage(final Level logLevel, final String message, final Exception e) {
+        if (logLevel == Level.WARN) {
+            if (e != null) {
+                LOGGER.warn(message, e);
+            } else {
+                LOGGER.warn(message);
+            }
+        } else {
+            if (e != null) {
+                LOGGER.error(message, e);
+            } else {
+                LOGGER.error(message);
+            }
+        }
+    }
+
+    private void logTransitStateAndThrow(final Level logLevel, final String message, final Long kubernetesClusterId, final KubernetesCluster.Event event, final Exception e) throws CloudRuntimeException {
+        logMessage(logLevel, message, e);
+        if (kubernetesClusterId != null && event != null) {
+            stateTransitTo(kubernetesClusterId, event);
+        }
+        if (e == null) {
+            throw new CloudRuntimeException(message);
+        }
+        throw new CloudRuntimeException(message, e);
+    }
+
+    private void logAndThrow(final Level logLevel, final String message) throws CloudRuntimeException {
+        logTransitStateAndThrow(logLevel, message, null, null, null);
+    }
+
+    private void logAndThrow(final Level logLevel, final String message, final Exception ex) throws CloudRuntimeException {
+        logTransitStateAndThrow(logLevel, message, null, null, ex);
+    }
+
+    private boolean isKubernetesServiceTemplateConfigured(DataCenter zone) {
+        // Check Kubernetes VM template for zone
+        boolean isHyperVAvailable = false;
+        boolean isKVMAvailable = false;
+        boolean isVMwareAvailable = false;
+        boolean isXenserverAvailable = false;
+        List<ClusterVO> clusters = clusterDao.listByZoneId(zone.getId());
+        for (ClusterVO clusterVO : clusters) {
+            if (Hypervisor.HypervisorType.Hyperv.equals(clusterVO.getHypervisorType())) {
+                isHyperVAvailable = true;
+            }
+            if (Hypervisor.HypervisorType.KVM.equals(clusterVO.getHypervisorType())) {
+                isKVMAvailable = true;
+            }
+            if (Hypervisor.HypervisorType.VMware.equals(clusterVO.getHypervisorType())) {
+                isVMwareAvailable = true;
+            }
+            if (Hypervisor.HypervisorType.XenServer.equals(clusterVO.getHypervisorType())) {
+                isXenserverAvailable = true;
+            }
+        }
+        List<Pair<String, String>> templatePairs = new ArrayList<>();
+        if (isHyperVAvailable) {
+            templatePairs.add(new Pair<>(KubernetesClusterHyperVTemplateName.key(), KubernetesClusterHyperVTemplateName.value()));
+        }
+        if (isKVMAvailable) {
+            templatePairs.add(new Pair<>(KubernetesClusterKVMTemplateName.key(), KubernetesClusterKVMTemplateName.value()));
+        }
+        if (isVMwareAvailable) {
+            templatePairs.add(new Pair<>(KubernetesClusterVMwareTemplateName.key(), KubernetesClusterVMwareTemplateName.value()));
+        }
+        if (isXenserverAvailable) {
+            templatePairs.add(new Pair<>(KubernetesClusterXenserverTemplateName.key(), KubernetesClusterXenserverTemplateName.value()));
+        }
+        for (Pair<String, String> templatePair : templatePairs) {
+            String templateKey = templatePair.first();
+            String templateName = templatePair.second();
+            if (Strings.isNullOrEmpty(templateName)) {
+                LOGGER.warn(String.format("Global setting %s is empty. Template name needs to be specified for the Kubernetes service to function", templateKey));
+                return false;
+            }
+            final VMTemplateVO template = templateDao.findByTemplateName(templateName);
+            if (template == null) {
+                LOGGER.warn(String.format("Unable to find the template %s to be used for provisioning Kubernetes cluster nodes", templateName));
+                return false;
+            }
+            List<VMTemplateZoneVO> listZoneTemplate = templateZoneDao.listByZoneTemplate(zone.getId(), template.getId());
+            if (listZoneTemplate == null || listZoneTemplate.isEmpty()) {
+                LOGGER.warn(String.format("The template ID: %s, name: %s is not available for use in zone ID: %s for provisioning Kubernetes cluster nodes", template.getUuid(), templateName, zone.getUuid()));
+                return false;
+            }
+        }
+        return true;
+    }
+
+    private boolean isKubernetesServiceNetworkOfferingConfigured(DataCenter zone) {
+        // Check network offering
+        String networkOfferingName = KubernetesClusterNetworkOffering.value();
+        if (networkOfferingName == null || networkOfferingName.isEmpty()) {
+            LOGGER.warn(String.format("Global setting %s is empty. Admin has not yet specified the network offering to be used for provisioning isolated network for the cluster", KubernetesClusterNetworkOffering.key()));
+            return false;
+        }
+        NetworkOfferingVO networkOffering = networkOfferingDao.findByUniqueName(networkOfferingName);
+        if (networkOffering == null) {
+            LOGGER.warn(String.format("Unable to find the network offering %s to be used for provisioning Kubernetes cluster", networkOfferingName));
+            return false;
+        }
+        if (networkOffering.getState() == NetworkOffering.State.Disabled) {
+            LOGGER.warn(String.format("Network offering ID: %s is not enabled", networkOffering.getUuid()));
+            return false;
+        }
+        List<String> services = networkOfferingServiceMapDao.listServicesForNetworkOffering(networkOffering.getId());
+        if (services == null || services.isEmpty() || !services.contains("SourceNat")) {
+            LOGGER.warn(String.format("Network offering ID: %s does not have necessary services to provision Kubernetes cluster", networkOffering.getUuid()));
+            return false;
+        }
+        if (!networkOffering.isEgressDefaultPolicy()) {
+            LOGGER.warn(String.format("Network offering ID: %s has egress default policy turned off; it should be on to provision a Kubernetes cluster", networkOffering.getUuid()));
+            return false;
+        }
+        boolean offeringAvailableForZone = false;
+        List<NetworkOfferingJoinVO> networkOfferingJoinVOs = networkOfferingJoinDao.findByZoneId(zone.getId(), true);
+        for (NetworkOfferingJoinVO networkOfferingJoinVO : networkOfferingJoinVOs) {
+            if (networkOffering.getId() == networkOfferingJoinVO.getId()) {
+                offeringAvailableForZone = true;
+                break;
+            }
+        }
+        if (!offeringAvailableForZone) {
+            LOGGER.warn(String.format("Network offering ID: %s is not available for zone ID: %s", networkOffering.getUuid(), zone.getUuid()));
+            return false;
+        }
+        long physicalNetworkId = networkModel.findPhysicalNetworkId(zone.getId(), networkOffering.getTags(), networkOffering.getTrafficType());
+        PhysicalNetwork physicalNetwork = physicalNetworkDao.findById(physicalNetworkId);
+        if (physicalNetwork == null) {
+            LOGGER.warn(String.format("Unable to find physical network with tag: %s", networkOffering.getTags()));
+            return false;
+        }
+        return true;
+    }
+
+    private boolean isKubernetesServiceConfigured(DataCenter zone) {
+        if (!isKubernetesServiceTemplateConfigured(zone)) {
+            return false;
+        }
+        if (!isKubernetesServiceNetworkOfferingConfigured(zone)) {
+            return false;
+        }
+        return true;
+    }
+
+    private IpAddress getSourceNatIp(Network network) {
+        List<? extends IpAddress> addresses = networkModel.listPublicIpsAssignedToGuestNtwk(network.getId(), true);
+        if (CollectionUtils.isEmpty(addresses)) {
+            return null;
+        }
+        for (IpAddress address : addresses) {
+            if (address.isSourceNat()) {
+                return address;
+            }
+        }
+        return null;
+    }
+
+    private VMTemplateVO getKubernetesServiceTemplate(Hypervisor.HypervisorType hypervisorType) {
+        String templateName = null;
+        switch (hypervisorType) {
+            case Hyperv:
+                templateName = KubernetesClusterHyperVTemplateName.value();
+                break;
+            case KVM:
+                templateName = KubernetesClusterKVMTemplateName.value();
+                break;
+            case VMware:
+                templateName = KubernetesClusterVMwareTemplateName.value();
+                break;
+            case XenServer:
+                templateName = KubernetesClusterXenserverTemplateName.value();
+                break;
+        }
+        return templateDao.findByTemplateName(templateName);
+    }
+
+    private boolean validateIsolatedNetwork(Network network, int clusterTotalNodeCount) {
+        if (Network.GuestType.Isolated.equals(network.getGuestType())) {
+            if (Network.State.Allocated.equals(network.getState())) { // Allocated networks won't have IP and rules
+                return true;
+            }
+            IpAddress sourceNatIp = getSourceNatIp(network);
+            if (sourceNatIp == null) {
+                throw new InvalidParameterValueException(String.format("Network ID: %s does not have a source NAT IP associated with it. To provision a Kubernetes Cluster, source NAT IP is required", network.getUuid()));
+            }
+            List<FirewallRuleVO> rules = firewallRulesDao.listByIpAndPurposeAndNotRevoked(sourceNatIp.getId(), FirewallRule.Purpose.Firewall);
+            for (FirewallRuleVO rule : rules) {
+                Integer startPort = rule.getSourcePortStart();
+                Integer endPort = rule.getSourcePortEnd();
+                if (LOGGER.isDebugEnabled()) {
+                    LOGGER.debug("Network rule : " + startPort + " " + endPort);
+                }
+                if (startPort <= KubernetesClusterActionWorker.CLUSTER_API_PORT && KubernetesClusterActionWorker.CLUSTER_API_PORT <= endPort) {
+                    throw new InvalidParameterValueException(String.format("Network ID: %s has conflicting firewall rules to provision Kubernetes cluster for API access", network.getUuid()));
+                }
+                if (startPort <= KubernetesClusterActionWorker.CLUSTER_NODES_DEFAULT_START_SSH_PORT && KubernetesClusterActionWorker.CLUSTER_NODES_DEFAULT_START_SSH_PORT + clusterTotalNodeCount <= endPort) {
+                    throw new InvalidParameterValueException(String.format("Network ID: %s has conflicting firewall rules to provision Kubernetes cluster for node VM SSH access", network.getUuid()));
+                }
+            }
+            rules = firewallRulesDao.listByIpAndPurposeAndNotRevoked(sourceNatIp.getId(), FirewallRule.Purpose.PortForwarding);
+            for (FirewallRuleVO rule : rules) {
+                Integer startPort = rule.getSourcePortStart();
+                Integer endPort = rule.getSourcePortEnd();
+                if (LOGGER.isDebugEnabled()) {
+                    LOGGER.debug("Network rule : " + startPort + " " + endPort);
+                }
+                if (startPort <= KubernetesClusterActionWorker.CLUSTER_API_PORT && KubernetesClusterActionWorker.CLUSTER_API_PORT <= endPort) {
+                    throw new InvalidParameterValueException(String.format("Network ID: %s has conflicting port forwarding rules to provision Kubernetes cluster for API access", network.getUuid()));
+                }
+                if (startPort <= KubernetesClusterActionWorker.CLUSTER_NODES_DEFAULT_START_SSH_PORT && KubernetesClusterActionWorker.CLUSTER_NODES_DEFAULT_START_SSH_PORT + clusterTotalNodeCount <= endPort) {
+                    throw new InvalidParameterValueException(String.format("Network ID: %s has conflicting port forwarding rules to provision Kubernetes cluster for node VM SSH access", network.getUuid()));
+                }
+            }
+        }
+        return true;
+    }
+
+    private boolean validateNetwork(Network network, int clusterTotalNodeCount) {
+        NetworkOffering networkOffering = networkOfferingDao.findById(network.getNetworkOfferingId());
+        if (networkOffering.isSystemOnly()) {
+            throw new InvalidParameterValueException(String.format("Network ID: %s is for system use only", network.getUuid()));
+        }
+        if (!networkModel.areServicesSupportedInNetwork(network.getId(), Service.UserData)) {
+            throw new InvalidParameterValueException(String.format("Network ID: %s does not support userdata that is required for Kubernetes cluster", network.getUuid()));
+        }
+        if (!networkModel.areServicesSupportedInNetwork(network.getId(), Service.Firewall)) {
+            throw new InvalidParameterValueException(String.format("Network ID: %s does not support firewall that is required for Kubernetes cluster", network.getUuid()));
+        }
+        if (!networkModel.areServicesSupportedInNetwork(network.getId(), Service.PortForwarding)) {
+            throw new InvalidParameterValueException(String.format("Network ID: %s does not support port forwarding that is required for Kubernetes cluster", network.getUuid()));
+        }
+        if (!networkModel.areServicesSupportedInNetwork(network.getId(), Service.Dhcp)) {
+            throw new InvalidParameterValueException(String.format("Network ID: %s does not support DHCP that is required for Kubernetes cluster", network.getUuid()));
+        }
+        validateIsolatedNetwork(network, clusterTotalNodeCount);
+        return true;
+    }
+
+    private boolean validateServiceOffering(final ServiceOffering serviceOffering, final KubernetesSupportedVersion version) {
+        if (serviceOffering.isDynamic()) {
+            throw new InvalidParameterValueException(String.format("Custom service offerings are not supported for creating clusters, service offering ID: %s", serviceOffering.getUuid()));
+        }
+        if (serviceOffering.getCpu() < MIN_KUBERNETES_CLUSTER_NODE_CPU || serviceOffering.getRamSize() < MIN_KUBERNETES_CLUSTER_NODE_RAM_SIZE) {
+            throw new InvalidParameterValueException(String.format("Kubernetes cluster cannot be created with service offering ID: %s, Kubernetes cluster template(CoreOS) needs minimum %d vCPUs and %d MB RAM", serviceOffering.getUuid(), MIN_KUBERNETES_CLUSTER_NODE_CPU, MIN_KUBERNETES_CLUSTER_NODE_RAM_SIZE));
+        }
+        if (serviceOffering.getCpu() < version.getMinimumCpu()) {
+            throw new InvalidParameterValueException(String.format("Kubernetes cluster cannot be created with service offering ID: %s, Kubernetes version ID: %s needs minimum %d vCPUs", serviceOffering.getUuid(), version.getUuid(), version.getMinimumCpu()));
+        }
+        if (serviceOffering.getRamSize() < version.getMinimumRamSize()) {
+            throw new InvalidParameterValueException(String.format("Kubernetes cluster cannot be created with service offering ID: %s, associated Kubernetes version ID: %s needs minimum %d MB RAM", serviceOffering.getUuid(), version.getUuid(), version.getMinimumRamSize()));
+        }
+        return true;
+    }
+
+    private void validateDockerRegistryParams(final String dockerRegistryUserName,
+                                              final String dockerRegistryPassword,
+                                              final String dockerRegistryUrl,
+                                              final String dockerRegistryEmail) {
+        // if no parameters related to the docker registry are specified then there is nothing to validate, so return
+        if ((dockerRegistryUserName == null || dockerRegistryUserName.isEmpty()) &&
+                (dockerRegistryPassword == null || dockerRegistryPassword.isEmpty()) &&
+                (dockerRegistryUrl == null || dockerRegistryUrl.isEmpty()) &&
+                (dockerRegistryEmail == null || dockerRegistryEmail.isEmpty())) {
+            return;
+        }
+
+        // either all the docker registry parameters must be specified or none of them
+        if (!((dockerRegistryUserName != null && !dockerRegistryUserName.isEmpty()) &&
+                (dockerRegistryPassword != null && !dockerRegistryPassword.isEmpty()) &&
+                (dockerRegistryUrl != null && !dockerRegistryUrl.isEmpty()) &&
+                (dockerRegistryEmail != null && !dockerRegistryEmail.isEmpty()))) {
+            throw new InvalidParameterValueException("All the docker private registry parameters (username, password, url, email) required are specified");
+        }
+
+        try {
+            URL url = new URL(dockerRegistryUrl);
+        } catch (MalformedURLException e) {
+            throw new InvalidParameterValueException("Invalid docker registry url specified");
+        }
+
+        Pattern VALID_EMAIL_ADDRESS_REGEX = Pattern.compile("^[A-Z0-9._%+-]+@[A-Z0-9.-]+\\.[A-Z]{2,6}$", Pattern.CASE_INSENSITIVE);
+        Matcher matcher = VALID_EMAIL_ADDRESS_REGEX.matcher(dockerRegistryEmail);
+        if (!matcher.find()) {
+            throw new InvalidParameterValueException("Invalid docker registry email specified");
+        }
+    }
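+
+    // Illustrative (hypothetical) inputs for the all-or-nothing rule enforced above: passing all four
+    // registry parameters, e.g. userName="reguser", password="secret", url="https://registry.example.com",
+    // email="reguser@example.com", passes validation, as does passing none of them; any partial combination,
+    // a malformed URL or a malformed email address is rejected with an InvalidParameterValueException.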
+
+    private DeployDestination plan(final long nodesCount, final DataCenter zone, final ServiceOffering offering) throws InsufficientServerCapacityException {
+        final int cpu_requested = offering.getCpu() * offering.getSpeed();
+        final long ram_requested = offering.getRamSize() * 1024L * 1024L;
+        List<HostVO> hosts = resourceManager.listAllHostsInOneZoneByType(Type.Routing, zone.getId());
+        final Map<String, Pair<HostVO, Integer>> hosts_with_reserved_capacity = new ConcurrentHashMap<String, Pair<HostVO, Integer>>();
+        for (HostVO h : hosts) {
+            hosts_with_reserved_capacity.put(h.getUuid(), new Pair<HostVO, Integer>(h, 0));
+        }
+        boolean suitable_host_found = false;
+        Cluster planCluster = null;
+        for (int i = 1; i <= nodesCount + 1; i++) {
+            suitable_host_found = false;
+            for (Map.Entry<String, Pair<HostVO, Integer>> hostEntry : hosts_with_reserved_capacity.entrySet()) {
+                Pair<HostVO, Integer> hp = hostEntry.getValue();
+                HostVO h = hp.first();
+                int reserved = hp.second();
+                reserved++;
+                ClusterVO cluster = clusterDao.findById(h.getClusterId());
+                ClusterDetailsVO cluster_detail_cpu = clusterDetailsDao.findDetail(cluster.getId(), "cpuOvercommitRatio");
+                ClusterDetailsVO cluster_detail_ram = clusterDetailsDao.findDetail(cluster.getId(), "memoryOvercommitRatio");
+                Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue());
+                Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue());
+                if (LOGGER.isDebugEnabled()) {
+                    LOGGER.debug(String.format("Checking host ID: %s for capacity already reserved %d", h.getUuid(), reserved));
+                }
+                if (capacityManager.checkIfHostHasCapacity(h.getId(), cpu_requested * reserved, ram_requested * reserved, false, cpuOvercommitRatio, memoryOvercommitRatio, true)) {
+                    if (LOGGER.isDebugEnabled()) {
+                        LOGGER.debug(String.format("Found host ID: %s for with enough capacity, CPU=%d RAM=%d", h.getUuid(), cpu_requested * reserved, ram_requested * reserved));
+                    }
+                    hostEntry.setValue(new Pair<HostVO, Integer>(h, reserved));
+                    suitable_host_found = true;
+                    planCluster = cluster;
+                    break;
+                }
+            }
+            if (!suitable_host_found) {
+                if (LOGGER.isInfoEnabled()) {
+                    LOGGER.info(String.format("Suitable hosts not found in datacenter ID: %s for node %d", zone.getUuid(), i));
+                }
+                break;
+            }
+        }
+        if (suitable_host_found) {
+            if (LOGGER.isInfoEnabled()) {
+                LOGGER.info(String.format("Suitable hosts found in datacenter ID: %s, creating deployment destination", zone.getUuid()));
+            }
+            return new DeployDestination(zone, null, planCluster, null);
+        }
+        String msg = String.format("Cannot find enough capacity for Kubernetes cluster(requested cpu=%1$s memory=%2$s)",
+                cpu_requested * nodesCount, ram_requested * nodesCount);
+        LOGGER.warn(msg);
+        throw new InsufficientServerCapacityException(msg, DataCenter.class, zone.getId());
+    }
+
+    @Override
+    public KubernetesClusterResponse createKubernetesClusterResponse(long kubernetesClusterId) {
+        KubernetesClusterVO kubernetesCluster = kubernetesClusterDao.findById(kubernetesClusterId);
+        KubernetesClusterResponse response = new KubernetesClusterResponse();
+        response.setObjectName(KubernetesCluster.class.getSimpleName().toLowerCase());
+        response.setId(kubernetesCluster.getUuid());
+        response.setName(kubernetesCluster.getName());
+        response.setDescription(kubernetesCluster.getDescription());
+        DataCenterVO zone = ApiDBUtils.findZoneById(kubernetesCluster.getZoneId());
+        response.setZoneId(zone.getUuid());
+        response.setZoneName(zone.getName());
+        response.setMasterNodes(kubernetesCluster.getMasterNodeCount());
+        response.setClusterSize(kubernetesCluster.getNodeCount());
+        VMTemplateVO template = ApiDBUtils.findTemplateById(kubernetesCluster.getTemplateId());
+        response.setTemplateId(template.getUuid());
+        ServiceOfferingVO offering = serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId());
+        response.setServiceOfferingId(offering.getUuid());
+        response.setServiceOfferingName(offering.getName());
+        KubernetesSupportedVersionVO version = kubernetesSupportedVersionDao.findById(kubernetesCluster.getKubernetesVersionId());
+        if (version != null) {
+            response.setKubernetesVersionId(version.getUuid());
+            response.setKubernetesVersionName(version.getName());
+        }
+        Account account = ApiDBUtils.findAccountById(kubernetesCluster.getAccountId());
+        if (account.getType() == Account.ACCOUNT_TYPE_PROJECT) {
+            Project project = ApiDBUtils.findProjectByProjectAccountId(account.getId());
+            response.setProjectId(project.getUuid());
+            response.setProjectName(project.getName());
+        } else {
+            response.setAccountName(account.getAccountName());
+        }
+        Domain domain = ApiDBUtils.findDomainById(kubernetesCluster.getDomainId());
+        response.setDomainId(domain.getUuid());
+        response.setDomainName(domain.getName());
+        response.setKeypair(kubernetesCluster.getKeyPair());
+        response.setState(kubernetesCluster.getState().toString());
+        response.setCores(String.valueOf(kubernetesCluster.getCores()));
+        response.setMemory(String.valueOf(kubernetesCluster.getMemory()));
+        NetworkVO ntwk = networkDao.findByIdIncludingRemoved(kubernetesCluster.getNetworkId());
+        response.setEndpoint(kubernetesCluster.getEndpoint());
+        response.setNetworkId(ntwk.getUuid());
+        response.setAssociatedNetworkName(ntwk.getName());
+        List<String> vmIds = new ArrayList<String>();
+        List<KubernetesClusterVmMapVO> vmList = kubernetesClusterVmMapDao.listByClusterId(kubernetesCluster.getId());
+        if (vmList != null && !vmList.isEmpty()) {
+            for (KubernetesClusterVmMapVO vmMapVO : vmList) {
+                UserVmVO userVM = userVmDao.findById(vmMapVO.getVmId());
+                if (userVM != null) {
+                    vmIds.add(userVM.getUuid());
+                }
+            }
+        }
+        response.setVirtualMachineIds(vmIds);
+        return response;
+    }
+
+    private void validateKubernetesClusterCreateParameters(final CreateKubernetesClusterCmd cmd) throws CloudRuntimeException {
+        final String name = cmd.getName();
+        final Long zoneId = cmd.getZoneId();
+        final Long kubernetesVersionId = cmd.getKubernetesVersionId();
+        final Long serviceOfferingId = cmd.getServiceOfferingId();
+        final Account owner = accountService.getActiveAccountById(cmd.getEntityOwnerId());
+        final Long networkId = cmd.getNetworkId();
+        final String sshKeyPair = cmd.getSSHKeyPairName();
+        final Long masterNodeCount = cmd.getMasterNodes();
+        final Long clusterSize = cmd.getClusterSize();
+        final String dockerRegistryUserName = cmd.getDockerRegistryUserName();
+        final String dockerRegistryPassword = cmd.getDockerRegistryPassword();
+        final String dockerRegistryUrl = cmd.getDockerRegistryUrl();
+        final String dockerRegistryEmail = cmd.getDockerRegistryEmail();
+        final Long nodeRootDiskSize = cmd.getNodeRootDiskSize();
+        final String externalLoadBalancerIpAddress = cmd.getExternalLoadBalancerIpAddress();
+
+        if (name == null || name.isEmpty()) {
+            throw new InvalidParameterValueException("Invalid name for the Kubernetes cluster name:" + name);
+        }
+
+        if (masterNodeCount < 1 || masterNodeCount > 100) {
+            throw new InvalidParameterValueException("Invalid cluster master nodes count: " + masterNodeCount);
+        }
+
+        if (clusterSize < 1 || clusterSize > 100) {
+            throw new InvalidParameterValueException("Invalid cluster size: " + clusterSize);
+        }
+
+        DataCenter zone = dataCenterDao.findById(zoneId);
+        if (zone == null) {
+            throw new InvalidParameterValueException("Unable to find zone by ID: " + zoneId);
+        }
+
+        if (Grouping.AllocationState.Disabled == zone.getAllocationState()) {
+            throw new PermissionDeniedException(String.format("Cannot perform this operation, zone ID: %s is currently disabled", zone.getUuid()));
+        }
+
+        if (!isKubernetesServiceConfigured(zone)) {
+            throw new CloudRuntimeException("Kubernetes service has not been configured properly to provision Kubernetes clusters");
+        }
+
+        final KubernetesSupportedVersion clusterKubernetesVersion = kubernetesSupportedVersionDao.findById(kubernetesVersionId);
+        if (clusterKubernetesVersion == null) {
+            throw new InvalidParameterValueException("Unable to find given Kubernetes version in supported versions");
+        }
+        if (!KubernetesSupportedVersion.State.Enabled.equals(clusterKubernetesVersion.getState())) {
+            throw new InvalidParameterValueException(String.format("Kubernetes version ID: %s is in %s state", clusterKubernetesVersion.getUuid(), clusterKubernetesVersion.getState()));
+        }
+        if (clusterKubernetesVersion.getZoneId() != null && !clusterKubernetesVersion.getZoneId().equals(zone.getId())) {
+            throw new InvalidParameterValueException(String.format("Kubernetes version ID: %s is not available for zone ID: %s", clusterKubernetesVersion.getUuid(), zone.getUuid()));
+        }
+        if (masterNodeCount > 1) {
+            try {
+                if (KubernetesVersionManagerImpl.compareSemanticVersions(clusterKubernetesVersion.getSemanticVersion(), MIN_KUBERNETES_VERSION_HA_SUPPORT) < 0) {
+                    throw new InvalidParameterValueException(String.format("HA support is available only for Kubernetes version %s and above. Given version ID: %s is %s", MIN_KUBERNETES_VERSION_HA_SUPPORT, clusterKubernetesVersion.getUuid(), clusterKubernetesVersion.getSemanticVersion()));
+                }
+            } catch (IllegalArgumentException e) {
+                logAndThrow(Level.WARN, String.format("Unable to compare Kubernetes version for given version ID: %s with %s", clusterKubernetesVersion.getUuid(), MIN_KUBERNETES_VERSION_HA_SUPPORT), e);
+            }
+        }
+
+        TemplateJoinVO iso = templateJoinDao.findById(clusterKubernetesVersion.getIsoId());
+        if (iso == null) {
+            throw new InvalidParameterValueException(String.format("Invalid ISO associated with version ID: %s",  clusterKubernetesVersion.getUuid()));
+        }
+        if (!ObjectInDataStoreStateMachine.State.Ready.equals(iso.getState())) {
+            throw new InvalidParameterValueException(String.format("ISO associated with version ID: %s is not in Ready state",  clusterKubernetesVersion.getUuid()));
+        }
+
+        ServiceOffering serviceOffering = serviceOfferingDao.findById(serviceOfferingId);
+        if (serviceOffering == null) {
+            throw new InvalidParameterValueException("No service offering with ID: " + serviceOfferingId);
+        }
+
+        if (sshKeyPair != null && !sshKeyPair.isEmpty()) {
+            SSHKeyPairVO sshKeyPairVO = sshKeyPairDao.findByName(owner.getAccountId(), owner.getDomainId(), sshKeyPair);
+            if (sshKeyPairVO == null) {
+                throw new InvalidParameterValueException(String.format("Given SSH key pair with name: %s was not found for the account %s", sshKeyPair, owner.getAccountName()));
+            }
+        }
+
+        if (nodeRootDiskSize != null && nodeRootDiskSize <= 0) {
+            throw new InvalidParameterValueException(String.format("Invalid value for %s", ApiConstants.NODE_ROOT_DISK_SIZE));
+        }
+
+        if (!validateServiceOffering(serviceOffering, clusterKubernetesVersion)) {
+            throw new InvalidParameterValueException("Given service offering ID: %s is not suitable for Kubernetes cluster");
+        }
+
+        validateDockerRegistryParams(dockerRegistryUserName, dockerRegistryPassword, dockerRegistryUrl, dockerRegistryEmail);
+
+        Network network = null;
+        if (networkId != null) {
+            network = networkService.getNetwork(networkId);
+            if (network == null) {
+                throw new InvalidParameterValueException("Unable to find network with given ID");
+            }
+        }
+
+        if (!Strings.isNullOrEmpty(externalLoadBalancerIpAddress)) {
+            if (!NetUtils.isValidIp4(externalLoadBalancerIpAddress) && !NetUtils.isValidIp6(externalLoadBalancerIpAddress)) {
+                throw new InvalidParameterValueException("Invalid external load balancer IP address");
+            }
+            if (network == null) {
+                throw new InvalidParameterValueException(String.format("%s parameter must be specified along with %s parameter", ApiConstants.EXTERNAL_LOAD_BALANCER_IP_ADDRESS, ApiConstants.NETWORK_ID));
+            }
+            if (!Network.GuestType.Shared.equals(network.getGuestType())) {
+                throw new InvalidParameterValueException(String.format("%s parameter must be specified along with %s type of network", ApiConstants.EXTERNAL_LOAD_BALANCER_IP_ADDRESS, Network.GuestType.Shared.toString()));
+            }
+        }
+
+        if (!KubernetesClusterExperimentalFeaturesEnabled.value() && (!Strings.isNullOrEmpty(dockerRegistryUrl) ||
+                !Strings.isNullOrEmpty(dockerRegistryUserName) || !Strings.isNullOrEmpty(dockerRegistryEmail) || !Strings.isNullOrEmpty(dockerRegistryPassword))) {
+            throw new CloudRuntimeException(String.format("Private registry for the Kubernetes cluster is an experimental feature. Use %s configuration for enabling experimental features", KubernetesClusterExperimentalFeaturesEnabled.key()));
+        }
+    }
+
+    private Network getKubernetesClusterNetworkIfMissing(final String clusterName, final DataCenter zone,  final Account owner, final int masterNodesCount,
+                         final int nodesCount, final String externalLoadBalancerIpAddress, final Long networkId) throws CloudRuntimeException {
+        Network network = null;
+        if (networkId != null) {
+            network = networkDao.findById(networkId);
+            if (Network.GuestType.Isolated.equals(network.getGuestType())) {
+                if (kubernetesClusterDao.listByNetworkId(network.getId()).isEmpty()) {
+                    if (!validateNetwork(network, masterNodesCount + nodesCount)) {
+                        throw new InvalidParameterValueException(String.format("Network ID: %s is not suitable for Kubernetes cluster", network.getUuid()));
+                    }
+                    networkModel.checkNetworkPermissions(owner, network);
+                } else {
+                    throw new InvalidParameterValueException(String.format("Network ID: %s is already under use by another Kubernetes cluster", network.getUuid()));
+                }
+            } else if (Network.GuestType.Shared.equals(network.getGuestType())) {
+                if (masterNodesCount > 1 && Strings.isNullOrEmpty(externalLoadBalancerIpAddress)) {
+                    throw new InvalidParameterValueException(String.format("Multi-master, HA Kubernetes cluster with %s network ID: %s needs an external load balancer IP address. %s parameter can be used",
+                            network.getGuestType().toString(), network.getUuid(), ApiConstants.EXTERNAL_LOAD_BALANCER_IP_ADDRESS));
+                }
+            }
+        } else { // the user has not specified a network in which the cluster VMs are to be provisioned, so create a network for the Kubernetes cluster
+            NetworkOfferingVO networkOffering = networkOfferingDao.findByUniqueName(KubernetesClusterNetworkOffering.value());
+
+            long physicalNetworkId = networkModel.findPhysicalNetworkId(zone.getId(), networkOffering.getTags(), networkOffering.getTrafficType());
+            PhysicalNetwork physicalNetwork = physicalNetworkDao.findById(physicalNetworkId);
+
+            if (LOGGER.isInfoEnabled()) {
+                LOGGER.info(String.format("Creating network for account ID: %s from the network offering ID: %s as part of Kubernetes cluster: %s deployment process", owner.getUuid(), networkOffering.getUuid(), clusterName));
+            }
+
+            try {
+                network = networkMgr.createGuestNetwork(networkOffering.getId(), clusterName + "-network", owner.getAccountName() + "-network",
+                        null, null, null, false, null, owner, null, physicalNetwork, zone.getId(), ControlledEntity.ACLType.Account, null, null, null, null, true, null, null, null);
+            } catch (ConcurrentOperationException | InsufficientCapacityException | ResourceAllocationException e) {
+                logAndThrow(Level.ERROR, String.format("Unable to create network for the Kubernetes cluster: %s", clusterName));
+            }
+        }
+        return network;
+    }
+
+    private void addKubernetesClusterDetails(final KubernetesCluster kubernetesCluster, final Network network, final CreateKubernetesClusterCmd cmd) {
+        final String externalLoadBalancerIpAddress = cmd.getExternalLoadBalancerIpAddress();
+        final String dockerRegistryUserName = cmd.getDockerRegistryUserName();
+        final String dockerRegistryPassword = cmd.getDockerRegistryPassword();
+        final String dockerRegistryUrl = cmd.getDockerRegistryUrl();
+        final String dockerRegistryEmail = cmd.getDockerRegistryEmail();
+        final boolean networkCleanup = cmd.getNetworkId() == null;
+        Transaction.execute(new TransactionCallbackNoReturn() {
+            @Override
+            public void doInTransactionWithoutResult(TransactionStatus status) {
+                List<KubernetesClusterDetailsVO> details = new ArrayList<>();
+                if (Network.GuestType.Shared.equals(network.getGuestType()) && !Strings.isNullOrEmpty(externalLoadBalancerIpAddress)) {
+                    details.add(new KubernetesClusterDetailsVO(kubernetesCluster.getId(), ApiConstants.EXTERNAL_LOAD_BALANCER_IP_ADDRESS, externalLoadBalancerIpAddress, true));
+                }
+                if (!Strings.isNullOrEmpty(dockerRegistryUserName)) {
+                    details.add(new KubernetesClusterDetailsVO(kubernetesCluster.getId(), ApiConstants.DOCKER_REGISTRY_USER_NAME, dockerRegistryUserName, true));
+                }
+                if (!Strings.isNullOrEmpty(dockerRegistryPassword)) {
+                    details.add(new KubernetesClusterDetailsVO(kubernetesCluster.getId(), ApiConstants.DOCKER_REGISTRY_PASSWORD, dockerRegistryPassword, false));
+                }
+                if (!Strings.isNullOrEmpty(dockerRegistryUrl)) {
+                    details.add(new KubernetesClusterDetailsVO(kubernetesCluster.getId(), ApiConstants.DOCKER_REGISTRY_URL, dockerRegistryUrl, true));
+                }
+                if (!Strings.isNullOrEmpty(dockerRegistryEmail)) {
+                    details.add(new KubernetesClusterDetailsVO(kubernetesCluster.getId(), ApiConstants.DOCKER_REGISTRY_EMAIL, dockerRegistryEmail, true));
+                }
+                details.add(new KubernetesClusterDetailsVO(kubernetesCluster.getId(), ApiConstants.USERNAME, "admin", true));
+                SecureRandom random = new SecureRandom();
+                String randomPassword = new BigInteger(130, random).toString(32);
+                details.add(new KubernetesClusterDetailsVO(kubernetesCluster.getId(), ApiConstants.PASSWORD, randomPassword, false));
+                details.add(new KubernetesClusterDetailsVO(kubernetesCluster.getId(), "networkCleanup", String.valueOf(networkCleanup), true));
+                kubernetesClusterDetailsDao.saveDetails(details);
+            }
+        });
+    }
+
+    private void validateKubernetesClusterScaleParameters(ScaleKubernetesClusterCmd cmd) {
+        final Long kubernetesClusterId = cmd.getId();
+        final Long serviceOfferingId = cmd.getServiceOfferingId();
+        final Long clusterSize = cmd.getClusterSize();
+        if (kubernetesClusterId == null || kubernetesClusterId < 1L) {
+            throw new InvalidParameterValueException("Invalid Kubernetes cluster ID");
+        }
+        KubernetesClusterVO kubernetesCluster = kubernetesClusterDao.findById(kubernetesClusterId);
+        if (kubernetesCluster == null || kubernetesCluster.getRemoved() != null) {
+            throw new InvalidParameterValueException("Invalid Kubernetes cluster ID");
+        }
+        final DataCenter zone = dataCenterDao.findById(kubernetesCluster.getZoneId());
+        if (zone == null) {
+            logAndThrow(Level.WARN, String.format("Unable to find zone for Kubernetes cluster ID: %s", kubernetesCluster.getUuid()));
+        }
+
+        Account caller = CallContext.current().getCallingAccount();
+        accountManager.checkAccess(caller, SecurityChecker.AccessType.OperateEntry, false, kubernetesCluster);
+
+        if (serviceOfferingId == null && clusterSize == null) {
+            throw new InvalidParameterValueException(String.format("Kubernetes cluster ID: %s cannot be scaled, either a new service offering or a new cluster size must be passed", kubernetesCluster.getUuid()));
+        }
+
+        final KubernetesSupportedVersion clusterVersion = kubernetesSupportedVersionDao.findById(kubernetesCluster.getKubernetesVersionId());
+        if (clusterVersion == null) {
+            throw new CloudRuntimeException(String.format("Invalid Kubernetes version associated with Kubernetes cluster ID: %s", kubernetesCluster.getUuid()));
+        }
+
+        ServiceOffering serviceOffering = null;
+        if (serviceOfferingId != null) {
+            serviceOffering = serviceOfferingDao.findById(serviceOfferingId);
+            if (serviceOffering == null) {
+                throw new InvalidParameterValueException("Failed to find service offering ID: " + serviceOfferingId);
+            } else {
+                if (serviceOffering.isDynamic()) {
+                    throw new InvalidParameterValueException(String.format("Custom service offerings are not supported for Kubernetes clusters. Kubernetes cluster ID: %s, service offering ID: %s", kubernetesCluster.getUuid(), serviceOffering.getUuid()));
+                }
+                if (serviceOffering.getCpu() < MIN_KUBERNETES_CLUSTER_NODE_CPU || serviceOffering.getRamSize() < MIN_KUBERNETES_CLUSTER_NODE_RAM_SIZE) {
+                    throw new InvalidParameterValueException(String.format("Kubernetes cluster ID: %s cannot be scaled with service offering ID: %s, Kubernetes cluster template(CoreOS) needs minimum %d vCPUs and %d MB RAM",
+                            kubernetesCluster.getUuid(), serviceOffering.getUuid(), MIN_KUBERNETES_CLUSTER_NODE_CPU, MIN_KUBERNETES_CLUSTER_NODE_RAM_SIZE));
+                }
+                if (serviceOffering.getCpu() < clusterVersion.getMinimumCpu()) {
+                    throw new InvalidParameterValueException(String.format("Kubernetes cluster ID: %s cannot be scaled with service offering ID: %s, associated Kubernetes version ID: %s needs minimum %d vCPUs",
+                            kubernetesCluster.getUuid(), serviceOffering.getUuid(), clusterVersion.getUuid(), clusterVersion.getMinimumCpu()));
+                }
+                if (serviceOffering.getRamSize() < clusterVersion.getMinimumRamSize()) {
+                    throw new InvalidParameterValueException(String.format("Kubernetes cluster ID: %s cannot be scaled with service offering ID: %s, associated Kubernetes version ID: %s needs minimum %d MB RAM",
+                            kubernetesCluster.getUuid(), serviceOffering.getUuid(), clusterVersion.getUuid(), clusterVersion.getMinimumRamSize()));
+                }
+            }
+            final ServiceOffering existingServiceOffering = serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId());
+            if (serviceOffering.getRamSize() < existingServiceOffering.getRamSize() ||
+                    serviceOffering.getCpu() * serviceOffering.getSpeed() < existingServiceOffering.getCpu() * existingServiceOffering.getSpeed()) {
+                logAndThrow(Level.WARN, String.format("Kubernetes cluster cannot be scaled down for service offering. Service offering ID: %s offers lesser resources as compared to service offering ID: %s of Kubernetes cluster ID: %s",
+                        serviceOffering.getUuid(), existingServiceOffering.getUuid(), kubernetesCluster.getUuid()));
+            }
+        }
+
+        if (!(kubernetesCluster.getState().equals(KubernetesCluster.State.Created) ||
+                kubernetesCluster.getState().equals(KubernetesCluster.State.Running) ||
+                kubernetesCluster.getState().equals(KubernetesCluster.State.Stopped))) {
+            throw new PermissionDeniedException(String.format("Kubernetes cluster ID: %s is in %s state", kubernetesCluster.getUuid(), kubernetesCluster.getState().toString()));
+        }
+
+        if (clusterSize != null) {
+            if (kubernetesCluster.getState().equals(KubernetesCluster.State.Stopped)) { // The size of a stopped cluster cannot currently be scaled
+                throw new PermissionDeniedException(String.format("Kubernetes cluster ID: %s is in %s state", kubernetesCluster.getUuid(), kubernetesCluster.getState().toString()));
+            }
+            if (clusterSize < 1) {
+                throw new InvalidParameterValueException(String.format("Kubernetes cluster ID: %s cannot be scaled for size, %d", kubernetesCluster.getUuid(), clusterSize));
+            }
+        }
+    }
+
+    private void validateKubernetesClusterUpgradeParameters(UpgradeKubernetesClusterCmd cmd) {
+        // Validate parameters
+        final Long kubernetesClusterId = cmd.getId();
+        final Long upgradeVersionId = cmd.getKubernetesVersionId();
+        if (kubernetesClusterId == null || kubernetesClusterId < 1L) {
+            throw new InvalidParameterValueException("Invalid Kubernetes cluster ID");
+        }
+        if (upgradeVersionId == null || upgradeVersionId < 1L) {
+            throw new InvalidParameterValueException("Invalid Kubernetes version ID");
+        }
+        KubernetesClusterVO kubernetesCluster = kubernetesClusterDao.findById(kubernetesClusterId);
+        if (kubernetesCluster == null || kubernetesCluster.getRemoved() != null) {
+            throw new InvalidParameterValueException("Invalid Kubernetes cluster ID");
+        }
+        accountManager.checkAccess(CallContext.current().getCallingAccount(), SecurityChecker.AccessType.OperateEntry, false, kubernetesCluster);
+        if (!KubernetesCluster.State.Running.equals(kubernetesCluster.getState())) {
+            throw new InvalidParameterValueException(String.format("Kubernetes cluster ID: %s is not in running state", kubernetesCluster.getUuid()));
+        }
+        KubernetesSupportedVersionVO upgradeVersion = kubernetesSupportedVersionDao.findById(upgradeVersionId);
+        if (upgradeVersion == null || upgradeVersion.getRemoved() != null) {
+            throw new InvalidParameterValueException("Invalid Kubernetes version ID");
+        }
+        if (!KubernetesSupportedVersion.State.Enabled.equals(upgradeVersion.getState())) {
+            throw new InvalidParameterValueException(String.format("Kubernetes version ID: %s for upgrade is in %s state", upgradeVersion.getUuid(), upgradeVersion.getState()));
+        }
+        KubernetesSupportedVersionVO clusterVersion = kubernetesSupportedVersionDao.findById(kubernetesCluster.getKubernetesVersionId());
+        if (clusterVersion == null || clusterVersion.getRemoved() != null) {
+            throw new InvalidParameterValueException(String.format("Invalid Kubernetes version associated with cluster ID: %s",
+                    kubernetesCluster.getUuid()));
+        }
+        final ServiceOffering serviceOffering = serviceOfferingDao.findByIdIncludingRemoved(kubernetesCluster.getServiceOfferingId());
+        if (serviceOffering == null) {
+            throw new CloudRuntimeException(String.format("Invalid service offering associated with Kubernetes cluster ID: %s", kubernetesCluster.getUuid()));
+        }
+        if (serviceOffering.getCpu() < upgradeVersion.getMinimumCpu()) {
+            throw new InvalidParameterValueException(String.format("Kubernetes cluster ID: %s cannot be upgraded with Kubernetes version ID: %s which needs minimum %d vCPUs while associated service offering ID: %s offers only %d vCPUs",
+                    kubernetesCluster.getUuid(), upgradeVersion.getUuid(), upgradeVersion.getMinimumCpu(), serviceOffering.getUuid(), serviceOffering.getCpu()));
+        }
+        if (serviceOffering.getRamSize() < upgradeVersion.getMinimumRamSize()) {
+            throw new InvalidParameterValueException(String.format("Kubernetes cluster ID: %s cannot be upgraded with Kubernetes version ID: %s which needs minimum %d MB RAM while associated service offering ID: %s offers only %d MB RAM",
+                    kubernetesCluster.getUuid(), upgradeVersion.getUuid(), upgradeVersion.getMinimumRamSize(), serviceOffering.getUuid(), serviceOffering.getRamSize()));
+        }
+        // Check that upgradeVersion is either a patch upgrade or an immediate minor upgrade
+        try {
+            KubernetesVersionManagerImpl.canUpgradeKubernetesVersion(clusterVersion.getSemanticVersion(), upgradeVersion.getSemanticVersion());
+        } catch (IllegalArgumentException e) {
+            throw new InvalidParameterValueException(e.getMessage());
+        }
+
+        TemplateJoinVO iso = templateJoinDao.findById(upgradeVersion.getIsoId());
+        if (iso == null) {
+            throw new InvalidParameterValueException(String.format("Invalid ISO associated with version ID: %s",  upgradeVersion.getUuid()));
+        }
+        if (!ObjectInDataStoreStateMachine.State.Ready.equals(iso.getState())) {
+            throw new InvalidParameterValueException(String.format("ISO associated with version ID: %s is not in Ready state",  upgradeVersion.getUuid()));
+        }
+    }
+
+    protected boolean stateTransitTo(long kubernetesClusterId, KubernetesCluster.Event e) {
+        KubernetesClusterVO kubernetesCluster = kubernetesClusterDao.findById(kubernetesClusterId);
+        try {
+            return _stateMachine.transitTo(kubernetesCluster, e, null, kubernetesClusterDao);
+        } catch (NoTransitionException nte) {
+            LOGGER.warn(String.format("Failed to transition state of the Kubernetes cluster ID: %s in state %s on event %s", kubernetesCluster.getUuid(), kubernetesCluster.getState().toString(), e.toString()), nte);
+            return false;
+        }
+    }
+
+    @Override
+    public KubernetesCluster createKubernetesCluster(CreateKubernetesClusterCmd cmd) throws CloudRuntimeException {
+        if (!KubernetesServiceEnabled.value()) {
+            logAndThrow(Level.ERROR, "Kubernetes Service plugin is disabled");
+        }
+
+        validateKubernetesClusterCreateParameters(cmd);
+
+        final DataCenter zone = dataCenterDao.findById(cmd.getZoneId());
+        final long masterNodeCount = cmd.getMasterNodes();
+        final long clusterSize = cmd.getClusterSize();
+        final long totalNodeCount = masterNodeCount + clusterSize;
+        final ServiceOffering serviceOffering = serviceOfferingDao.findById(cmd.getServiceOfferingId());
+        final Account owner = accountService.getActiveAccountById(cmd.getEntityOwnerId());
+        final KubernetesSupportedVersion clusterKubernetesVersion = kubernetesSupportedVersionDao.findById(cmd.getKubernetesVersionId());
+
+        DeployDestination deployDestination = null;
+        try {
+            deployDestination = plan(totalNodeCount, zone, serviceOffering);
+        } catch (InsufficientCapacityException e) {
+            logAndThrow(Level.ERROR, String.format("Creating Kubernetes cluster failed due to insufficient capacity for %d cluster nodes in zone ID: %s with service offering ID: %s", totalNodeCount, zone.getUuid(), serviceOffering.getUuid()));
+        }
+        if (deployDestination == null || deployDestination.getCluster() == null) {
+            logAndThrow(Level.ERROR, String.format("Creating Kubernetes cluster failed due to error while finding suitable deployment plan for cluster in zone ID: %s", zone.getUuid()));
+        }
+
+        final Network defaultNetwork = getKubernetesClusterNetworkIfMissing(cmd.getName(), zone, owner, (int)masterNodeCount, (int)clusterSize, cmd.getExternalLoadBalancerIpAddress(), cmd.getNetworkId());
+        final VMTemplateVO finalTemplate = getKubernetesServiceTemplate(deployDestination.getCluster().getHypervisorType());
+        final long cores = serviceOffering.getCpu() * (masterNodeCount + clusterSize);
+        final long memory = serviceOffering.getRamSize() * (masterNodeCount + clusterSize);
+
+        final KubernetesClusterVO cluster = Transaction.execute(new TransactionCallback<KubernetesClusterVO>() {
+            @Override
+            public KubernetesClusterVO doInTransaction(TransactionStatus status) {
+                KubernetesClusterVO newCluster = new KubernetesClusterVO(cmd.getName(), cmd.getDisplayName(), zone.getId(), clusterKubernetesVersion.getId(),
+                        serviceOffering.getId(), finalTemplate.getId(), defaultNetwork.getId(), owner.getDomainId(),
+                        owner.getAccountId(), masterNodeCount, clusterSize, KubernetesCluster.State.Created, cmd.getSSHKeyPairName(), cores, memory, cmd.getNodeRootDiskSize(), "");
+                kubernetesClusterDao.persist(newCluster);
+                return newCluster;
+            }
+        });
+
+        addKubernetesClusterDetails(cluster, defaultNetwork, cmd);
+
+        if (LOGGER.isInfoEnabled()) {
+            LOGGER.info(String.format("Kubernetes cluster name: %s and ID: %s has been created", cluster.getName(), cluster.getUuid()));
+        }
+        return cluster;
+    }
+
+    /**
+     * The start operation can be performed at two different life stages of a Kubernetes cluster. The first is on a freshly
+     * created cluster, in which case no resources have been provisioned for the Kubernetes cluster yet, so during start all
+     * the resources are provisioned from scratch. The second kind of start happens on a stopped Kubernetes cluster, in which
+     * all resources (volumes, NICs, networks etc.) are already provisioned and only the VMs are not in the running state, so
+     * the VMs are simply started (which may implicitly start the network as well).
+     * @param kubernetesClusterId
+     * @param onCreate
+     * @return
+     * @throws CloudRuntimeException
+     */
+    @Override
+    public boolean startKubernetesCluster(long kubernetesClusterId, boolean onCreate) throws CloudRuntimeException {
+        if (!KubernetesServiceEnabled.value()) {
+            logAndThrow(Level.ERROR, "Kubernetes Service plugin is disabled");
+        }
+        final KubernetesClusterVO kubernetesCluster = kubernetesClusterDao.findById(kubernetesClusterId);
+        if (kubernetesCluster == null) {
+            throw new InvalidParameterValueException("Failed to find Kubernetes cluster with given ID");
+        }
+        if (kubernetesCluster.getRemoved() != null) {
+            throw new InvalidParameterValueException(String.format("Kubernetes cluster ID: %s is already deleted", kubernetesCluster.getUuid()));
+        }
+        accountManager.checkAccess(CallContext.current().getCallingAccount(), SecurityChecker.AccessType.OperateEntry, false, kubernetesCluster);
+        if (kubernetesCluster.getState().equals(KubernetesCluster.State.Running)) {
+            if (LOGGER.isDebugEnabled()) {
+                LOGGER.debug(String.format("Kubernetes cluster ID: %s is in running state", kubernetesCluster.getUuid()));
+            }
+            return true;
+        }
+        if (kubernetesCluster.getState().equals(KubernetesCluster.State.Starting)) {
+            if (LOGGER.isDebugEnabled()) {
+                LOGGER.debug(String.format("Kubernetes cluster ID: %s is already in starting state", kubernetesCluster.getUuid()));
+            }
+            return true;
+        }
+        final DataCenter zone = dataCenterDao.findById(kubernetesCluster.getZoneId());
+        if (zone == null) {
+            logAndThrow(Level.WARN, String.format("Unable to find zone for Kubernetes cluster ID: %s", kubernetesCluster.getUuid()));
+        }
+        KubernetesClusterStartWorker startWorker =
+                new KubernetesClusterStartWorker(kubernetesCluster, this);
+        startWorker = ComponentContext.inject(startWorker);
+        if (onCreate) {
+            // Start for Kubernetes cluster in 'Created' state
+            return startWorker.startKubernetesClusterOnCreate();
+        } else {
+            // Start for Kubernetes cluster in 'Stopped' state. Resources are already provisioned, just need to be started
+            return startWorker.startStoppedKubernetesCluster();
+        }
+    }
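+
+    // Illustrative call sequence for the two start paths documented above (a hypothetical caller sketch,
+    // parameter values assumed):
+    //   KubernetesCluster cluster = createKubernetesCluster(cmd);   // cluster is persisted in 'Created' state
+    //   startKubernetesCluster(cluster.getId(), true);              // onCreate=true: provisions all cluster resources from scratch
+    //   stopKubernetesCluster(cluster.getId());
+    //   startKubernetesCluster(cluster.getId(), false);             // onCreate=false: only starts the already provisioned VMs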
+
+    @Override
+    public boolean stopKubernetesCluster(long kubernetesClusterId) throws CloudRuntimeException {
+        if (!KubernetesServiceEnabled.value()) {
+            logAndThrow(Level.ERROR, "Kubernetes Service plugin is disabled");
+        }
+        final KubernetesClusterVO kubernetesCluster = kubernetesClusterDao.findById(kubernetesClusterId);
+        if (kubernetesCluster == null) {
+            throw new InvalidParameterValueException("Failed to find Kubernetes cluster with given ID");
+        }
+        if (kubernetesCluster.getRemoved() != null) {
+            throw new InvalidParameterValueException(String.format("Kubernetes cluster ID: %s is already deleted", kubernetesCluster.getUuid()));
+        }
+        accountManager.checkAccess(CallContext.current().getCallingAccount(), SecurityChecker.AccessType.OperateEntry, false, kubernetesCluster);
+        if (kubernetesCluster.getState().equals(KubernetesCluster.State.Stopped)) {
+            if (LOGGER.isDebugEnabled()) {
+                LOGGER.debug(String.format("Kubernetes cluster ID: %s is already stopped", kubernetesCluster.getUuid()));
+            }
+            return true;
+        }
+        if (kubernetesCluster.getState().equals(KubernetesCluster.State.Stopping)) {
+            if (LOGGER.isDebugEnabled()) {
+                LOGGER.debug(String.format("Kubernetes cluster ID: %s is getting stopped", kubernetesCluster.getUuid()));
+            }
+            return true;
+        }
+        KubernetesClusterStopWorker stopWorker = new KubernetesClusterStopWorker(kubernetesCluster, this);
+        stopWorker = ComponentContext.inject(stopWorker);
+        return stopWorker.stop();
+    }
+
+    @Override
+    public boolean deleteKubernetesCluster(Long kubernetesClusterId) throws CloudRuntimeException {
+        if (!KubernetesServiceEnabled.value()) {
+            logAndThrow(Level.ERROR, "Kubernetes Service plugin is disabled");
+        }
+        KubernetesClusterVO cluster = kubernetesClusterDao.findById(kubernetesClusterId);
+        if (cluster == null) {
+            throw new InvalidParameterValueException("Invalid cluster id specified");
+        }
+        accountManager.checkAccess(CallContext.current().getCallingAccount(), SecurityChecker.AccessType.OperateEntry, false, cluster);
+        KubernetesClusterDestroyWorker destroyWorker = new KubernetesClusterDestroyWorker(cluster, this);
+        destroyWorker = ComponentContext.inject(destroyWorker);
+        return destroyWorker.destroy();
+    }
+
+    @Override
+    public ListResponse<KubernetesClusterResponse> listKubernetesClusters(ListKubernetesClustersCmd cmd) {
+        if (!KubernetesServiceEnabled.value()) {
+            logAndThrow(Level.ERROR, "Kubernetes Service plugin is disabled");
+        }
+        final CallContext ctx = CallContext.current();
+        final Account caller = ctx.getCallingAccount();
+        final Long clusterId = cmd.getId();
+        final String state = cmd.getState();
+        final String name = cmd.getName();
+        final String keyword = cmd.getKeyword();
+        List<KubernetesClusterResponse> responsesList = new ArrayList<KubernetesClusterResponse>();
+        List<Long> permittedAccounts = new ArrayList<Long>();
+        Ternary<Long, Boolean, Project.ListProjectResourcesCriteria> domainIdRecursiveListProject = new Ternary<Long, Boolean, Project.ListProjectResourcesCriteria>(cmd.getDomainId(), cmd.isRecursive(), null);
+        accountManager.buildACLSearchParameters(caller, clusterId, cmd.getAccountName(), cmd.getProjectId(), permittedAccounts, domainIdRecursiveListProject, cmd.listAll(), false);
+        Long domainId = domainIdRecursiveListProject.first();
+        Boolean isRecursive = domainIdRecursiveListProject.second();
+        Project.ListProjectResourcesCriteria listProjectResourcesCriteria = domainIdRecursiveListProject.third();
+        Filter searchFilter = new Filter(KubernetesClusterVO.class, "id", true, cmd.getStartIndex(), cmd.getPageSizeVal());
+        SearchBuilder<KubernetesClusterVO> sb = kubernetesClusterDao.createSearchBuilder();
+        accountManager.buildACLSearchBuilder(sb, domainId, isRecursive, permittedAccounts, listProjectResourcesCriteria);
+        sb.and("id", sb.entity().getId(), SearchCriteria.Op.EQ);
+        sb.and("name", sb.entity().getName(), SearchCriteria.Op.EQ);
+        sb.and("keyword", sb.entity().getName(), SearchCriteria.Op.LIKE);
+        sb.and("state", sb.entity().getState(), SearchCriteria.Op.IN);
+        SearchCriteria<KubernetesClusterVO> sc = sb.create();
+        accountManager.buildACLSearchCriteria(sc, domainId, isRecursive, permittedAccounts, listProjectResourcesCriteria);
+        if (state != null) {
+            sc.setParameters("state", state);
+        }
+        if (keyword != null) {
+            sc.setParameters("keyword", "%" + keyword + "%");
+        }
+        if (clusterId != null) {
+            sc.setParameters("id", clusterId);
+        }
+        if (name != null) {
+            sc.setParameters("name", name);
+        }
+        List<KubernetesClusterVO> kubernetesClusters = kubernetesClusterDao.search(sc, searchFilter);
+        for (KubernetesClusterVO cluster : kubernetesClusters) {
+            KubernetesClusterResponse clusterResponse = createKubernetesClusterResponse(cluster.getId());
+            responsesList.add(clusterResponse);
+        }
+        ListResponse<KubernetesClusterResponse> response = new ListResponse<KubernetesClusterResponse>();
+        response.setResponses(responsesList);
+        return response;
+    }
+
+    public KubernetesClusterConfigResponse getKubernetesClusterConfig(GetKubernetesClusterConfigCmd cmd) {
+        if (!KubernetesServiceEnabled.value()) {
+            logAndThrow(Level.ERROR, "Kubernetes Service plugin is disabled");
+        }
+        final Long clusterId = cmd.getId();
+        KubernetesCluster kubernetesCluster = kubernetesClusterDao.findById(clusterId);
+        if (kubernetesCluster == null) {
+            throw new InvalidParameterValueException("Invalid Kubernetes cluster ID specified");
+        }
+        KubernetesClusterConfigResponse response = new KubernetesClusterConfigResponse();
+        response.setId(kubernetesCluster.getUuid());
+        response.setName(kubernetesCluster.getName());
+        String configData = "";
+        KubernetesClusterDetailsVO clusterDetailsVO = kubernetesClusterDetailsDao.findDetail(kubernetesCluster.getId(), "kubeConfigData");
+        if (clusterDetailsVO != null && !Strings.isNullOrEmpty(clusterDetailsVO.getValue())) {
+            configData = new String(Base64.decodeBase64(clusterDetailsVO.getValue()));
+        } else {
+            if (KubernetesCluster.State.Starting.equals(kubernetesCluster.getState())) {
+                throw new CloudRuntimeException(String.format("Setup is in progress for Kubernetes cluster ID: %s, config not available at this moment", kubernetesCluster.getUuid()));
+            } else {
+                throw new CloudRuntimeException(String.format("Config not found for Kubernetes cluster ID: %s", kubernetesCluster.getUuid()));
+            }
+        }
+        response.setConfigData(configData);
+        response.setObjectName("clusterconfig");
+        return response;
+    }
+
+    @Override
+    public boolean scaleKubernetesCluster(ScaleKubernetesClusterCmd cmd) throws CloudRuntimeException {
+        if (!KubernetesServiceEnabled.value()) {
+            logAndThrow(Level.ERROR, "Kubernetes Service plugin is disabled");
+        }
+        validateKubernetesClusterScaleParameters(cmd);
+        KubernetesClusterScaleWorker scaleWorker =
+                new KubernetesClusterScaleWorker(kubernetesClusterDao.findById(cmd.getId()),
+                        serviceOfferingDao.findById(cmd.getServiceOfferingId()), cmd.getClusterSize(), this);
+        scaleWorker = ComponentContext.inject(scaleWorker);
+        return scaleWorker.scaleCluster();
+    }
+
+    @Override
+    public boolean upgradeKubernetesCluster(UpgradeKubernetesClusterCmd cmd) throws CloudRuntimeException {
+        if (!KubernetesServiceEnabled.value()) {
+            logAndThrow(Level.ERROR, "Kubernetes Service plugin is disabled");
+        }
+        validateKubernetesClusterUpgradeParameters(cmd);
+        KubernetesClusterUpgradeWorker upgradeWorker =
+                new KubernetesClusterUpgradeWorker(kubernetesClusterDao.findById(cmd.getId()),
+                        kubernetesSupportedVersionDao.findById(cmd.getKubernetesVersionId()), this);
+        upgradeWorker = ComponentContext.inject(upgradeWorker);
+        return upgradeWorker.upgradeCluster();
+    }
+
+    @Override
+    public List<Class<?>> getCommands() {
+        List<Class<?>> cmdList = new ArrayList<Class<?>>();
+        if (!KubernetesServiceEnabled.value()) {
+            return cmdList;
+        }
+        cmdList.add(CreateKubernetesClusterCmd.class);
+        cmdList.add(StartKubernetesClusterCmd.class);
+        cmdList.add(StopKubernetesClusterCmd.class);
+        cmdList.add(DeleteKubernetesClusterCmd.class);
+        cmdList.add(ListKubernetesClustersCmd.class);
+        cmdList.add(GetKubernetesClusterConfigCmd.class);
+        cmdList.add(ScaleKubernetesClusterCmd.class);
+        cmdList.add(UpgradeKubernetesClusterCmd.class);
+        return cmdList;
+    }
+
+    @Override
+    public KubernetesCluster findById(final Long id) {
+        return kubernetesClusterDao.findById(id);
+    }
+
+    // The garbage collector periodically runs through the Kubernetes clusters marked for GC and, for each
+    // such cluster, attempts to destroy it.
+    public class KubernetesClusterGarbageCollector extends ManagedContextRunnable {
+        @Override
+        protected void runInContext() {
+            GlobalLock gcLock = GlobalLock.getInternLock("KubernetesCluster.GC.Lock");
+            try {
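+                // briefly try to acquire the cluster-wide GC lock so that only one GC sweep runs at a time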
+                if (gcLock.lock(3)) {
+                    try {
+                        reallyRun();
+                    } finally {
+                        gcLock.unlock();
+                    }
+                }
+            } finally {
+                gcLock.releaseRef();
+            }
+        }
+
+        public void reallyRun() {
+            try {
+                List<KubernetesClusterVO> kubernetesClusters = kubernetesClusterDao.findKubernetesClustersToGarbageCollect();
+                for (KubernetesCluster kubernetesCluster : kubernetesClusters) {
+                    if (LOGGER.isInfoEnabled()) {
+                        LOGGER.info(String.format("Running Kubernetes cluster garbage collector on Kubernetes cluster ID: %s", kubernetesCluster.getUuid()));
+                    }
+                    try {
+                        KubernetesClusterDestroyWorker destroyWorker = new KubernetesClusterDestroyWorker(kubernetesCluster, KubernetesClusterManagerImpl.this);
+                        destroyWorker = ComponentContext.inject(destroyWorker);
+                        if (destroyWorker.destroy()) {
+                            if (LOGGER.isInfoEnabled()) {
+                                LOGGER.info(String.format("Garbage collection complete for Kubernetes cluster ID: %s", kubernetesCluster.getUuid()));
+                            }
+                        } else {
+                            LOGGER.warn(String.format("Garbage collection failed for Kubernetes cluster ID: %s, it will be attempted to garbage collected in next run", kubernetesCluster.getUuid()));
+                        }
+                    } catch (CloudRuntimeException e) {
+                        LOGGER.warn(String.format("Failed to destroy Kubernetes cluster ID: %s during GC", kubernetesCluster.getUuid()), e);
+                        // proceed further with rest of the Kubernetes cluster garbage collection
+                    }
+                }
+            } catch (Exception e) {
+                LOGGER.warn("Caught exception while running Kubernetes cluster gc: ", e);
+            }
+        }
+    }
+
+    /* The Kubernetes cluster scanner checks whether each Kubernetes cluster is in its desired state. If it detects that
+       a cluster is not in its desired state, it triggers an event and moves the cluster to the 'Alert' state. For example,
+       a cluster in the 'Running' state should have all of its node VMs running, the number of node VMs should match the
+       cluster size, and the master node VMs should be running. Out-of-band changes by the user, or hosts going down, may
+       leave one or more VMs stopped; the scanner detects such changes and marks the cluster as 'Alert'. Similarly, a
+       cluster in the 'Stopped' state should have all of its VMs stopped, and any mismatch moves the cluster to 'Alert'.
+       Through the recovery API or reconciliation, clusters in the 'Alert' state are brought back to a known good
+       (desired) state.
+     */
+    public class KubernetesClusterStatusScanner extends ManagedContextRunnable {
+        private boolean firstRun = true;
+        @Override
+        protected void runInContext() {
+            GlobalLock gcLock = GlobalLock.getInternLock("KubernetesCluster.State.Scanner.Lock");
+            try {
+                if (gcLock.lock(3)) {
+                    try {
+                        reallyRun();
+                    } finally {
+                        gcLock.unlock();
+                    }
+                }
+            } finally {
+                gcLock.releaseRef();
+            }
+        }
+
+        public void reallyRun() {
+            try {
+                // run through Kubernetes clusters in the 'Running' state and ensure all VMs in the cluster are Running
+                List<KubernetesClusterVO> runningKubernetesClusters = kubernetesClusterDao.findKubernetesClustersInState(KubernetesCluster.State.Running);
+                for (KubernetesCluster kubernetesCluster : runningKubernetesClusters) {
+                    if (LOGGER.isInfoEnabled()) {
+                        LOGGER.info(String.format("Running Kubernetes cluster state scanner on Kubernetes cluster ID: %s", kubernetesCluster.getUuid()));
+                    }
+                    try {
+                        if (!isClusterVMsInDesiredState(kubernetesCluster, VirtualMachine.State.Running)) {
+                            stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.FaultsDetected);
+                        }
+                    } catch (Exception e) {
+                        LOGGER.warn(String.format("Failed to run Kubernetes cluster Running state scanner on Kubernetes cluster ID: %s status scanner", kubernetesCluster.getUuid()), e);
+                    }
+                }
+
+                // run through Kubernetes clusters in the 'Stopped' state and ensure all VMs in the cluster are Stopped
+                List<KubernetesClusterVO> stoppedKubernetesClusters = kubernetesClusterDao.findKubernetesClustersInState(KubernetesCluster.State.Stopped);
+                for (KubernetesCluster kubernetesCluster : stoppedKubernetesClusters) {
+                    if (LOGGER.isInfoEnabled()) {
+                        LOGGER.info(String.format("Running Kubernetes cluster state scanner on Kubernetes cluster ID: %s for state: %s", kubernetesCluster.getUuid(), KubernetesCluster.State.Stopped.toString()));
+                    }
+                    try {
+                        if (!isClusterVMsInDesiredState(kubernetesCluster, VirtualMachine.State.Stopped)) {
+                            stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.FaultsDetected);
+                        }
+                    } catch (Exception e) {
+                        LOGGER.warn(String.format("Failed to run Kubernetes cluster Stopped state scanner on Kubernetes cluster ID: %s status scanner", kubernetesCluster.getUuid()), e);
+                    }
+                }
+
+                // run through Kubernetes clusters in the 'Alert' state and reconcile them to 'Running' if their VMs are running, or to 'Stopped' if their VMs are stopped
+                List<KubernetesClusterVO> alertKubernetesClusters = kubernetesClusterDao.findKubernetesClustersInState(KubernetesCluster.State.Alert);
+                for (KubernetesClusterVO kubernetesCluster : alertKubernetesClusters) {
+                    if (LOGGER.isInfoEnabled()) {
+                        LOGGER.info(String.format("Running Kubernetes cluster state scanner on Kubernetes cluster ID: %s for state: %s", kubernetesCluster.getUuid(), KubernetesCluster.State.Alert.toString()));
+                    }
+                    try {
+                        if (isClusterVMsInDesiredState(kubernetesCluster, VirtualMachine.State.Running)) {
+                            KubernetesClusterStartWorker startWorker =
+                                    new KubernetesClusterStartWorker(kubernetesCluster, KubernetesClusterManagerImpl.this);
+                            startWorker = ComponentContext.inject(startWorker);
+                            startWorker.reconcileAlertCluster();
+                        } else if (isClusterVMsInDesiredState(kubernetesCluster, VirtualMachine.State.Stopped)) {
+                            stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.StopRequested);
+                            stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded);
+                        }
+                    } catch (Exception e) {
+                        LOGGER.warn(String.format("Failed to run Kubernetes cluster Alert state scanner on Kubernetes cluster ID: %s status scanner", kubernetesCluster.getUuid()), e);
+                    }
+                }
+
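+                // only on the first run (e.g. right after a management server start), reconcile clusters that
+                // were left in the transient 'Starting' or 'Destroying' states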
+                if (firstRun) {
+                    // run through Kubernetes clusters in the 'Starting' state and reconcile them to 'Alert' if their VMs are running, or to 'Error' otherwise
+                    List<KubernetesClusterVO> startingKubernetesClusters = kubernetesClusterDao.findKubernetesClustersInState(KubernetesCluster.State.Starting);
+                    for (KubernetesCluster kubernetesCluster : startingKubernetesClusters) {
+                        if ((new Date()).getTime() - kubernetesCluster.getCreated().getTime() < 10*60*1000) {
+                            continue;
+                        }
+                        if (LOGGER.isInfoEnabled()) {
+                            LOGGER.info(String.format("Running Kubernetes cluster state scanner on Kubernetes cluster ID: %s for state: %s", kubernetesCluster.getUuid(), KubernetesCluster.State.Starting.toString()));
+                        }
+                        try {
+                            if (isClusterVMsInDesiredState(kubernetesCluster, VirtualMachine.State.Running)) {
+                                stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.FaultsDetected);
+                            } else {
+                                stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);
+                            }
+                        } catch (Exception e) {
+                            LOGGER.warn(String.format("Failed to run Kubernetes cluster Starting state scanner on Kubernetes cluster ID: %s status scanner", kubernetesCluster.getUuid()), e);
+                        }
+                    }
+                    List<KubernetesClusterVO> destroyingKubernetesClusters = kubernetesClusterDao.findKubernetesClustersInState(KubernetesCluster.State.Destroying);
+                    for (KubernetesCluster kubernetesCluster : destroyingKubernetesClusters) {
+                        if (LOGGER.isInfoEnabled()) {
+                            LOGGER.info(String.format("Running Kubernetes cluster state scanner on Kubernetes cluster ID: %s for state: %s", kubernetesCluster.getUuid(), KubernetesCluster.State.Destroying.toString()));
+                        }
+                        try {
+                            KubernetesClusterDestroyWorker destroyWorker = new KubernetesClusterDestroyWorker(kubernetesCluster, KubernetesClusterManagerImpl.this);
+                            destroyWorker = ComponentContext.inject(destroyWorker);
+                            destroyWorker.destroy();
+                        } catch (Exception e) {
+                            LOGGER.warn(String.format("Failed to run Kubernetes cluster Destroying state scanner on Kubernetes cluster ID: %s status scanner", kubernetesCluster.getUuid()), e);
+                        }
+                    }
+                }
+            } catch (Exception e) {
+                LOGGER.warn("Caught exception while running Kubernetes cluster state scanner", e);
+            }
+            firstRun = false;
+        }
+    }
+
+    // checks whether the Kubernetes cluster has its full complement of VMs and all of them are in the desired state
+    boolean isClusterVMsInDesiredState(KubernetesCluster kubernetesCluster, VirtualMachine.State state) {
+        List<KubernetesClusterVmMapVO> clusterVMs = kubernetesClusterVmMapDao.listByClusterId(kubernetesCluster.getId());
+
+        // check that the cluster is running at the desired capacity, including master nodes
+        if (clusterVMs.size() < kubernetesCluster.getTotalNodeCount()) {
+            if (LOGGER.isDebugEnabled()) {
+                LOGGER.debug(String.format("Found only %d VMs in the Kubernetes cluster ID: %s while expected %d VMs to be in state: %s",
+                        clusterVMs.size(), kubernetesCluster.getUuid(), kubernetesCluster.getTotalNodeCount(), state.toString()));
+            }
+            return false;
+        }
+        // check if all the VMs are in the same state
+        for (KubernetesClusterVmMapVO clusterVm : clusterVMs) {
+            VMInstanceVO vm = vmInstanceDao.findByIdIncludingRemoved(clusterVm.getVmId());
+            if (vm.getState() != state) {
+                if (LOGGER.isDebugEnabled()) {
+                    LOGGER.debug(String.format("Found VM ID: %s in the Kubernetes cluster ID: %s in state: %s while expected to be in state: %s. So moving the cluster to Alert state for reconciliation",
+                            vm.getUuid(), kubernetesCluster.getUuid(), vm.getState().toString(), state.toString()));
+                }
+                return false;
+            }
+        }
+
+        return true;
+    }
+
+    @Override
+    public boolean start() {
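+        // seed the default network offering used for Kubernetes isolated networks; every service is
+        // provided by the VirtualRouter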
+        final Map<Network.Service, Network.Provider> defaultKubernetesServiceNetworkOfferingProviders = new HashMap<Service, Network.Provider>();
+        defaultKubernetesServiceNetworkOfferingProviders.put(Service.Dhcp, Network.Provider.VirtualRouter);
+        defaultKubernetesServiceNetworkOfferingProviders.put(Service.Dns, Network.Provider.VirtualRouter);
+        defaultKubernetesServiceNetworkOfferingProviders.put(Service.UserData, Network.Provider.VirtualRouter);
+        defaultKubernetesServiceNetworkOfferingProviders.put(Service.Firewall, Network.Provider.VirtualRouter);
+        defaultKubernetesServiceNetworkOfferingProviders.put(Service.Gateway, Network.Provider.VirtualRouter);
+        defaultKubernetesServiceNetworkOfferingProviders.put(Service.Lb, Network.Provider.VirtualRouter);
+        defaultKubernetesServiceNetworkOfferingProviders.put(Service.SourceNat, Network.Provider.VirtualRouter);
+        defaultKubernetesServiceNetworkOfferingProviders.put(Service.StaticNat, Network.Provider.VirtualRouter);
+        defaultKubernetesServiceNetworkOfferingProviders.put(Service.PortForwarding, Network.Provider.VirtualRouter);
+        defaultKubernetesServiceNetworkOfferingProviders.put(Service.Vpn, Network.Provider.VirtualRouter);
+
+        NetworkOfferingVO defaultKubernetesServiceNetworkOffering =
+                new NetworkOfferingVO(DEFAULT_NETWORK_OFFERING_FOR_KUBERNETES_SERVICE_NAME,
+                        "Network Offering used for CloudStack Kubernetes service", Networks.TrafficType.Guest,
+                        false, false, null, null, true,
+                        NetworkOffering.Availability.Required, null, Network.GuestType.Isolated, true,
+                        true, false, false, false, false,
+                        false, false, false, true, true, false,
+                        false, true, false, false);
+        defaultKubernetesServiceNetworkOffering.setState(NetworkOffering.State.Enabled);
+        defaultKubernetesServiceNetworkOffering = networkOfferingDao.persistDefaultNetworkOffering(defaultKubernetesServiceNetworkOffering);
+
+        for (Service service : defaultKubernetesServiceNetworkOfferingProviders.keySet()) {
+            NetworkOfferingServiceMapVO offService =
+                    new NetworkOfferingServiceMapVO(defaultKubernetesServiceNetworkOffering.getId(), service,
+                            defaultKubernetesServiceNetworkOfferingProviders.get(service));
+            networkOfferingServiceMapDao.persist(offService);
+            LOGGER.trace("Added service for the network offering: " + offService);
+        }
+
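+        // the GC sweep runs every 5 minutes; the state scanner starts after 5 minutes and then runs every 30 seconds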
+        _gcExecutor.scheduleWithFixedDelay(new KubernetesClusterGarbageCollector(), 300, 300, TimeUnit.SECONDS);
+        _stateScanner.scheduleWithFixedDelay(new KubernetesClusterStatusScanner(), 300, 30, TimeUnit.SECONDS);
+
+        return true;
+    }
+
+    @Override
+    public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
+        _name = name;
+        _configParams = params;
+        _gcExecutor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("Kubernetes-Cluster-Scavenger"));
+        _stateScanner = Executors.newScheduledThreadPool(1, new NamedThreadFactory("Kubernetes-Cluster-State-Scanner"));
+
+        return true;
+    }
+
+    @Override
+    public String getConfigComponentName() {
+        return KubernetesClusterService.class.getSimpleName();
+    }
+
+    @Override
+    public ConfigKey<?>[] getConfigKeys() {
+        return new ConfigKey<?>[] {
+                KubernetesServiceEnabled,
+                KubernetesClusterHyperVTemplateName,
+                KubernetesClusterKVMTemplateName,
+                KubernetesClusterVMwareTemplateName,
+                KubernetesClusterXenserverTemplateName,
+                KubernetesClusterNetworkOffering,
+                KubernetesClusterStartTimeout,
+                KubernetesClusterScaleTimeout,
+                KubernetesClusterUpgradeTimeout,
+                KubernetesClusterExperimentalFeaturesEnabled
+        };
+    }
+}
diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterService.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterService.java
new file mode 100644
index 0000000..db5ab91
--- /dev/null
+++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterService.java
@@ -0,0 +1,108 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.kubernetes.cluster;
+
+import org.apache.cloudstack.api.command.user.kubernetes.cluster.CreateKubernetesClusterCmd;
+import org.apache.cloudstack.api.command.user.kubernetes.cluster.GetKubernetesClusterConfigCmd;
+import org.apache.cloudstack.api.command.user.kubernetes.cluster.ListKubernetesClustersCmd;
+import org.apache.cloudstack.api.command.user.kubernetes.cluster.ScaleKubernetesClusterCmd;
+import org.apache.cloudstack.api.command.user.kubernetes.cluster.UpgradeKubernetesClusterCmd;
+import org.apache.cloudstack.api.response.KubernetesClusterConfigResponse;
+import org.apache.cloudstack.api.response.KubernetesClusterResponse;
+import org.apache.cloudstack.api.response.ListResponse;
+import org.apache.cloudstack.framework.config.ConfigKey;
+import org.apache.cloudstack.framework.config.Configurable;
+
+import com.cloud.utils.component.PluggableService;
+import com.cloud.utils.exception.CloudRuntimeException;
+
+public interface KubernetesClusterService extends PluggableService, Configurable {
+    static final String MIN_KUBERNETES_VERSION_HA_SUPPORT = "1.16.0";
+    static final int MIN_KUBERNETES_CLUSTER_NODE_CPU = 2;
+    static final int MIN_KUBERNETES_CLUSTER_NODE_RAM_SIZE = 2048;
+
+    static final ConfigKey<Boolean> KubernetesServiceEnabled = new ConfigKey<Boolean>("Advanced", Boolean.class,
+            "cloud.kubernetes.service.enabled",
+            "false",
+            "Indicates whether Kubernetes Service plugin is enabled or not. Management server restart needed on change",
+            false);
+    static final ConfigKey<String> KubernetesClusterHyperVTemplateName = new ConfigKey<String>("Advanced", String.class,
+            "cloud.kubernetes.cluster.template.name.hyperv",
+            "Kubernetes-Service-Template-HyperV",
+            "Name of the template to be used for creating Kubernetes cluster nodes on HyperV",
+            true);
+    static final ConfigKey<String> KubernetesClusterKVMTemplateName = new ConfigKey<String>("Advanced", String.class,
+            "cloud.kubernetes.cluster.template.name.kvm",
+            "Kubernetes-Service-Template-KVM",
+            "Name of the template to be used for creating Kubernetes cluster nodes on KVM",
+            true);
+    static final ConfigKey<String> KubernetesClusterVMwareTemplateName = new ConfigKey<String>("Advanced", String.class,
+            "cloud.kubernetes.cluster.template.name.vmware",
+            "Kubernetes-Service-Template-VMware",
+            "Name of the template to be used for creating Kubernetes cluster nodes on VMware",
+            true);
+    static final ConfigKey<String> KubernetesClusterXenserverTemplateName = new ConfigKey<String>("Advanced", String.class,
+            "cloud.kubernetes.cluster.template.name.xenserver",
+            "Kubernetes-Service-Template-Xenserver",
+            "Name of the template to be used for creating Kubernetes cluster nodes on Xenserver",
+            true);
+    static final ConfigKey<String> KubernetesClusterNetworkOffering = new ConfigKey<String>("Advanced", String.class,
+            "cloud.kubernetes.cluster.network.offering",
+            "DefaultNetworkOfferingforKubernetesService",
+            "Name of the network offering that will be used to create isolated network in which Kubernetes cluster VMs will be launched",
+            false);
+    static final ConfigKey<Long> KubernetesClusterStartTimeout = new ConfigKey<Long>("Advanced", Long.class,
+            "cloud.kubernetes.cluster.start.timeout",
+            "3600",
+            "Timeout interval (in seconds) in which start operation for a Kubernetes cluster should be completed",
+            true);
+    static final ConfigKey<Long> KubernetesClusterScaleTimeout = new ConfigKey<Long>("Advanced", Long.class,
+            "cloud.kubernetes.cluster.scale.timeout",
+            "3600",
+            "Timeout interval (in seconds) in which scale operation for a Kubernetes cluster should be completed",
+            true);
+    static final ConfigKey<Long> KubernetesClusterUpgradeTimeout = new ConfigKey<Long>("Advanced", Long.class,
+            "cloud.kubernetes.cluster.upgrade.timeout",
+            "3600",
+            "Timeout interval (in seconds) in which upgrade operation for a Kubernetes cluster should be completed. Not strictly obeyed while upgrade is in progress on a node",
+            true);
+    static final ConfigKey<Boolean> KubernetesClusterExperimentalFeaturesEnabled = new ConfigKey<Boolean>("Advanced", Boolean.class,
+            "cloud.kubernetes.cluster.experimental.features.enabled",
+            "false",
+            "Indicates whether experimental feature for Kubernetes cluster such as Docker private registry are enabled or not",
+            true);
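+
+    // For illustration only, assuming CloudMonkey (cmk) is available, the plugin could be enabled with:
+    //   update configuration name=cloud.kubernetes.service.enabled value=true
+    // followed by a management server restart, as the key description above notes.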
+
+    KubernetesCluster findById(final Long id);
+
+    KubernetesCluster createKubernetesCluster(CreateKubernetesClusterCmd cmd) throws CloudRuntimeException;
+
+    boolean startKubernetesCluster(long kubernetesClusterId, boolean onCreate) throws CloudRuntimeException;
+
+    boolean stopKubernetesCluster(long kubernetesClusterId) throws CloudRuntimeException;
+
+    boolean deleteKubernetesCluster(Long kubernetesClusterId) throws CloudRuntimeException;
+
+    ListResponse<KubernetesClusterResponse> listKubernetesClusters(ListKubernetesClustersCmd cmd);
+
+    KubernetesClusterConfigResponse getKubernetesClusterConfig(GetKubernetesClusterConfigCmd cmd);
+
+    KubernetesClusterResponse createKubernetesClusterResponse(long kubernetesClusterId);
+
+    boolean scaleKubernetesCluster(ScaleKubernetesClusterCmd cmd) throws CloudRuntimeException;
+
+    boolean upgradeKubernetesCluster(UpgradeKubernetesClusterCmd cmd) throws CloudRuntimeException;
+}
diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVO.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVO.java
new file mode 100644
index 0000000..9ff0be3
--- /dev/null
+++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVO.java
@@ -0,0 +1,340 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.kubernetes.cluster;
+
+import java.util.Date;
+import java.util.UUID;
+
+import javax.persistence.Column;
+import javax.persistence.Entity;
+import javax.persistence.Id;
+import javax.persistence.Table;
+import javax.persistence.GeneratedValue;
+import javax.persistence.GenerationType;
+
+import com.cloud.utils.db.GenericDao;
+
+@Entity
+@Table(name = "kubernetes_cluster")
+public class KubernetesClusterVO implements KubernetesCluster {
+
+    @Id
+    @GeneratedValue(strategy = GenerationType.IDENTITY)
+    @Column(name = "id")
+    private long id;
+
+    @Column(name = "uuid")
+    private String uuid;
+
+    @Column(name = "name")
+    private String name;
+
+    @Column(name = "description", length = 4096)
+    private String description;
+
+    @Column(name = "zone_id")
+    private long zoneId;
+
+    @Column(name = "kubernetes_version_id")
+    private long kubernetesVersionId;
+
+    @Column(name = "service_offering_id")
+    private long serviceOfferingId;
+
+    @Column(name = "template_id")
+    private long templateId;
+
+    @Column(name = "network_id")
+    private long networkId;
+
+    @Column(name = "domain_id")
+    private long domainId;
+
+    @Column(name = "account_id")
+    private long accountId;
+
+    @Column(name = "master_node_count")
+    private long masterNodeCount;
+
+    @Column(name = "node_count")
+    private long nodeCount;
+
+    @Column(name = "cores")
+    private long cores;
+
+    @Column(name = "memory")
+    private long memory;
+
+    @Column(name = "node_root_disk_size")
+    private long nodeRootDiskSize;
+
+    @Column(name = "state")
+    private State state;
+
+    @Column(name = "key_pair")
+    private String keyPair;
+
+    @Column(name = "endpoint")
+    private String endpoint;
+
+    @Column(name = GenericDao.CREATED_COLUMN)
+    private Date created;
+
+    @Column(name = GenericDao.REMOVED_COLUMN)
+    private Date removed;
+
+    @Column(name = "gc")
+    private boolean checkForGc;
+
+    @Override
+    public long getId() {
+        return id;
+    }
+
+    public void setId(long id) {
+        this.id = id;
+    }
+
+    @Override
+    public String getUuid() {
+        return uuid;
+    }
+
+    public void setUuid(String uuid) {
+        this.uuid = uuid;
+    }
+
+    @Override
+    public String getName() {
+        return name;
+    }
+
+    public void setName(String name) {
+        this.name = name;
+    }
+
+    @Override
+    public String getDescription() {
+        return description;
+    }
+
+    public void setDescription(String description) {
+        this.description = description;
+    }
+
+    @Override
+    public long getZoneId() {
+        return zoneId;
+    }
+
+    public void setZoneId(long zoneId) {
+        this.zoneId = zoneId;
+    }
+
+    @Override
+    public long getKubernetesVersionId() {
+        return kubernetesVersionId;
+    }
+
+    public void setKubernetesVersionId(long kubernetesVersionId) {
+        this.kubernetesVersionId = kubernetesVersionId;
+    }
+
+    @Override
+    public long getServiceOfferingId() {
+        return serviceOfferingId;
+    }
+
+    public void setServiceOfferingId(long serviceOfferingId) {
+        this.serviceOfferingId = serviceOfferingId;
+    }
+
+    @Override
+    public long getTemplateId() {
+        return templateId;
+    }
+
+    public void setTemplateId(long templateId) {
+        this.templateId = templateId;
+    }
+
+    @Override
+    public long getNetworkId() {
+        return networkId;
+    }
+
+    public void setNetworkId(long networkId) {
+        this.networkId = networkId;
+    }
+
+    @Override
+    public long getDomainId() {
+        return domainId;
+    }
+
+    public void setDomainId(long domainId) {
+        this.domainId = domainId;
+    }
+
+    @Override
+    public long getAccountId() {
+        return accountId;
+    }
+
+    public void setAccountId(long accountId) {
+        this.accountId = accountId;
+    }
+
+    @Override
+    public long getMasterNodeCount() {
+        return masterNodeCount;
+    }
+
+    public void setMasterNodeCount(long masterNodeCount) {
+        this.masterNodeCount = masterNodeCount;
+    }
+
+    @Override
+    public long getNodeCount() {
+        return nodeCount;
+    }
+
+    public void setNodeCount(long nodeCount) {
+        this.nodeCount = nodeCount;
+    }
+
+    @Override
+    public long getTotalNodeCount() {
+        return this.masterNodeCount + this.nodeCount;
+    }
+
+    @Override
+    public long getCores() {
+        return cores;
+    }
+
+    public void setCores(long cores) {
+        this.cores = cores;
+    }
+
+    @Override
+    public long getMemory() {
+        return memory;
+    }
+
+    public void setMemory(long memory) {
+        this.memory = memory;
+    }
+
+    @Override
+    public long getNodeRootDiskSize() {
+        return nodeRootDiskSize;
+    }
+
+    public void setNodeRootDiskSize(long nodeRootDiskSize) {
+        this.nodeRootDiskSize = nodeRootDiskSize;
+    }
+
+    @Override
+    public State getState() {
+        return state;
+    }
+
+    public void setState(State state) {
+        this.state = state;
+    }
+
+    @Override
+    public String getEndpoint() {
+        return endpoint;
+    }
+
+    public void setEndpoint(String endpoint) {
+        this.endpoint = endpoint;
+    }
+
+    public String getKeyPair() {
+        return keyPair;
+    }
+
+    public void setKeyPair(String keyPair) {
+        this.keyPair = keyPair;
+    }
+
+    @Override
+    public boolean isDisplay() {
+        return true;
+    }
+
+    public Date getRemoved() {
+        if (removed == null)
+            return null;
+        return new Date(removed.getTime());
+    }
+
+    @Override
+    public boolean isCheckForGc() {
+        return checkForGc;
+    }
+
+    public void setCheckForGc(boolean check) {
+        checkForGc = check;
+    }
+
+    @Override
+    public Date getCreated() {
+        return created;
+    }
+
+    public KubernetesClusterVO() {
+        this.uuid = UUID.randomUUID().toString();
+    }
+
+    public KubernetesClusterVO(String name, String description, long zoneId, long kubernetesVersionId, long serviceOfferingId, long templateId,
+                               long networkId, long domainId, long accountId, long masterNodeCount, long nodeCount, State state,
+                               String keyPair, long cores, long memory, Long nodeRootDiskSize, String endpoint) {
+        this.uuid = UUID.randomUUID().toString();
+        this.name = name;
+        this.description = description;
+        this.zoneId = zoneId;
+        this.kubernetesVersionId = kubernetesVersionId;
+        this.serviceOfferingId = serviceOfferingId;
+        this.templateId = templateId;
+        this.networkId = networkId;
+        this.domainId = domainId;
+        this.accountId = accountId;
+        this.masterNodeCount = masterNodeCount;
+        this.nodeCount = nodeCount;
+        this.state = state;
+        this.keyPair = keyPair;
+        this.cores = cores;
+        this.memory = memory;
+        if (nodeRootDiskSize != null && nodeRootDiskSize > 0) {
+            this.nodeRootDiskSize = nodeRootDiskSize;
+        }
+        this.endpoint = endpoint;
+        this.checkForGc = false;
+    }
+
+    @Override
+    public Class<?> getEntityType() {
+        return KubernetesCluster.class;
+    }
+}
diff --git a/ui/plugins/plugins.js b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVmMap.java
similarity index 62%
copy from ui/plugins/plugins.js
copy to plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVmMap.java
index 6edfe88..c739920 100644
--- a/ui/plugins/plugins.js
+++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVmMap.java
@@ -14,10 +14,17 @@
 // KIND, either express or implied.  See the License for the
 // specific language governing permissions and limitations
 // under the License.
-(function($, cloudStack) {
-  cloudStack.plugins = [
-    //'testPlugin',
-    'cloudian',
-    'quota'
-  ];
-}(jQuery, cloudStack));
+package com.cloud.kubernetes.cluster;
+
+/**
+ * KubernetesClusterVmMap describes the mapping between the ID of a KubernetesCluster
+ * and the ID of one of its VirtualMachines. A KubernetesCluster can have multiple VMs
+ * deployed for it, therefore a list of KubernetesClusterVmMaps is associated
+ * with a KubernetesCluster.
+ * A particular VM can be deployed for only a single KubernetesCluster.
+ */
+public interface KubernetesClusterVmMap {
+    long getId();
+    long getClusterId();
+    long getVmId();
+}
diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVmMapVO.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVmMapVO.java
new file mode 100644
index 0000000..edb06e7
--- /dev/null
+++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVmMapVO.java
@@ -0,0 +1,76 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.kubernetes.cluster;
+
+import javax.persistence.Column;
+import javax.persistence.Entity;
+import javax.persistence.Id;
+import javax.persistence.Table;
+import javax.persistence.GeneratedValue;
+import javax.persistence.GenerationType;
+
+@Entity
+@Table(name = "kubernetes_cluster_vm_map")
+public class KubernetesClusterVmMapVO implements KubernetesClusterVmMap {
+
+    @Override
+    public long getId() {
+        return id;
+    }
+
+    @Override
+    public long getClusterId() {
+        return clusterId;
+    }
+
+    public void setClusterId(long clusterId) {
+        this.clusterId = clusterId;
+    }
+
+    @Override
+    public long getVmId() {
+        return vmId;
+    }
+
+    public void setVmId(long vmId) {
+        this.vmId = vmId;
+    }
+
+    @Id
+    @GeneratedValue(strategy = GenerationType.IDENTITY)
+    @Column(name = "id")
+    long id;
+
+    @Column(name = "cluster_id")
+    long clusterId;
+
+    @Column(name = "vm_id")
+    long vmId;
+
+    public KubernetesClusterVmMapVO() {
+    }
+
+    public KubernetesClusterVmMapVO(long clusterId, long vmId) {
+        this.vmId = vmId;
+        this.clusterId = clusterId;
+    }
+}
diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java
new file mode 100644
index 0000000..aad9a22
--- /dev/null
+++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java
@@ -0,0 +1,380 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloud.kubernetes.cluster.actionworkers;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Objects;
+
+import javax.inject.Inject;
+
+import org.apache.cloudstack.api.ApiConstants;
+import org.apache.cloudstack.ca.CAManager;
+import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService;
+import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.commons.io.IOUtils;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+
+import com.cloud.dc.dao.DataCenterDao;
+import com.cloud.dc.dao.VlanDao;
+import com.cloud.kubernetes.cluster.KubernetesCluster;
+import com.cloud.kubernetes.cluster.KubernetesClusterDetailsVO;
+import com.cloud.kubernetes.cluster.KubernetesClusterManagerImpl;
+import com.cloud.kubernetes.cluster.KubernetesClusterVO;
+import com.cloud.kubernetes.cluster.KubernetesClusterVmMapVO;
+import com.cloud.kubernetes.cluster.dao.KubernetesClusterDao;
+import com.cloud.kubernetes.cluster.dao.KubernetesClusterDetailsDao;
+import com.cloud.kubernetes.cluster.dao.KubernetesClusterVmMapDao;
+import com.cloud.kubernetes.version.KubernetesSupportedVersion;
+import com.cloud.kubernetes.version.dao.KubernetesSupportedVersionDao;
+import com.cloud.network.IpAddress;
+import com.cloud.network.IpAddressManager;
+import com.cloud.network.Network;
+import com.cloud.network.NetworkModel;
+import com.cloud.network.dao.NetworkDao;
+import com.cloud.service.dao.ServiceOfferingDao;
+import com.cloud.storage.Storage;
+import com.cloud.storage.VMTemplateVO;
+import com.cloud.storage.dao.VMTemplateDao;
+import com.cloud.template.TemplateApiService;
+import com.cloud.template.VirtualMachineTemplate;
+import com.cloud.user.Account;
+import com.cloud.user.dao.AccountDao;
+import com.cloud.user.dao.SSHKeyPairDao;
+import com.cloud.uservm.UserVm;
+import com.cloud.utils.Pair;
+import com.cloud.utils.StringUtils;
+import com.cloud.utils.db.Transaction;
+import com.cloud.utils.db.TransactionCallback;
+import com.cloud.utils.db.TransactionStatus;
+import com.cloud.utils.exception.CloudRuntimeException;
+import com.cloud.utils.fsm.NoTransitionException;
+import com.cloud.utils.fsm.StateMachine2;
+import com.cloud.vm.UserVmService;
+import com.cloud.vm.dao.UserVmDao;
+import com.google.common.base.Strings;
+
+public class KubernetesClusterActionWorker {
+
+    public static final String CLUSTER_NODE_VM_USER = "core";
+    public static final int CLUSTER_API_PORT = 6443;
+    public static final int CLUSTER_NODES_DEFAULT_START_SSH_PORT = 2222;
+
+    protected static final Logger LOGGER = Logger.getLogger(KubernetesClusterActionWorker.class);
+
+    protected StateMachine2<KubernetesCluster.State, KubernetesCluster.Event, KubernetesCluster> _stateMachine = KubernetesCluster.State.getStateMachine();
+
+    @Inject
+    protected CAManager caManager;
+    @Inject
+    protected ConfigurationDao configurationDao;
+    @Inject
+    protected DataCenterDao dataCenterDao;
+    @Inject
+    protected AccountDao accountDao;
+    @Inject
+    protected IpAddressManager ipAddressManager;
+    @Inject
+    protected NetworkOrchestrationService networkMgr;
+    @Inject
+    protected NetworkDao networkDao;
+    @Inject
+    protected NetworkModel networkModel;
+    @Inject
+    protected ServiceOfferingDao serviceOfferingDao;
+    @Inject
+    protected SSHKeyPairDao sshKeyPairDao;
+    @Inject
+    protected VMTemplateDao templateDao;
+    @Inject
+    protected TemplateApiService templateService;
+    @Inject
+    protected UserVmDao userVmDao;
+    @Inject
+    protected UserVmService userVmService;
+    @Inject
+    protected VlanDao vlanDao;
+
+    protected KubernetesClusterDao kubernetesClusterDao;
+    protected KubernetesClusterVmMapDao kubernetesClusterVmMapDao;
+    protected KubernetesClusterDetailsDao kubernetesClusterDetailsDao;
+    protected KubernetesSupportedVersionDao kubernetesSupportedVersionDao;
+
+    protected KubernetesCluster kubernetesCluster;
+    protected Account owner;
+    protected File sshKeyFile;
+    protected String publicIpAddress;
+    protected int sshPort;
+
+    protected KubernetesClusterActionWorker(final KubernetesCluster kubernetesCluster, final KubernetesClusterManagerImpl clusterManager) {
+        this.kubernetesCluster = kubernetesCluster;
+        this.kubernetesClusterDao = clusterManager.kubernetesClusterDao;
+        this.kubernetesClusterDetailsDao = clusterManager.kubernetesClusterDetailsDao;
+        this.kubernetesClusterVmMapDao = clusterManager.kubernetesClusterVmMapDao;
+        this.kubernetesSupportedVersionDao = clusterManager.kubernetesSupportedVersionDao;
+    }
+
+    protected void init() {
+        this.owner = accountDao.findById(kubernetesCluster.getAccountId());
+        this.sshKeyFile = getManagementServerSshPublicKeyFile();
+    }
+
+    protected String readResourceFile(String resource) throws IOException {
+        return IOUtils.toString(Objects.requireNonNull(Thread.currentThread().getContextClassLoader().getResourceAsStream(resource)), StringUtils.getPreferredCharset());
+    }
+
+    protected void logMessage(final Level logLevel, final String message, final Exception e) {
+        if (logLevel == Level.INFO) {
+            if (LOGGER.isInfoEnabled()) {
+                if (e != null) {
+                    LOGGER.info(message, e);
+                } else {
+                    LOGGER.info(message);
+                }
+            }
+        } else if (logLevel == Level.DEBUG) {
+            if (LOGGER.isDebugEnabled()) {
+                if (e != null) {
+                    LOGGER.debug(message, e);
+                } else {
+                    LOGGER.debug(message);
+                }
+            }
+        } else if (logLevel == Level.WARN) {
+            if (e != null) {
+                LOGGER.warn(message, e);
+            } else {
+                LOGGER.warn(message);
+            }
+        } else {
+            if (e != null) {
+                LOGGER.error(message, e);
+            } else {
+                LOGGER.error(message);
+            }
+        }
+    }
+
+    protected void logTransitStateDetachIsoAndThrow(final Level logLevel, final String message, final KubernetesCluster kubernetesCluster,
+                                                    final List<UserVm> clusterVMs, final KubernetesCluster.Event event, final Exception e) throws CloudRuntimeException {
+        logMessage(logLevel, message, e);
+        stateTransitTo(kubernetesCluster.getId(), event);
+        detachIsoKubernetesVMs(clusterVMs);
+        if (e == null) {
+            throw new CloudRuntimeException(message);
+        }
+        throw new CloudRuntimeException(message, e);
+    }
+
+    protected void logTransitStateAndThrow(final Level logLevel, final String message, final Long kubernetesClusterId, final KubernetesCluster.Event event, final Exception e) throws CloudRuntimeException {
+        logMessage(logLevel, message, e);
+        if (kubernetesClusterId != null && event != null) {
+            stateTransitTo(kubernetesClusterId, event);
+        }
+        if (e == null) {
+            throw new CloudRuntimeException(message);
+        }
+        throw new CloudRuntimeException(message, e);
+    }
+
+    protected void logTransitStateAndThrow(final Level logLevel, final String message, final Long kubernetesClusterId, final KubernetesCluster.Event event) throws CloudRuntimeException {
+        logTransitStateAndThrow(logLevel, message, kubernetesClusterId, event, null);
+    }
+
+    protected void logAndThrow(final Level logLevel, final String message) throws CloudRuntimeException {
+        logTransitStateAndThrow(logLevel, message, null, null, null);
+    }
+
+    protected void logAndThrow(final Level logLevel, final String message, final Exception ex) throws CloudRuntimeException {
+        logTransitStateAndThrow(logLevel, message, null, null, ex);
+    }
+
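+    // Returns the management server's SSH key file (~/.ssh/id_rsa for the user running the management
+    // server, or ~/.ssh/id_rsa.cloud when the 'developer' setting is true), used by the action workers
+    // to reach cluster node VMs over SSH.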
+    protected File getManagementServerSshPublicKeyFile() {
+        boolean devel = Boolean.parseBoolean(configurationDao.getValue("developer"));
+        String keyFile = String.format("%s/.ssh/id_rsa", System.getProperty("user.home"));
+        if (devel) {
+            keyFile += ".cloud";
+        }
+        return new File(keyFile);
+    }
+
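+    // Persists a cluster-to-VM mapping row inside a database transaction.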
+    protected KubernetesClusterVmMapVO addKubernetesClusterVm(final long kubernetesClusterId, final long vmId) {
+        return Transaction.execute(new TransactionCallback<KubernetesClusterVmMapVO>() {
+            @Override
+            public KubernetesClusterVmMapVO doInTransaction(TransactionStatus status) {
+                KubernetesClusterVmMapVO newClusterVmMap = new KubernetesClusterVmMapVO(kubernetesClusterId, vmId);
+                kubernetesClusterVmMapDao.persist(newClusterVmMap);
+                return newClusterVmMap;
+            }
+        });
+    }
+
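+    // If no master VM is supplied, fall back to the cluster VM with the lowest ID, which is assumed to
+    // be the first (master) VM provisioned for the cluster.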
+    private UserVm fetchMasterVmIfMissing(final UserVm masterVm) {
+        if (masterVm != null) {
+            return masterVm;
+        }
+        List<KubernetesClusterVmMapVO> clusterVMs = kubernetesClusterVmMapDao.listByClusterId(kubernetesCluster.getId());
+        if (CollectionUtils.isEmpty(clusterVMs)) {
+            LOGGER.warn(String.format("Unable to retrieve VMs for Kubernetes cluster ID: %s", kubernetesCluster.getUuid()));
+            return null;
+        }
+        List<Long> vmIds = new ArrayList<>();
+        for (KubernetesClusterVmMapVO vmMap : clusterVMs) {
+            vmIds.add(vmMap.getVmId());
+        }
+        Collections.sort(vmIds);
+        return userVmDao.findById(vmIds.get(0));
+    }
+
+    protected String getMasterVmPrivateIp() {
+        String ip = null;
+        UserVm vm = fetchMasterVmIfMissing(null);
+        if (vm != null) {
+            ip = vm.getPrivateIpAddress();
+        }
+        return ip;
+    }
+
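+    // Resolves the address and SSH port used to reach the cluster: an explicitly stored external load
+    // balancer IP if present, the source NAT IP (port 2222) for isolated networks, or the master VM's
+    // private IP (port 22) for shared networks.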
+    protected Pair<String, Integer> getKubernetesClusterServerIpSshPort(UserVm masterVm) {
+        int port = CLUSTER_NODES_DEFAULT_START_SSH_PORT;
+        KubernetesClusterDetailsVO detail = kubernetesClusterDetailsDao.findDetail(kubernetesCluster.getId(), ApiConstants.EXTERNAL_LOAD_BALANCER_IP_ADDRESS);
+        if (detail != null && !Strings.isNullOrEmpty(detail.getValue())) {
+            return new Pair<>(detail.getValue(), port);
+        }
+        Network network = networkDao.findById(kubernetesCluster.getNetworkId());
+        if (network == null) {
+            LOGGER.warn(String.format("Network for Kubernetes cluster ID: %s cannot be found", kubernetesCluster.getUuid()));
+            return new Pair<>(null, port);
+        }
+        if (Network.GuestType.Isolated.equals(network.getGuestType())) {
+            List<? extends IpAddress> addresses = networkModel.listPublicIpsAssignedToGuestNtwk(network.getId(), true);
+            if (CollectionUtils.isEmpty(addresses)) {
+                LOGGER.warn(String.format("No public IP addresses found for network ID: %s, Kubernetes cluster ID: %s", network.getUuid(), kubernetesCluster.getUuid()));
+                return new Pair<>(null, port);
+            }
+            for (IpAddress address : addresses) {
+                if (address.isSourceNat()) {
+                    return new Pair<>(address.getAddress().addr(), port);
+                }
+            }
+            LOGGER.warn(String.format("No source NAT IP addresses found for network ID: %s, Kubernetes cluster ID: %s", network.getUuid(), kubernetesCluster.getUuid()));
+            return new Pair<>(null, port);
+        } else if (Network.GuestType.Shared.equals(network.getGuestType())) {
+            port = 22;
+            masterVm = fetchMasterVmIfMissing(masterVm);
+            if (masterVm == null) {
+                LOGGER.warn(String.format("Unable to retrieve master VM for Kubernetes cluster ID: %s", kubernetesCluster.getUuid()));
+                return new Pair<>(null, port);
+            }
+            return new Pair<>(masterVm.getPrivateIpAddress(), port);
+        }
+        LOGGER.warn(String.format("Unable to retrieve server IP address for Kubernetes cluster ID: %s", kubernetesCluster.getUuid()));
+        return new Pair<>(null, port);
+    }
+
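+    // Attaches the Kubernetes binaries ISO of the given (or the cluster's own) supported version to
+    // every cluster VM, failing the current operation if the ISO is missing, not an ISO, or inactive.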
+    protected void attachIsoKubernetesVMs(List<UserVm> clusterVMs, final KubernetesSupportedVersion kubernetesSupportedVersion) throws CloudRuntimeException {
+        KubernetesSupportedVersion version = kubernetesSupportedVersion;
+        if (kubernetesSupportedVersion == null) {
+            version = kubernetesSupportedVersionDao.findById(kubernetesCluster.getKubernetesVersionId());
+        }
+        KubernetesCluster.Event failedEvent = KubernetesCluster.Event.OperationFailed;
+        KubernetesCluster cluster = kubernetesClusterDao.findById(kubernetesCluster.getId());
+        if (cluster != null && cluster.getState() == KubernetesCluster.State.Starting) {
+            failedEvent = KubernetesCluster.Event.CreateFailed;
+        }
+        if (version == null) {
+            logTransitStateAndThrow(Level.ERROR, String.format("Unable to find Kubernetes version for cluster ID: %s", kubernetesCluster.getUuid()), kubernetesCluster.getId(), failedEvent);
+        }
+        VMTemplateVO iso = templateDao.findById(version.getIsoId());
+        if (iso == null) {
+            logTransitStateAndThrow(Level.ERROR, String.format("Unable to attach ISO to Kubernetes cluster ID: %s. Binaries ISO not found.",  kubernetesCluster.getUuid()), kubernetesCluster.getId(), failedEvent);
+        }
+        if (!iso.getFormat().equals(Storage.ImageFormat.ISO)) {
+            logTransitStateAndThrow(Level.ERROR, String.format("Unable to attach ISO to Kubernetes cluster ID: %s. Invalid Binaries ISO.",  kubernetesCluster.getUuid()), kubernetesCluster.getId(), failedEvent);
+        }
+        if (!iso.getState().equals(VirtualMachineTemplate.State.Active)) {
+            logTransitStateAndThrow(Level.ERROR, String.format("Unable to attach ISO to Kubernetes cluster ID: %s. Binaries ISO not active.",  kubernetesCluster.getUuid()), kubernetesCluster.getId(), failedEvent);
+        }
+        for (UserVm vm : clusterVMs) {
+            try {
+                templateService.attachIso(iso.getId(), vm.getId());
+                if (LOGGER.isInfoEnabled()) {
+                    LOGGER.info(String.format("Attached binaries ISO for VM: %s in cluster: %s", vm.getUuid(), kubernetesCluster.getName()));
+                }
+            } catch (CloudRuntimeException ex) {
+                logTransitStateAndThrow(Level.ERROR, String.format("Failed to attach binaries ISO for VM: %s in the Kubernetes cluster name: %s", vm.getDisplayName(), kubernetesCluster.getName()), kubernetesCluster.getId(), failedEvent, ex);
+            }
+        }
+    }
+
+    protected void attachIsoKubernetesVMs(List<UserVm> clusterVMs) throws CloudRuntimeException {
+        attachIsoKubernetesVMs(clusterVMs, null);
+    }
+
+    protected void detachIsoKubernetesVMs(List<UserVm> clusterVMs) {
+        for (UserVm vm : clusterVMs) {
+            boolean result = false;
+            try {
+                result = templateService.detachIso(vm.getId());
+            } catch (CloudRuntimeException ex) {
+                LOGGER.warn(String.format("Failed to detach binaries ISO from VM ID: %s in the Kubernetes cluster ID: %s ", vm.getUuid(), kubernetesCluster.getUuid()), ex);
+            }
+            if (result) {
+                if (LOGGER.isInfoEnabled()) {
+                    LOGGER.info(String.format("Detached Kubernetes binaries from VM ID: %s in the Kubernetes cluster ID: %s", vm.getUuid(), kubernetesCluster.getUuid()));
+                }
+                continue;
+            }
+            LOGGER.warn(String.format("Failed to detach binaries ISO from VM ID: %s in the Kubernetes cluster ID: %s ", vm.getUuid(), kubernetesCluster.getUuid()));
+        }
+    }
+
+    protected List<KubernetesClusterVmMapVO> getKubernetesClusterVMMaps() {
+        List<KubernetesClusterVmMapVO> clusterVMs = kubernetesClusterVmMapDao.listByClusterId(kubernetesCluster.getId());
+        if (!CollectionUtils.isEmpty(clusterVMs)) {
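+            // sort ascending by ID so the ordering is deterministic (earliest-created VM first)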
+            clusterVMs.sort((t1, t2) -> Long.compare(t1.getId(), t2.getId()));
+        }
+        return clusterVMs;
+    }
+
+    protected List<UserVm> getKubernetesClusterVMs() {
+        List<UserVm> vmList = new ArrayList<>();
+        List<KubernetesClusterVmMapVO> clusterVMs = getKubernetesClusterVMMaps();
+        if (!CollectionUtils.isEmpty(clusterVMs)) {
+            for (KubernetesClusterVmMapVO vmMap : clusterVMs) {
+                vmList.add(userVmDao.findById(vmMap.getVmId()));
+            }
+        }
+        return vmList;
+    }
+
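+    // Drives the Kubernetes cluster state machine; returns false (and logs a warning) when the event is
+    // not a valid transition from the cluster's current state.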
+    protected boolean stateTransitTo(long kubernetesClusterId, KubernetesCluster.Event e) {
+        KubernetesClusterVO kubernetesCluster = kubernetesClusterDao.findById(kubernetesClusterId);
+        try {
+            return _stateMachine.transitTo(kubernetesCluster, e, null, kubernetesClusterDao);
+        } catch (NoTransitionException nte) {
+            LOGGER.warn(String.format("Failed to transition state of the Kubernetes cluster ID: %s in state %s on event %s", kubernetesCluster.getUuid(), kubernetesCluster.getState().toString(), e.toString()), nte);
+            return false;
+        }
+    }
+}
diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterDestroyWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterDestroyWorker.java
new file mode 100644
index 0000000..8d7f427
--- /dev/null
+++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterDestroyWorker.java
@@ -0,0 +1,243 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloud.kubernetes.cluster.actionworkers;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import javax.inject.Inject;
+
+import org.apache.cloudstack.context.CallContext;
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.log4j.Level;
+
+import com.cloud.exception.ConcurrentOperationException;
+import com.cloud.exception.ManagementServerException;
+import com.cloud.exception.PermissionDeniedException;
+import com.cloud.exception.ResourceUnavailableException;
+import com.cloud.kubernetes.cluster.KubernetesCluster;
+import com.cloud.kubernetes.cluster.KubernetesClusterDetailsVO;
+import com.cloud.kubernetes.cluster.KubernetesClusterManagerImpl;
+import com.cloud.kubernetes.cluster.KubernetesClusterVO;
+import com.cloud.kubernetes.cluster.KubernetesClusterVmMap;
+import com.cloud.kubernetes.cluster.KubernetesClusterVmMapVO;
+import com.cloud.network.IpAddress;
+import com.cloud.network.Network;
+import com.cloud.network.dao.NetworkVO;
+import com.cloud.network.rules.FirewallRule;
+import com.cloud.user.Account;
+import com.cloud.user.AccountManager;
+import com.cloud.user.User;
+import com.cloud.uservm.UserVm;
+import com.cloud.utils.exception.CloudRuntimeException;
+import com.cloud.vm.ReservationContext;
+import com.cloud.vm.ReservationContextImpl;
+import com.cloud.vm.UserVmVO;
+
+public class KubernetesClusterDestroyWorker extends KubernetesClusterResourceModifierActionWorker {
+
+    @Inject
+    protected AccountManager accountManager;
+
+    private List<KubernetesClusterVmMapVO> clusterVMs;
+
+    public KubernetesClusterDestroyWorker(final KubernetesCluster kubernetesCluster, final KubernetesClusterManagerImpl clusterManager) {
+        super(kubernetesCluster, clusterManager);
+    }
+
+    private void validateClusterState() {
+        if (!(kubernetesCluster.getState().equals(KubernetesCluster.State.Running)
+                || kubernetesCluster.getState().equals(KubernetesCluster.State.Stopped)
+                || kubernetesCluster.getState().equals(KubernetesCluster.State.Alert)
+                || kubernetesCluster.getState().equals(KubernetesCluster.State.Error)
+                || kubernetesCluster.getState().equals(KubernetesCluster.State.Destroying))) {
+            String msg = String.format("Cannot perform delete operation on cluster ID: %s in state: %s",kubernetesCluster.getUuid(), kubernetesCluster.getState());
+            LOGGER.warn(msg);
+            throw new PermissionDeniedException(msg);
+        }
+    }
+
+    private boolean destroyClusterVMs() {
+        boolean vmDestroyed = true;
+        if (!CollectionUtils.isEmpty(clusterVMs)) {
+            for (KubernetesClusterVmMapVO clusterVM : clusterVMs) {
+                long vmID = clusterVM.getVmId();
+
+                // delete only if VM exists and is not removed
+                UserVmVO userVM = userVmDao.findById(vmID);
+                if (userVM == null || userVM.isRemoved()) {
+                    continue;
+                }
+                try {
+                    UserVm vm = userVmService.destroyVm(vmID, true);
+                    if (!userVmManager.expunge(userVM, CallContext.current().getCallingUserId(), CallContext.current().getCallingAccount())) {
+                        LOGGER.warn(String.format("Unable to expunge VM '%s' ID: %s, destroying Kubernetes cluster will probably fail"
+                                , vm.getInstanceName()
+                                , vm.getUuid()));
+                    }
+                    kubernetesClusterVmMapDao.expunge(clusterVM.getId());
+                    if (LOGGER.isInfoEnabled()) {
+                        LOGGER.info(String.format("Destroyed VM ID: %s as part of Kubernetes cluster ID: %s cleanup", vm.getUuid(), kubernetesCluster.getUuid()));
+                    }
+                } catch (ResourceUnavailableException | ConcurrentOperationException e) {
+                    LOGGER.warn(String.format("Failed to destroy VM ID: %s part of the Kubernetes cluster ID: %s cleanup. Moving on with destroying remaining resources provisioned for the Kubernetes cluster", userVM.getUuid(), kubernetesCluster.getUuid()), e);
+                    return false;
+                }
+            }
+        }
+        return vmDestroyed;
+    }
+
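+    // Marks the cluster for garbage collection so that cleanup of any leftover resources can be retried later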
+    private boolean updateKubernetesClusterEntryForGC() {
+        KubernetesClusterVO kubernetesClusterVO = kubernetesClusterDao.findById(kubernetesCluster.getId());
+        kubernetesClusterVO.setCheckForGc(true);
+        return kubernetesClusterDao.update(kubernetesCluster.getId(), kubernetesClusterVO);
+    }
+
+    private void destroyKubernetesClusterNetwork() throws ManagementServerException {
+        NetworkVO network = networkDao.findById(kubernetesCluster.getNetworkId());
+        if (network != null && network.getRemoved() == null) {
+            Account owner = accountManager.getAccount(network.getAccountId());
+            User callerUser = accountManager.getActiveUser(CallContext.current().getCallingUserId());
+            ReservationContext context = new ReservationContextImpl(null, null, callerUser, owner);
+            boolean networkDestroyed = networkMgr.destroyNetwork(kubernetesCluster.getNetworkId(), context, true);
+            if (!networkDestroyed) {
+                String msg = String.format("Failed to destroy network ID: %s as part of Kubernetes cluster ID: %s cleanup", network.getUuid(), kubernetesCluster.getUuid());
+                LOGGER.warn(msg);
+                throw new ManagementServerException(msg);
+            }
+            if (LOGGER.isInfoEnabled()) {
+                LOGGER.info(String.format("Destroyed network: %s as part of Kubernetes cluster ID: %s cleanup", network.getUuid(), kubernetesCluster.getUuid()));
+            }
+        }
+    }
+
+    private void deleteKubernetesClusterNetworkRules() throws ManagementServerException {
+        NetworkVO network = networkDao.findById(kubernetesCluster.getNetworkId());
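+        // Firewall, port forwarding and load balancing rules are only provisioned for isolated guest networks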
+        if (network == null || !Network.GuestType.Isolated.equals(network.getGuestType())) {
+            return;
+        }
+        List<Long> removedVmIds = new ArrayList<>();
+        if (!CollectionUtils.isEmpty(clusterVMs)) {
+            for (KubernetesClusterVmMapVO clusterVM : clusterVMs) {
+                removedVmIds.add(clusterVM.getVmId());
+            }
+        }
+        IpAddress publicIp = getSourceNatIp(network);
+        if (publicIp == null) {
+            throw new ManagementServerException(String.format("No source NAT IP addresses found for network ID: %s", network.getUuid()));
+        }
+        try {
+            removeLoadBalancingRule(publicIp, network, owner, CLUSTER_API_PORT);
+        } catch (ResourceUnavailableException e) {
+            throw new ManagementServerException(String.format("Failed to KubernetesCluster load balancing rule for network ID: %s", network.getUuid()));
+        }
+        FirewallRule firewallRule = removeApiFirewallRule(publicIp);
+        if (firewallRule == null) {
+            logMessage(Level.WARN, "Firewall rule for API access can't be removed", null);
+        }
+        firewallRule = removeSshFirewallRule(publicIp);
+        if (firewallRule == null) {
+            logMessage(Level.WARN, "Firewall rule for SSH access can't be removed", null);
+        }
+        try {
+            removePortForwardingRules(publicIp, network, owner, removedVmIds);
+        } catch (ResourceUnavailableException e) {
+            throw new ManagementServerException(String.format("Failed to KubernetesCluster port forwarding rules for network ID: %s", network.getUuid()));
+        }
+    }
+
+    private void validateClusterVMsDestroyed() {
+        if (clusterVMs != null && !clusterVMs.isEmpty()) { // Wait for a few seconds until all VMs are really expunged
+            final int maxRetries = 3;
+            int retryCounter = 0;
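+            // Poll until every cluster VM is marked removed, retrying a few times with a 10 second pause in between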
+            while (retryCounter < maxRetries) {
+                boolean allVMsRemoved = true;
+                for (KubernetesClusterVmMap clusterVM : clusterVMs) {
+                    UserVmVO userVM = userVmDao.findById(clusterVM.getVmId());
+                    if (userVM != null && !userVM.isRemoved()) {
+                        allVMsRemoved = false;
+                        break;
+                    }
+                }
+                if (allVMsRemoved) {
+                    break;
+                }
+                try {
+                    Thread.sleep(10000);
+                } catch (InterruptedException ie) {}
+                retryCounter++;
+            }
+        }
+    }
+
+    public boolean destroy() throws CloudRuntimeException {
+        init();
+        validateClusterState();
+        if (LOGGER.isInfoEnabled()) {
+            LOGGER.info(String.format("Destroying Kubernetes cluster ID: %s", kubernetesCluster.getUuid()));
+        }
+        stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.DestroyRequested);
+        this.clusterVMs = kubernetesClusterVmMapDao.listByClusterId(kubernetesCluster.getId());
+        boolean vmsDestroyed = destroyClusterVMs();
+        boolean cleanupNetwork = true;
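+        // The "networkCleanup" detail decides whether the cluster's network is destroyed entirely or only its network rules are removed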
+        final KubernetesClusterDetailsVO clusterDetails = kubernetesClusterDetailsDao.findDetail(kubernetesCluster.getId(), "networkCleanup");
+        if (clusterDetails != null) {
+            cleanupNetwork = Boolean.parseBoolean(clusterDetails.getValue());
+        }
+        // if there are VMs that were not expunged, we cannot delete the network
+        if (vmsDestroyed) {
+            if (cleanupNetwork) {
+                validateClusterVMsDestroyed();
+                try {
+                    destroyKubernetesClusterNetwork();
+                } catch (ManagementServerException e) {
+                    String msg = String.format("Failed to destroy network of Kubernetes cluster ID: %s cleanup", kubernetesCluster.getUuid());
+                    LOGGER.warn(msg, e);
+                    updateKubernetesClusterEntryForGC();
+                    throw new CloudRuntimeException(msg, e);
+                }
+            } else {
+                try {
+                    deleteKubernetesClusterNetworkRules();
+                } catch (ManagementServerException e) {
+                    String msg = String.format("Failed to remove network rules of Kubernetes cluster ID: %s", kubernetesCluster.getUuid());
+                    LOGGER.warn(msg, e);
+                    updateKubernetesClusterEntryForGC();
+                    throw new CloudRuntimeException(msg, e);
+                }
+            }
+        } else {
+            String msg = String.format("Failed to destroy one or more VMs as part of Kubernetes cluster ID: %s cleanup", kubernetesCluster.getUuid());
+            LOGGER.warn(msg);
+            updateKubernetesClusterEntryForGC();
+            throw new CloudRuntimeException(msg);
+        }
+        stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded);
+        boolean deleted = kubernetesClusterDao.remove(kubernetesCluster.getId());
+        if (!deleted) {
+            logMessage(Level.WARN, String.format("Failed to delete Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), null);
+            updateKubernetesClusterEntryForGC();
+            return false;
+        }
+        if (LOGGER.isInfoEnabled()) {
+            LOGGER.info(String.format("Kubernetes cluster ID: %s is successfully deleted", kubernetesCluster.getUuid()));
+        }
+        return true;
+    }
+}
diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java
new file mode 100644
index 0000000..5d25614
--- /dev/null
+++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java
@@ -0,0 +1,513 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloud.kubernetes.cluster.actionworkers;
+
+import java.io.IOException;
+import java.lang.reflect.Field;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+import javax.inject.Inject;
+
+import org.apache.cloudstack.api.ApiConstants;
+import org.apache.cloudstack.api.BaseCmd;
+import org.apache.cloudstack.api.command.user.firewall.CreateFirewallRuleCmd;
+import org.apache.cloudstack.api.command.user.vm.StartVMCmd;
+import org.apache.commons.codec.binary.Base64;
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.log4j.Level;
+
+import com.cloud.capacity.CapacityManager;
+import com.cloud.dc.ClusterDetailsDao;
+import com.cloud.dc.ClusterDetailsVO;
+import com.cloud.dc.ClusterVO;
+import com.cloud.dc.DataCenter;
+import com.cloud.dc.dao.ClusterDao;
+import com.cloud.deploy.DeployDestination;
+import com.cloud.exception.InsufficientCapacityException;
+import com.cloud.exception.InsufficientServerCapacityException;
+import com.cloud.exception.ManagementServerException;
+import com.cloud.exception.NetworkRuleConflictException;
+import com.cloud.exception.ResourceAllocationException;
+import com.cloud.exception.ResourceUnavailableException;
+import com.cloud.host.Host;
+import com.cloud.host.HostVO;
+import com.cloud.hypervisor.Hypervisor;
+import com.cloud.kubernetes.cluster.KubernetesCluster;
+import com.cloud.kubernetes.cluster.KubernetesClusterDetailsVO;
+import com.cloud.kubernetes.cluster.KubernetesClusterManagerImpl;
+import com.cloud.kubernetes.cluster.utils.KubernetesClusterUtil;
+import com.cloud.network.IpAddress;
+import com.cloud.network.Network;
+import com.cloud.network.dao.FirewallRulesDao;
+import com.cloud.network.dao.LoadBalancerDao;
+import com.cloud.network.dao.LoadBalancerVO;
+import com.cloud.network.firewall.FirewallService;
+import com.cloud.network.lb.LoadBalancingRulesService;
+import com.cloud.network.rules.FirewallRule;
+import com.cloud.network.rules.FirewallRuleVO;
+import com.cloud.network.rules.PortForwardingRuleVO;
+import com.cloud.network.rules.RulesService;
+import com.cloud.network.rules.dao.PortForwardingRulesDao;
+import com.cloud.offering.ServiceOffering;
+import com.cloud.resource.ResourceManager;
+import com.cloud.template.VirtualMachineTemplate;
+import com.cloud.user.Account;
+import com.cloud.user.SSHKeyPairVO;
+import com.cloud.uservm.UserVm;
+import com.cloud.utils.Pair;
+import com.cloud.utils.StringUtils;
+import com.cloud.utils.component.ComponentContext;
+import com.cloud.utils.db.Transaction;
+import com.cloud.utils.db.TransactionCallbackWithException;
+import com.cloud.utils.db.TransactionStatus;
+import com.cloud.utils.exception.ExecutionException;
+import com.cloud.utils.net.Ip;
+import com.cloud.utils.net.NetUtils;
+import com.cloud.vm.Nic;
+import com.cloud.vm.UserVmManager;
+import com.cloud.vm.VirtualMachine;
+import com.cloud.vm.dao.VMInstanceDao;
+import com.google.common.base.Strings;
+
+public class KubernetesClusterResourceModifierActionWorker extends KubernetesClusterActionWorker {
+
+    @Inject
+    protected CapacityManager capacityManager;
+    @Inject
+    protected ClusterDao clusterDao;
+    @Inject
+    protected ClusterDetailsDao clusterDetailsDao;
+    @Inject
+    protected FirewallRulesDao firewallRulesDao;
+    @Inject
+    protected FirewallService firewallService;
+    @Inject
+    protected LoadBalancingRulesService lbService;
+    @Inject
+    protected RulesService rulesService;
+    @Inject
+    protected PortForwardingRulesDao portForwardingRulesDao;
+    @Inject
+    protected ResourceManager resourceManager;
+    @Inject
+    protected LoadBalancerDao loadBalancerDao;
+    @Inject
+    protected VMInstanceDao vmInstanceDao;
+    @Inject
+    protected UserVmManager userVmManager;
+
+    protected String kubernetesClusterNodeNamePrefix;
+
+    protected KubernetesClusterResourceModifierActionWorker(final KubernetesCluster kubernetesCluster, final KubernetesClusterManagerImpl clusterManager) {
+        super(kubernetesCluster, clusterManager);
+    }
+
+    protected void init() {
+        super.init();
+        kubernetesClusterNodeNamePrefix = getKubernetesClusterNodeNamePrefix();
+    }
+
+    private String getKubernetesNodeConfig(final String joinIp, final boolean ejectIso) throws IOException {
+        String k8sNodeConfig = readResourceFile("/conf/k8s-node.yml");
+        final String sshPubKey = "{{ k8s.ssh.pub.key }}";
+        final String joinIpKey = "{{ k8s_master.join_ip }}";
+        final String clusterTokenKey = "{{ k8s_master.cluster.token }}";
+        final String ejectIsoKey = "{{ k8s.eject.iso }}";
+        String pubKey = "- \"" + configurationDao.getValue("ssh.publickey") + "\"";
+        String sshKeyPair = kubernetesCluster.getKeyPair();
+        if (!Strings.isNullOrEmpty(sshKeyPair)) {
+            SSHKeyPairVO sshkp = sshKeyPairDao.findByName(owner.getAccountId(), owner.getDomainId(), sshKeyPair);
+            if (sshkp != null) {
+                pubKey += "\n  - \"" + sshkp.getPublicKey() + "\"";
+            }
+        }
+        k8sNodeConfig = k8sNodeConfig.replace(sshPubKey, pubKey);
+        k8sNodeConfig = k8sNodeConfig.replace(joinIpKey, joinIp);
+        k8sNodeConfig = k8sNodeConfig.replace(clusterTokenKey, KubernetesClusterUtil.generateClusterToken(kubernetesCluster));
+        k8sNodeConfig = k8sNodeConfig.replace(ejectIsoKey, String.valueOf(ejectIso));
+        /* generate the /.docker/config.json file on the nodes only if the Kubernetes cluster is created to
+         * use a private Docker registry */
+        String dockerUserName = null;
+        String dockerPassword = null;
+        String dockerRegistryUrl = null;
+        String dockerRegistryEmail = null;
+        List<KubernetesClusterDetailsVO> details = kubernetesClusterDetailsDao.listDetails(kubernetesCluster.getId());
+        for (KubernetesClusterDetailsVO detail : details) {
+            if (detail.getName().equals(ApiConstants.DOCKER_REGISTRY_USER_NAME)) {
+                dockerUserName = detail.getValue();
+            }
+            if (detail.getName().equals(ApiConstants.DOCKER_REGISTRY_PASSWORD)) {
+                dockerPassword = detail.getValue();
+            }
+            if (detail.getName().equals(ApiConstants.DOCKER_REGISTRY_URL)) {
+                dockerRegistryUrl = detail.getValue();
+            }
+            if (detail.getName().equals(ApiConstants.DOCKER_REGISTRY_EMAIL)) {
+                dockerRegistryEmail = detail.getValue();
+            }
+        }
+        if (!Strings.isNullOrEmpty(dockerUserName) && !Strings.isNullOrEmpty(dockerPassword)) {
+            // write the /.docker/config.json file through the code instead of k8s-node.yml, as a section there
+            // cannot be made optional or conditionally applied
+            String dockerConfigString = "write-files:\n" +
+                    "  - path: /.docker/config.json\n" +
+                    "    owner: core:core\n" +
+                    "    permissions: '0644'\n" +
+                    "    content: |\n" +
+                    "      {\n" +
+                    "        \"auths\": {\n" +
+                    "          {{docker.url}}: {\n" +
+                    "            \"auth\": {{docker.secret}},\n" +
+                    "            \"email\": {{docker.email}}\n" +
+                    "          }\n" +
+                    "         }\n" +
+                    "      }";
+            k8sNodeConfig = k8sNodeConfig.replace("write-files:", dockerConfigString);
+            final String dockerUrlKey = "{{docker.url}}";
+            final String dockerAuthKey = "{{docker.secret}}";
+            final String dockerEmailKey = "{{docker.email}}";
+            final String usernamePasswordKey = dockerUserName + ":" + dockerPassword;
+            String base64Auth = Base64.encodeBase64String(usernamePasswordKey.getBytes(StringUtils.getPreferredCharset()));
+            k8sNodeConfig = k8sNodeConfig.replace(dockerUrlKey, "\"" + dockerRegistryUrl + "\"");
+            k8sNodeConfig = k8sNodeConfig.replace(dockerAuthKey, "\"" + base64Auth + "\"");
+            k8sNodeConfig = k8sNodeConfig.replace(dockerEmailKey, "\"" + dockerRegistryEmail + "\"");
+        }
+        return k8sNodeConfig;
+    }
+
+    protected DeployDestination plan(final long nodesCount, final DataCenter zone, final ServiceOffering offering) throws InsufficientServerCapacityException {
+        final int cpu_requested = offering.getCpu() * offering.getSpeed();
+        final long ram_requested = offering.getRamSize() * 1024L * 1024L;
+        List<HostVO> hosts = resourceManager.listAllHostsInOneZoneByType(Host.Type.Routing, zone.getId());
+        final Map<String, Pair<HostVO, Integer>> hosts_with_reserved_capacity = new ConcurrentHashMap<String, Pair<HostVO, Integer>>();
+        for (HostVO h : hosts) {
+            hosts_with_reserved_capacity.put(h.getUuid(), new Pair<HostVO, Integer>(h, 0));
+        }
+        boolean suitable_host_found = false;
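+        // Greedily simulate placing each cluster VM: for every VM, find a host that still has capacity for all the
+        // VMs already reserved on it plus one more, honoring the cluster's CPU and memory overcommit ratios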
+        for (int i = 1; i <= nodesCount + 1; i++) {
+            suitable_host_found = false;
+            for (Map.Entry<String, Pair<HostVO, Integer>> hostEntry : hosts_with_reserved_capacity.entrySet()) {
+                Pair<HostVO, Integer> hp = hostEntry.getValue();
+                HostVO h = hp.first();
+                int reserved = hp.second();
+                reserved++;
+                ClusterVO cluster = clusterDao.findById(h.getClusterId());
+                ClusterDetailsVO cluster_detail_cpu = clusterDetailsDao.findDetail(cluster.getId(), "cpuOvercommitRatio");
+                ClusterDetailsVO cluster_detail_ram = clusterDetailsDao.findDetail(cluster.getId(), "memoryOvercommitRatio");
+                Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue());
+                Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue());
+                if (LOGGER.isDebugEnabled()) {
+                    LOGGER.debug(String.format("Checking host ID: %s for capacity already reserved %d", h.getUuid(), reserved));
+                }
+                if (capacityManager.checkIfHostHasCapacity(h.getId(), cpu_requested * reserved, ram_requested * reserved, false, cpuOvercommitRatio, memoryOvercommitRatio, true)) {
+                    if (LOGGER.isDebugEnabled()) {
+                        LOGGER.debug(String.format("Found host ID: %s for with enough capacity, CPU=%d RAM=%d", h.getUuid(), cpu_requested * reserved, ram_requested * reserved));
+                    }
+                    hostEntry.setValue(new Pair<HostVO, Integer>(h, reserved));
+                    suitable_host_found = true;
+                    break;
+                }
+            }
+            if (!suitable_host_found) {
+                if (LOGGER.isInfoEnabled()) {
+                    LOGGER.info(String.format("Suitable hosts not found in datacenter ID: %s for node %d", zone.getUuid(), i));
+                }
+                break;
+            }
+        }
+        if (suitable_host_found) {
+            if (LOGGER.isInfoEnabled()) {
+                LOGGER.info(String.format("Suitable hosts found in datacenter ID: %s, creating deployment destination", zone.getUuid()));
+            }
+            return new DeployDestination(zone, null, null, null);
+        }
+        String msg = String.format("Cannot find enough capacity for Kubernetes cluster(requested cpu=%1$s memory=%2$s)",
+                cpu_requested * nodesCount, ram_requested * nodesCount);
+        LOGGER.warn(msg);
+        throw new InsufficientServerCapacityException(msg, DataCenter.class, zone.getId());
+    }
+
+    protected DeployDestination plan() throws InsufficientServerCapacityException {
+        ServiceOffering offering = serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId());
+        DataCenter zone = dataCenterDao.findById(kubernetesCluster.getZoneId());
+        if (LOGGER.isDebugEnabled()) {
+            LOGGER.debug(String.format("Checking deployment destination for Kubernetes cluster ID: %s in zone ID: %s", kubernetesCluster.getUuid(), zone.getUuid()));
+        }
+        return plan(kubernetesCluster.getTotalNodeCount(), zone, offering);
+    }
+
+    protected void startKubernetesVM(final UserVm vm) throws ManagementServerException {
+        try {
+            StartVMCmd startVm = new StartVMCmd();
+            startVm = ComponentContext.inject(startVm);
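+            // Populate the command's id parameter via reflection and dispatch it through the regular API service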
+            Field f = startVm.getClass().getDeclaredField("id");
+            f.setAccessible(true);
+            f.set(startVm, vm.getId());
+            userVmService.startVirtualMachine(startVm);
+            if (LOGGER.isInfoEnabled()) {
+                LOGGER.info(String.format("Started VM ID: %s in the Kubernetes cluster ID: %s", vm.getUuid(), kubernetesCluster.getUuid()));
+            }
+        } catch (IllegalAccessException | NoSuchFieldException | ExecutionException |
+                ResourceUnavailableException | ResourceAllocationException | InsufficientCapacityException ex) {
+            throw new ManagementServerException(String.format("Failed to start VM in the Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), ex);
+        }
+
+        UserVm startVm = userVmDao.findById(vm.getId());
+        if (!startVm.getState().equals(VirtualMachine.State.Running)) {
+            throw new ManagementServerException(String.format("Failed to start VM in the Kubernetes cluster ID: %s", kubernetesCluster.getUuid()));
+        }
+    }
+
+    protected List<UserVm> provisionKubernetesClusterNodeVms(final long nodeCount, final int offset, final String publicIpAddress) throws ManagementServerException,
+            ResourceUnavailableException, InsufficientCapacityException {
+        List<UserVm> nodes = new ArrayList<>();
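+        // Create, register and start node VMs numbered from offset + 1 up to nodeCount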
+        for (int i = offset + 1; i <= nodeCount; i++) {
+            UserVm vm = createKubernetesNode(publicIpAddress, i);
+            addKubernetesClusterVm(kubernetesCluster.getId(), vm.getId());
+            startKubernetesVM(vm);
+            vm = userVmDao.findById(vm.getId());
+            if (vm == null) {
+                throw new ManagementServerException(String.format("Failed to provision worker VM for Kubernetes cluster ID: %s" , kubernetesCluster.getUuid()));
+            }
+            nodes.add(vm);
+            if (LOGGER.isInfoEnabled()) {
+                LOGGER.info(String.format("Provisioned node VM ID: %s in to the Kubernetes cluster ID: %s", vm.getUuid(), kubernetesCluster.getUuid()));
+            }
+        }
+        return nodes;
+    }
+
+    protected List<UserVm> provisionKubernetesClusterNodeVms(final long nodeCount, final String publicIpAddress) throws ManagementServerException,
+            ResourceUnavailableException, InsufficientCapacityException {
+        return provisionKubernetesClusterNodeVms(nodeCount, 0, publicIpAddress);
+    }
+
+    protected UserVm createKubernetesNode(String joinIp, int nodeInstance) throws ManagementServerException,
+            ResourceUnavailableException, InsufficientCapacityException {
+        UserVm nodeVm = null;
+        DataCenter zone = dataCenterDao.findById(kubernetesCluster.getZoneId());
+        ServiceOffering serviceOffering = serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId());
+        VirtualMachineTemplate template = templateDao.findById(kubernetesCluster.getTemplateId());
+        List<Long> networkIds = new ArrayList<Long>();
+        networkIds.add(kubernetesCluster.getNetworkId());
+        Account owner = accountDao.findById(kubernetesCluster.getAccountId());
+        Network.IpAddresses addrs = new Network.IpAddresses(null, null);
+        long rootDiskSize = kubernetesCluster.getNodeRootDiskSize();
+        Map<String, String> customParameterMap = new HashMap<String, String>();
+        if (rootDiskSize > 0) {
+            customParameterMap.put("rootdisksize", String.valueOf(rootDiskSize));
+        }
+        String hostName = getKubernetesClusterNodeAvailableName(String.format("%s-node-%s", kubernetesClusterNodeNamePrefix, nodeInstance));
+        String k8sNodeConfig = null;
+        try {
+            k8sNodeConfig = getKubernetesNodeConfig(joinIp, Hypervisor.HypervisorType.VMware.equals(template.getHypervisorType()));
+        } catch (IOException e) {
+            logAndThrow(Level.ERROR, "Failed to read Kubernetes node configuration file", e);
+        }
+        String base64UserData = Base64.encodeBase64String(k8sNodeConfig.getBytes(StringUtils.getPreferredCharset()));
+        nodeVm = userVmService.createAdvancedVirtualMachine(zone, serviceOffering, template, networkIds, owner,
+                hostName, hostName, null, null, null,
+                null, BaseCmd.HTTPMethod.POST, base64UserData, kubernetesCluster.getKeyPair(),
+                null, addrs, null, null, null, customParameterMap, null, null, null, null);
+        if (LOGGER.isInfoEnabled()) {
+            LOGGER.info(String.format("Created node VM ID: %s, %s in the Kubernetes cluster ID: %s", nodeVm.getUuid(), hostName, kubernetesCluster.getUuid()));
+        }
+        return nodeVm;
+    }
+
+    protected IpAddress getSourceNatIp(Network network) {
+        List<? extends IpAddress> addresses = networkModel.listPublicIpsAssignedToGuestNtwk(network.getId(), true);
+        if (CollectionUtils.isEmpty(addresses)) {
+            return null;
+        }
+        for (IpAddress address : addresses) {
+            if (address.isSourceNat()) {
+                return address;
+            }
+        }
+        return null;
+    }
+
+    protected void provisionFirewallRules(final IpAddress publicIp, final Account account, int startPort, int endPort) throws NoSuchFieldException,
+            IllegalAccessException, ResourceUnavailableException, NetworkRuleConflictException {
+        List<String> sourceCidrList = new ArrayList<String>();
+        sourceCidrList.add("0.0.0.0/0");
+
+        CreateFirewallRuleCmd rule = new CreateFirewallRuleCmd();
+        rule = ComponentContext.inject(rule);
+
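+        // Fill in the API command's parameters (IP, protocol, port range, source CIDR) via reflection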
+        Field addressField = rule.getClass().getDeclaredField("ipAddressId");
+        addressField.setAccessible(true);
+        addressField.set(rule, publicIp.getId());
+
+        Field protocolField = rule.getClass().getDeclaredField("protocol");
+        protocolField.setAccessible(true);
+        protocolField.set(rule, "TCP");
+
+        Field startPortField = rule.getClass().getDeclaredField("publicStartPort");
+        startPortField.setAccessible(true);
+        startPortField.set(rule, startPort);
+
+        Field endPortField = rule.getClass().getDeclaredField("publicEndPort");
+        endPortField.setAccessible(true);
+        endPortField.set(rule, endPort);
+
+        Field cidrField = rule.getClass().getDeclaredField("cidrlist");
+        cidrField.setAccessible(true);
+        cidrField.set(rule, sourceCidrList);
+
+        firewallService.createIngressFirewallRule(rule);
+        firewallService.applyIngressFwRules(publicIp.getId(), account);
+    }
+
+    /**
+     * Provisions SSH port forwarding rules for the given Kubernetes cluster
+     * and its given virtual machines
+     * @param publicIp
+     * @param network
+     * @param account
+     * @param clusterVMIds cluster VM IDs (empty when the method is called while
+     *                  down-scaling the Kubernetes cluster, in which case no new rules
+     *                  are added)
+     * @param firewallRuleSourcePortStart
+     * @throws ResourceUnavailableException
+     * @throws NetworkRuleConflictException
+     */
+    protected void provisionSshPortForwardingRules(IpAddress publicIp, Network network, Account account,
+                                                   List<Long> clusterVMIds, int firewallRuleSourcePortStart) throws ResourceUnavailableException,
+            NetworkRuleConflictException {
+        if (!CollectionUtils.isEmpty(clusterVMIds)) {
+            final long publicIpId = publicIp.getId();
+            final long networkId = network.getId();
+            final long accountId = account.getId();
+            final long domainId = account.getDomainId();
+            for (int i = 0; i < clusterVMIds.size(); ++i) {
+                long vmId = clusterVMIds.get(i);
+                Nic vmNic = networkModel.getNicInNetwork(vmId, networkId);
+                final Ip vmIp = new Ip(vmNic.getIPv4Address());
+                final long vmIdFinal = vmId;
+                final int srcPortFinal = firewallRuleSourcePortStart + i;
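+                // Forward a distinct public port (start port + node index) to port 22 on this node VM's private IP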
+
+                PortForwardingRuleVO pfRule = Transaction.execute(new TransactionCallbackWithException<PortForwardingRuleVO, NetworkRuleConflictException>() {
+                    @Override
+                    public PortForwardingRuleVO doInTransaction(TransactionStatus status) throws NetworkRuleConflictException {
+                        PortForwardingRuleVO newRule =
+                                new PortForwardingRuleVO(null, publicIpId,
+                                        srcPortFinal, srcPortFinal,
+                                        vmIp,
+                                        22, 22,
+                                        "tcp", networkId, accountId, domainId, vmIdFinal);
+                        newRule.setDisplay(true);
+                        newRule.setState(FirewallRule.State.Add);
+                        newRule = portForwardingRulesDao.persist(newRule);
+                        return newRule;
+                    }
+                });
+                rulesService.applyPortForwardingRules(publicIp.getId(), account);
+                if (LOGGER.isInfoEnabled()) {
+                    LOGGER.info(String.format("Provisioned SSH port forwarding rule from port %d to 22 on %s to the VM IP : %s in Kubernetes cluster ID: %s", srcPortFinal, publicIp.getAddress().addr(), vmIp.toString(), kubernetesCluster.getUuid()));
+                }
+            }
+        }
+    }
+
+    protected FirewallRule removeApiFirewallRule(final IpAddress publicIp) {
+        FirewallRule rule = null;
+        List<FirewallRuleVO> firewallRules = firewallRulesDao.listByIpAndPurposeAndNotRevoked(publicIp.getId(), FirewallRule.Purpose.Firewall);
+        for (FirewallRuleVO firewallRule : firewallRules) {
+            if (firewallRule.getSourcePortStart() == CLUSTER_API_PORT &&
+                    firewallRule.getSourcePortEnd() == CLUSTER_API_PORT) {
+                rule = firewallRule;
+                firewallService.revokeIngressFwRule(firewallRule.getId(), true);
+                break;
+            }
+        }
+        return rule;
+    }
+
+    protected FirewallRule removeSshFirewallRule(final IpAddress publicIp) {
+        FirewallRule rule = null;
+        List<FirewallRuleVO> firewallRules = firewallRulesDao.listByIpAndPurposeAndNotRevoked(publicIp.getId(), FirewallRule.Purpose.Firewall);
+        for (FirewallRuleVO firewallRule : firewallRules) {
+            if (firewallRule.getSourcePortStart() == CLUSTER_NODES_DEFAULT_START_SSH_PORT) {
+                rule = firewallRule;
+                firewallService.revokeIngressFwRule(firewallRule.getId(), true);
+                break;
+            }
+        }
+        return rule;
+    }
+
+    protected void removePortForwardingRules(final IpAddress publicIp, final Network network, final Account account, final List<Long> removedVMIds) throws ResourceUnavailableException {
+        if (!CollectionUtils.isEmpty(removedVMIds)) {
+            for (Long vmId : removedVMIds) {
+                List<PortForwardingRuleVO> pfRules = portForwardingRulesDao.listByNetwork(network.getId());
+                for (PortForwardingRuleVO pfRule : pfRules) {
+                    if (pfRule.getVirtualMachineId() == vmId) {
+                        portForwardingRulesDao.remove(pfRule.getId());
+                        break;
+                    }
+                }
+            }
+            rulesService.applyPortForwardingRules(publicIp.getId(), account);
+        }
+    }
+
+    protected void removeLoadBalancingRule(final IpAddress publicIp, final Network network,
+                                           final Account account, final int port) throws ResourceUnavailableException {
+        List<LoadBalancerVO> rules = loadBalancerDao.listByIpAddress(publicIp.getId());
+        for (LoadBalancerVO rule : rules) {
+            if (rule.getNetworkId() == network.getId() &&
+                    rule.getAccountId() == account.getId() &&
+                    rule.getSourcePortStart() == port &&
+                    rule.getSourcePortEnd() == port) {
+                lbService.deleteLoadBalancerRule(rule.getId(), true);
+                break;
+            }
+        }
+    }
+
+    protected String getKubernetesClusterNodeNamePrefix() {
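+        // Derive a DNS-safe node name prefix from the cluster name (falling back to the cluster UUID) and cap it at 40 characters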
+        String prefix = kubernetesCluster.getName();
+        if (!NetUtils.verifyDomainNameLabel(prefix, true)) {
+            prefix = prefix.replaceAll("[^a-zA-Z0-9-]", "");
+            if (prefix.length() == 0) {
+                prefix = kubernetesCluster.getUuid();
+            }
+            prefix = "k8s-" + prefix;
+        }
+        if (prefix.length() > 40) {
+            prefix = prefix.substring(0, 40);
+        }
+        return prefix;
+    }
+
+    protected String getKubernetesClusterNodeAvailableName(final String hostName) {
+        String name = hostName;
+        int suffix = 1;
+        while (vmInstanceDao.findVMByHostName(name) != null) {
+            name = String.format("%s-%d", hostName, suffix);
+            suffix++;
+        }
+        return name;
+    }
+}
diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java
new file mode 100644
index 0000000..0d6a028
--- /dev/null
+++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java
@@ -0,0 +1,431 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloud.kubernetes.cluster.actionworkers;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+
+import javax.inject.Inject;
+
+import org.apache.cloudstack.context.CallContext;
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.log4j.Level;
+
+import com.cloud.dc.DataCenter;
+import com.cloud.exception.ConcurrentOperationException;
+import com.cloud.exception.InsufficientCapacityException;
+import com.cloud.exception.ManagementServerException;
+import com.cloud.exception.NetworkRuleConflictException;
+import com.cloud.exception.ResourceUnavailableException;
+import com.cloud.exception.VirtualMachineMigrationException;
+import com.cloud.hypervisor.Hypervisor;
+import com.cloud.kubernetes.cluster.KubernetesCluster;
+import com.cloud.kubernetes.cluster.KubernetesClusterManagerImpl;
+import com.cloud.kubernetes.cluster.KubernetesClusterService;
+import com.cloud.kubernetes.cluster.KubernetesClusterVO;
+import com.cloud.kubernetes.cluster.KubernetesClusterVmMapVO;
+import com.cloud.kubernetes.cluster.utils.KubernetesClusterUtil;
+import com.cloud.network.IpAddress;
+import com.cloud.network.Network;
+import com.cloud.network.rules.FirewallRule;
+import com.cloud.offering.ServiceOffering;
+import com.cloud.uservm.UserVm;
+import com.cloud.utils.Pair;
+import com.cloud.utils.db.Transaction;
+import com.cloud.utils.db.TransactionCallback;
+import com.cloud.utils.exception.CloudRuntimeException;
+import com.cloud.utils.ssh.SshHelper;
+import com.cloud.vm.UserVmVO;
+import com.cloud.vm.VMInstanceVO;
+import com.cloud.vm.VirtualMachine;
+import com.cloud.vm.dao.VMInstanceDao;
+import com.google.common.base.Strings;
+
+public class KubernetesClusterScaleWorker extends KubernetesClusterResourceModifierActionWorker {
+
+    @Inject
+    protected VMInstanceDao vmInstanceDao;
+
+    private ServiceOffering serviceOffering;
+    private Long clusterSize;
+    private KubernetesCluster.State originalState;
+    private Network network;
+    private long scaleTimeoutTime;
+
+    public KubernetesClusterScaleWorker(final KubernetesCluster kubernetesCluster,
+                                        final ServiceOffering serviceOffering,
+                                        final Long clusterSize,
+                                        final KubernetesClusterManagerImpl clusterManager) {
+        super(kubernetesCluster, clusterManager);
+        this.serviceOffering = serviceOffering;
+        this.clusterSize = clusterSize;
+        this.originalState = kubernetesCluster.getState();
+    }
+
+    protected void init() {
+        super.init();
+        this.network = networkDao.findById(kubernetesCluster.getNetworkId());
+    }
+
+    private void logTransitStateToFailedIfNeededAndThrow(final Level logLevel, final String message, final Exception e) throws CloudRuntimeException {
+        KubernetesCluster cluster = kubernetesClusterDao.findById(kubernetesCluster.getId());
+        if (cluster != null && KubernetesCluster.State.Scaling.equals(cluster.getState())) {
+            logTransitStateAndThrow(logLevel, message, kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed, e);
+        } else {
+            logAndThrow(logLevel, message, e);
+        }
+    }
+
+    private void logTransitStateToFailedIfNeededAndThrow(final Level logLevel, final String message) throws CloudRuntimeException {
+        logTransitStateToFailedIfNeededAndThrow(logLevel, message, null);
+    }
+
+    /**
+     * Scales the network rules of an existing Kubernetes cluster while it is being scaled:
+     * opens up the firewall for SSH access from port CLUSTER_NODES_DEFAULT_START_SSH_PORT to CLUSTER_NODES_DEFAULT_START_SSH_PORT+n,
+     * removes port forwarding rules for removed virtual machines and creates port forwarding rules
+     * to forward public IP traffic to each node VM's private IP.
+     * @param clusterVMIds
+     * @param removedVMIds
+     * @throws ManagementServerException
+     */
+    private void scaleKubernetesClusterNetworkRules(final List<Long> clusterVMIds, final List<Long> removedVMIds) throws ManagementServerException {
+        if (!Network.GuestType.Isolated.equals(network.getGuestType())) {
+            if (LOGGER.isDebugEnabled()) {
+                LOGGER.debug(String.format("Network ID: %s for Kubernetes cluster ID: %s is not an isolated network, therefore, no need for network rules", network.getUuid(), kubernetesCluster.getUuid()));
+            }
+            return;
+        }
+        IpAddress publicIp = getSourceNatIp(network);
+        if (publicIp == null) {
+            throw new ManagementServerException(String.format("No source NAT IP addresses found for network ID: %s, Kubernetes cluster ID: %s", network.getUuid(), kubernetesCluster.getUuid()));
+        }
+
+        // Remove existing SSH firewall rules
+        FirewallRule firewallRule = removeSshFirewallRule(publicIp);
+        if (firewallRule == null) {
+            throw new ManagementServerException("Firewall rule for node SSH access can't be provisioned");
+        }
+        int existingFirewallRuleSourcePortEnd = firewallRule.getSourcePortEnd();
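+        // Remember the removed rule's end port; SSH port forwarding rules for newly added nodes start right after it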
+        final int scaledTotalNodeCount = clusterSize == null ? (int)kubernetesCluster.getTotalNodeCount() : (int)(clusterSize + kubernetesCluster.getMasterNodeCount());
+        // Provision new SSH firewall rules
+        try {
+            provisionFirewallRules(publicIp, owner, CLUSTER_NODES_DEFAULT_START_SSH_PORT, CLUSTER_NODES_DEFAULT_START_SSH_PORT + scaledTotalNodeCount - 1);
+            if (LOGGER.isDebugEnabled()) {
+                LOGGER.debug(String.format("Provisioned  firewall rule to open up port %d to %d on %s in Kubernetes cluster ID: %s",
+                        CLUSTER_NODES_DEFAULT_START_SSH_PORT, CLUSTER_NODES_DEFAULT_START_SSH_PORT + scaledTotalNodeCount - 1, publicIp.getAddress().addr(), kubernetesCluster.getUuid()));
+            }
+        } catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException e) {
+            throw new ManagementServerException(String.format("Failed to activate SSH firewall rules for the Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), e);
+        }
+
+        try {
+            removePortForwardingRules(publicIp, network, owner, removedVMIds);
+        } catch (ResourceUnavailableException e) {
+            throw new ManagementServerException(String.format("Failed to remove SSH port forwarding rules for removed VMs for the Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), e);
+        }
+
+        try {
+            provisionSshPortForwardingRules(publicIp, network, owner, clusterVMIds, existingFirewallRuleSourcePortEnd + 1);
+        } catch (ResourceUnavailableException | NetworkRuleConflictException e) {
+            throw new ManagementServerException(String.format("Failed to activate SSH port forwarding rules for the Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), e);
+        }
+    }
+
+    private KubernetesClusterVO updateKubernetesClusterEntry(final long cores, final long memory,
+                                                             final Long size, final Long serviceOfferingId) {
+        return Transaction.execute((TransactionCallback<KubernetesClusterVO>) status -> {
+            KubernetesClusterVO updatedCluster = kubernetesClusterDao.createForUpdate(kubernetesCluster.getId());
+            updatedCluster.setCores(cores);
+            updatedCluster.setMemory(memory);
+            if (size != null) {
+                updatedCluster.setNodeCount(size);
+            }
+            if (serviceOfferingId != null) {
+                updatedCluster.setServiceOfferingId(serviceOfferingId);
+            }
+            kubernetesClusterDao.persist(updatedCluster);
+            return updatedCluster;
+        });
+    }
+
+    private KubernetesClusterVO updateKubernetesClusterEntry(final Long newSize, final ServiceOffering newServiceOffering) throws CloudRuntimeException {
+        final ServiceOffering serviceOffering = newServiceOffering == null ?
+                serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId()) : newServiceOffering;
+        final Long serviceOfferingId = newServiceOffering == null ? null : serviceOffering.getId();
+        final long size = newSize == null ? kubernetesCluster.getTotalNodeCount() : (newSize + kubernetesCluster.getMasterNodeCount());
+        final long cores = serviceOffering.getCpu() * size;
+        final long memory = serviceOffering.getRamSize() * size;
+        KubernetesClusterVO kubernetesClusterVO = updateKubernetesClusterEntry(cores, memory, newSize, serviceOfferingId);
+        if (kubernetesClusterVO == null) {
+            logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster ID: %s failed, unable to update Kubernetes cluster",
+                    kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);
+        }
+        return kubernetesClusterVO;
+    }
+
+    private boolean removeKubernetesClusterNode(final String ipAddress, final int port, final UserVm userVm, final int retries, final int waitDuration) {
+        File pkFile = getManagementServerSshPublicKeyFile();
+        int retryCounter = 0;
+        String hostName = userVm.getHostName();
+        if (!Strings.isNullOrEmpty(hostName)) {
+            hostName = hostName.toLowerCase();
+        }
+        while (retryCounter < retries) {
+            retryCounter++;
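+            // Drain the node (evicting its pods) and then delete it from the Kubernetes cluster via kubectl over SSH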
+            try {
+                Pair<Boolean, String> result = SshHelper.sshExecute(ipAddress, port, CLUSTER_NODE_VM_USER,
+                        pkFile, null, String.format("sudo kubectl drain %s --ignore-daemonsets --delete-local-data", hostName),
+                        10000, 10000, 60000);
+                if (!result.first()) {
+                    LOGGER.warn(String.format("Draining node: %s on VM ID: %s in Kubernetes cluster ID: %s unsuccessful", hostName, userVm.getUuid(), kubernetesCluster.getUuid()));
+                } else {
+                    result = SshHelper.sshExecute(ipAddress, port, CLUSTER_NODE_VM_USER,
+                            pkFile, null, String.format("sudo kubectl delete node %s", hostName),
+                            10000, 10000, 30000);
+                    if (result.first()) {
+                        return true;
+                    } else {
+                        LOGGER.warn(String.format("Deleting node: %s on VM ID: %s in Kubernetes cluster ID: %s unsuccessful", hostName, userVm.getUuid(), kubernetesCluster.getUuid()));
+                    }
+                }
+                break;
+            } catch (Exception e) {
+                String msg = String.format("Failed to remove Kubernetes cluster ID: %s node: %s on VM ID: %s", kubernetesCluster.getUuid(), hostName, userVm.getUuid());
+                LOGGER.warn(msg, e);
+            }
+            try {
+                Thread.sleep(waitDuration);
+            } catch (InterruptedException ie) {
+                LOGGER.error(String.format("Error while waiting for Kubernetes cluster ID: %s node: %s on VM ID: %s removal", kubernetesCluster.getUuid(), hostName, userVm.getUuid()), ie);
+            }
+        }
+        return false;
+    }
+
+    private void validateKubernetesClusterScaleOfferingParameters() throws CloudRuntimeException {
+        if (KubernetesCluster.State.Created.equals(originalState)) {
+            return;
+        }
+        final long originalNodeCount = kubernetesCluster.getTotalNodeCount();
+        List<KubernetesClusterVmMapVO> vmList = kubernetesClusterVmMapDao.listByClusterId(kubernetesCluster.getId());
+        if (vmList == null || vmList.isEmpty() || vmList.size() < originalNodeCount) {
+            logTransitStateToFailedIfNeededAndThrow(Level.WARN, String.format("Scaling Kubernetes cluster ID: %s failed, it is in unstable state as not enough existing VM instances found!", kubernetesCluster.getUuid()));
+        } else {
+            for (KubernetesClusterVmMapVO vmMapVO : vmList) {
+                VMInstanceVO vmInstance = vmInstanceDao.findById(vmMapVO.getVmId());
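+                // Scaling the offering of running VMs is only supported on XenServer, VMware and Simulator hypervisors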
+                if (vmInstance != null && vmInstance.getState().equals(VirtualMachine.State.Running) &&
+                        vmInstance.getHypervisorType() != Hypervisor.HypervisorType.XenServer &&
+                        vmInstance.getHypervisorType() != Hypervisor.HypervisorType.VMware &&
+                        vmInstance.getHypervisorType() != Hypervisor.HypervisorType.Simulator) {
+                    logTransitStateToFailedIfNeededAndThrow(Level.WARN, String.format("Scaling Kubernetes cluster ID: %s failed, scaling Kubernetes cluster with running VMs on hypervisor %s is not supported!", kubernetesCluster.getUuid(), vmInstance.getHypervisorType()));
+                }
+            }
+        }
+    }
+
+    private void validateKubernetesClusterScaleSizeParameters() throws CloudRuntimeException {
+        final long originalClusterSize = kubernetesCluster.getNodeCount();
+        if (network == null) {
+            logTransitStateToFailedIfNeededAndThrow(Level.WARN, String.format("Scaling failed for Kubernetes cluster ID: %s, cluster network not found", kubernetesCluster.getUuid()));
+        }
+        // Check capacity and transition state
+        final long newVmRequiredCount = clusterSize - originalClusterSize;
+        final ServiceOffering clusterServiceOffering = serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId());
+        if (clusterServiceOffering == null) {
+            logTransitStateToFailedIfNeededAndThrow(Level.WARN, String.format("Scaling failed for Kubernetes cluster ID: %s, cluster service offering not found", kubernetesCluster.getUuid()));
+        }
+        if (newVmRequiredCount > 0) {
+            final DataCenter zone = dataCenterDao.findById(kubernetesCluster.getZoneId());
+            try {
+                if (originalState.equals(KubernetesCluster.State.Running)) {
+                    plan(newVmRequiredCount, zone, clusterServiceOffering);
+                } else {
+                    plan(kubernetesCluster.getTotalNodeCount() + newVmRequiredCount, zone, clusterServiceOffering);
+                }
+            } catch (InsufficientCapacityException e) {
+                logTransitStateToFailedIfNeededAndThrow(Level.WARN, String.format("Scaling failed for Kubernetes cluster ID: %s in zone ID: %s, insufficient capacity", kubernetesCluster.getUuid(), zone.getUuid()));
+            }
+        }
+        List<KubernetesClusterVmMapVO> vmList = kubernetesClusterVmMapDao.listByClusterId(kubernetesCluster.getId());
+        if (CollectionUtils.isEmpty(vmList) || vmList.size() < kubernetesCluster.getTotalNodeCount()) {
+            logTransitStateToFailedIfNeededAndThrow(Level.ERROR, String.format("Scaling failed for Kubernetes cluster ID: %s, it is in unstable state as not enough existing VM instances found", kubernetesCluster.getUuid()));
+        }
+    }
+
+    private void scaleKubernetesClusterOffering() throws CloudRuntimeException {
+        validateKubernetesClusterScaleOfferingParameters();
+        if (!kubernetesCluster.getState().equals(KubernetesCluster.State.Scaling)) {
+            stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.ScaleUpRequested);
+        }
+        if (KubernetesCluster.State.Created.equals(originalState)) {
+            kubernetesCluster = updateKubernetesClusterEntry(null, serviceOffering);
+            return;
+        }
+        final long size = kubernetesCluster.getTotalNodeCount();
+        List<KubernetesClusterVmMapVO> vmList = kubernetesClusterVmMapDao.listByClusterId(kubernetesCluster.getId());
+        final long toBeScaledVMCount = Math.min(vmList.size(), size);
+        for (long i = 0; i < toBeScaledVMCount; i++) {
+            KubernetesClusterVmMapVO vmMapVO = vmList.get((int) i);
+            UserVmVO userVM = userVmDao.findById(vmMapVO.getVmId());
+            boolean result = false;
+            try {
+                result = userVmManager.upgradeVirtualMachine(userVM.getId(), serviceOffering.getId(), new HashMap<String, String>());
+            } catch (ResourceUnavailableException | ManagementServerException | ConcurrentOperationException | VirtualMachineMigrationException e) {
+                logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster ID: %s failed, unable to scale cluster VM ID: %s", kubernetesCluster.getUuid(), userVM.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed, e);
+            }
+            if (!result) {
+                logTransitStateAndThrow(Level.WARN, String.format("Scaling Kubernetes cluster ID: %s failed, unable to scale cluster VM ID: %s", kubernetesCluster.getUuid(), userVM.getUuid()),kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);
+            }
+            if (System.currentTimeMillis() > scaleTimeoutTime) {
+                logTransitStateAndThrow(Level.WARN, String.format("Scaling Kubernetes cluster ID: %s failed, scaling action timed out", kubernetesCluster.getUuid()),kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);
+            }
+        }
+        kubernetesCluster = updateKubernetesClusterEntry(null, serviceOffering);
+    }
+
+    private void scaleDownKubernetesClusterSize() throws CloudRuntimeException {
+        if (!kubernetesCluster.getState().equals(KubernetesCluster.State.Scaling)) {
+            stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.ScaleDownRequested);
+        }
+        final List<KubernetesClusterVmMapVO> originalVmList  = getKubernetesClusterVMMaps();
+        int i = originalVmList.size() - 1;
+        List<Long> removedVmIds = new ArrayList<>();
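+        // Remove node VMs from the tail of the VM list until only the master nodes plus the requested cluster size remain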
+        while (i >= kubernetesCluster.getMasterNodeCount() + clusterSize) {
+            KubernetesClusterVmMapVO vmMapVO = originalVmList.get(i);
+            UserVmVO userVM = userVmDao.findById(vmMapVO.getVmId());
+            if (!removeKubernetesClusterNode(publicIpAddress, sshPort, userVM, 3, 30000)) {
+                logTransitStateAndThrow(Level.ERROR, String.format("Scaling failed for Kubernetes cluster ID: %s, failed to remove Kubernetes node: %s running on VM ID: %s", kubernetesCluster.getUuid(), userVM.getHostName(), userVM.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);
+            }
+            // For removing port-forwarding network rules
+            removedVmIds.add(userVM.getId());
+            try {
+                UserVm vm = userVmService.destroyVm(userVM.getId(), true);
+                if (!userVmManager.expunge(userVM, CallContext.current().getCallingUserId(), CallContext.current().getCallingAccount())) {
+                    logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster ID: %s failed, unable to expunge VM '%s'."
+                            , kubernetesCluster.getUuid()
+                            , vm.getInstanceName()),
+                            kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);
+                }
+            } catch (ResourceUnavailableException e) {
+                logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster ID: %s failed, unable to remove VM ID: %s"
+                        , kubernetesCluster.getUuid() , userVM.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed, e);
+            }
+            kubernetesClusterVmMapDao.expunge(vmMapVO.getId());
+            if (System.currentTimeMillis() > scaleTimeoutTime) {
+                logTransitStateAndThrow(Level.WARN, String.format("Scaling Kubernetes cluster ID: %s failed, scaling action timed out", kubernetesCluster.getUuid()),kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);
+            }
+            i--;
+        }
+        // Scale network rules to update firewall rule
+        try {
+            scaleKubernetesClusterNetworkRules(null, removedVmIds);
+        } catch (ManagementServerException e) {
+            logTransitStateAndThrow(Level.ERROR, String.format("Scaling failed for Kubernetes cluster ID: %s, unable to update network rules", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed, e);
+        }
+    }
+
+    private void scaleUpKubernetesClusterSize(final long newVmCount) throws CloudRuntimeException {
+        if (!kubernetesCluster.getState().equals(KubernetesCluster.State.Scaling)) {
+            stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.ScaleUpRequested);
+        }
+        List<UserVm> clusterVMs = new ArrayList<>();
+        List<Long> clusterVMIds = new ArrayList<>();
+        try {
+            clusterVMs = provisionKubernetesClusterNodeVms((int)(newVmCount + kubernetesCluster.getNodeCount()), (int)kubernetesCluster.getNodeCount(), publicIpAddress);
+        } catch (CloudRuntimeException | ManagementServerException | ResourceUnavailableException | InsufficientCapacityException e) {
+            logTransitStateToFailedIfNeededAndThrow(Level.ERROR, String.format("Scaling failed for Kubernetes cluster ID: %s, unable to provision node VM in the cluster", kubernetesCluster.getUuid()), e);
+        }
+        attachIsoKubernetesVMs(clusterVMs);
+        for (UserVm vm : clusterVMs) {
+            clusterVMIds.add(vm.getId());
+        }
+        try {
+            scaleKubernetesClusterNetworkRules(clusterVMIds, null);
+        } catch (ManagementServerException e) {
+            logTransitStateToFailedIfNeededAndThrow(Level.ERROR, String.format("Scaling failed for Kubernetes cluster ID: %s, unable to update network rules", kubernetesCluster.getUuid()), e);
+        }
+        KubernetesClusterVO kubernetesClusterVO = kubernetesClusterDao.findById(kubernetesCluster.getId());
+        kubernetesClusterVO.setNodeCount(clusterSize);
+        boolean readyNodesCountValid = KubernetesClusterUtil.validateKubernetesClusterReadyNodesCount(kubernetesClusterVO, publicIpAddress, sshPort,
+                CLUSTER_NODE_VM_USER, sshKeyFile, scaleTimeoutTime, 15000);
+        detachIsoKubernetesVMs(clusterVMs);
+        if (!readyNodesCountValid) { // Scaling failed
+            logTransitStateToFailedIfNeededAndThrow(Level.ERROR, String.format("Scaling unsuccessful for Kubernetes cluster ID: %s as it does not have desired number of nodes in ready state", kubernetesCluster.getUuid()));
+        }
+    }
+
+    private void scaleKubernetesClusterSize() throws CloudRuntimeException {
+        validateKubernetesClusterScaleSizeParameters();
+        final long originalClusterSize = kubernetesCluster.getNodeCount();
+        final long newVmRequiredCount = clusterSize - originalClusterSize;
+        if (KubernetesCluster.State.Created.equals(originalState)) {
+            if (!kubernetesCluster.getState().equals(KubernetesCluster.State.Scaling)) {
+                stateTransitTo(kubernetesCluster.getId(), newVmRequiredCount > 0 ? KubernetesCluster.Event.ScaleUpRequested : KubernetesCluster.Event.ScaleDownRequested);
+            }
+            kubernetesCluster = updateKubernetesClusterEntry(null, serviceOffering);
+            return;
+        }
+        Pair<String, Integer> publicIpSshPort = getKubernetesClusterServerIpSshPort(null);
+        publicIpAddress = publicIpSshPort.first();
+        sshPort = publicIpSshPort.second();
+        if (Strings.isNullOrEmpty(publicIpAddress)) {
+            logTransitStateToFailedIfNeededAndThrow(Level.ERROR, String.format("Scaling failed for Kubernetes cluster ID: %s, unable to retrieve associated public IP", kubernetesCluster.getUuid()));
+        }
+        if (newVmRequiredCount < 0) { // downscale
+            scaleDownKubernetesClusterSize();
+        } else { // upscale, same node count handled above
+            scaleUpKubernetesClusterSize(newVmRequiredCount);
+        }
+        kubernetesCluster = updateKubernetesClusterEntry(clusterSize, null);
+    }
+
+    public boolean scaleCluster() throws CloudRuntimeException {
+        init();
+        if (LOGGER.isInfoEnabled()) {
+            LOGGER.info(String.format("Scaling Kubernetes cluster ID: %s", kubernetesCluster.getUuid()));
+        }
+        scaleTimeoutTime = System.currentTimeMillis() + KubernetesClusterService.KubernetesClusterScaleTimeout.value() * 1000;
+        final long originalClusterSize = kubernetesCluster.getNodeCount();
+        final ServiceOffering existingServiceOffering = serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId());
+        if (existingServiceOffering == null) {
+            logAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster ID: %s failed, service offering for the Kubernetes cluster not found!", kubernetesCluster.getUuid()));
+        }
+        final boolean serviceOfferingScalingNeeded = serviceOffering != null && serviceOffering.getId() != existingServiceOffering.getId();
+        final boolean clusterSizeScalingNeeded = clusterSize != null && clusterSize != originalClusterSize;
+        final long newVMRequired = clusterSize == null ? 0 : clusterSize - originalClusterSize;
+        if (serviceOfferingScalingNeeded && clusterSizeScalingNeeded) {
+            if (newVMRequired > 0) {
+                scaleKubernetesClusterOffering();
+                scaleKubernetesClusterSize();
+            } else {
+                scaleKubernetesClusterSize();
+                scaleKubernetesClusterOffering();
+            }
+        } else if (serviceOfferingScalingNeeded) {
+            scaleKubernetesClusterOffering();
+        } else if (clusterSizeScalingNeeded) {
+            scaleKubernetesClusterSize();
+        }
+        stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded);
+        return true;
+    }
+}
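
For reference, a condensed illustrative sketch (not part of the commit) of the ordering decision
scaleCluster() makes when both the service offering and the cluster size change: when nodes are
being added, the existing VMs are upgraded to the new offering before the new node VMs are
provisioned; when nodes are being removed, the surplus VMs are removed first so that only the
remaining VMs are upgraded. scaleOffering/scaleSize below are hypothetical stand-ins for
scaleKubernetesClusterOffering() and scaleKubernetesClusterSize().

    // Illustrative sketch only, not part of the commit
    final class ScaleOrdering {
        static void scale(Long requestedSize, long currentSize, boolean offeringChanged,
                          Runnable scaleOffering, Runnable scaleSize) {
            final boolean sizeChanged = requestedSize != null && requestedSize != currentSize;
            final long delta = requestedSize == null ? 0 : requestedSize - currentSize;
            if (offeringChanged && sizeChanged) {
                if (delta > 0) {
                    scaleOffering.run(); // upgrade the existing VMs first ...
                    scaleSize.run();     // ... then provision the additional node VMs
                } else {
                    scaleSize.run();     // remove surplus node VMs first ...
                    scaleOffering.run(); // ... so only the remaining VMs are upgraded
                }
            } else if (offeringChanged) {
                scaleOffering.run();
            } else if (sizeChanged) {
                scaleSize.run();
            }
        }
    }
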
diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java
new file mode 100644
index 0000000..d452563
--- /dev/null
+++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java
@@ -0,0 +1,640 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloud.kubernetes.cluster.actionworkers;
+
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.net.UnknownHostException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.cloudstack.api.BaseCmd;
+import org.apache.cloudstack.framework.ca.Certificate;
+import org.apache.cloudstack.utils.security.CertUtils;
+import org.apache.commons.codec.binary.Base64;
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.log4j.Level;
+
+import com.cloud.dc.DataCenter;
+import com.cloud.dc.Vlan;
+import com.cloud.dc.VlanVO;
+import com.cloud.deploy.DeployDestination;
+import com.cloud.exception.ConcurrentOperationException;
+import com.cloud.exception.InsufficientAddressCapacityException;
+import com.cloud.exception.InsufficientCapacityException;
+import com.cloud.exception.ManagementServerException;
+import com.cloud.exception.NetworkRuleConflictException;
+import com.cloud.exception.ResourceUnavailableException;
+import com.cloud.hypervisor.Hypervisor;
+import com.cloud.kubernetes.cluster.KubernetesCluster;
+import com.cloud.kubernetes.cluster.KubernetesClusterDetailsVO;
+import com.cloud.kubernetes.cluster.KubernetesClusterManagerImpl;
+import com.cloud.kubernetes.cluster.KubernetesClusterService;
+import com.cloud.kubernetes.cluster.KubernetesClusterVO;
+import com.cloud.kubernetes.cluster.KubernetesClusterVmMapVO;
+import com.cloud.kubernetes.cluster.utils.KubernetesClusterUtil;
+import com.cloud.kubernetes.version.KubernetesSupportedVersion;
+import com.cloud.kubernetes.version.KubernetesVersionManagerImpl;
+import com.cloud.network.IpAddress;
+import com.cloud.network.Network;
+import com.cloud.network.addr.PublicIp;
+import com.cloud.network.rules.LoadBalancer;
+import com.cloud.offering.ServiceOffering;
+import com.cloud.template.VirtualMachineTemplate;
+import com.cloud.user.Account;
+import com.cloud.user.SSHKeyPairVO;
+import com.cloud.uservm.UserVm;
+import com.cloud.utils.Pair;
+import com.cloud.utils.StringUtils;
+import com.cloud.utils.exception.CloudRuntimeException;
+import com.cloud.utils.net.Ip;
+import com.cloud.utils.net.NetUtils;
+import com.cloud.vm.Nic;
+import com.cloud.vm.ReservationContext;
+import com.cloud.vm.ReservationContextImpl;
+import com.cloud.vm.VirtualMachine;
+import com.google.common.base.Strings;
+
+public class KubernetesClusterStartWorker extends KubernetesClusterResourceModifierActionWorker {
+
+    public KubernetesClusterStartWorker(final KubernetesCluster kubernetesCluster, final KubernetesClusterManagerImpl clusterManager) {
+        super(kubernetesCluster, clusterManager);
+    }
+
+    private Pair<String, Map<Long, Network.IpAddresses>> getKubernetesMasterIpAddresses(final DataCenter zone, final Network network, final Account account) throws InsufficientAddressCapacityException {
+        String masterIp = null;
+        Map<Long, Network.IpAddresses> requestedIps = null;
+        if (Network.GuestType.Shared.equals(network.getGuestType())) {
+            List<Long> vlanIds = new ArrayList<>();
+            List<VlanVO> vlans = vlanDao.listVlansByNetworkId(network.getId());
+            for (VlanVO vlan : vlans) {
+                vlanIds.add(vlan.getId());
+            }
+            PublicIp ip = ipAddressManager.getAvailablePublicIpAddressFromVlans(zone.getId(), null, account, Vlan.VlanType.DirectAttached, vlanIds, network.getId(), null, false);
+            if (ip != null) {
+                masterIp = ip.getAddress().toString();
+                Ip ipAddress = ip.getAddress();
+                requestedIps = new HashMap<>();
+                // Request only an IPv4 address for the master NIC; IPv6 is left unset
+                requestedIps.put(network.getId(), new Network.IpAddresses(ipAddress.isIp4() ? ipAddress.addr() : null, null));
+            }
+        } else {
+            masterIp = ipAddressManager.acquireGuestIpAddress(networkDao.findById(kubernetesCluster.getNetworkId()), null);
+        }
+        return new Pair<>(masterIp, requestedIps);
+    }
+
+    private boolean isKubernetesVersionSupportsHA() {
+        boolean haSupported = false;
+        final KubernetesSupportedVersion version = kubernetesSupportedVersionDao.findById(kubernetesCluster.getKubernetesVersionId());
+        if (version != null) {
+            try {
+                if (KubernetesVersionManagerImpl.compareSemanticVersions(version.getSemanticVersion(), KubernetesClusterService.MIN_KUBERNETES_VERSION_HA_SUPPORT) >= 0) {
+                    haSupported = true;
+                }
+            } catch (IllegalArgumentException e) {
+                LOGGER.error(String.format("Unable to compare Kubernetes version for cluster version ID: %s with %s", version.getUuid(), KubernetesClusterService.MIN_KUBERNETES_VERSION_HA_SUPPORT), e);
+            }
+        }
+        return haSupported;
+    }
+
+    private String getKubernetesMasterConfig(final String masterIp, final String serverIp,
+                                             final String hostName, final boolean haSupported,
+                                             final boolean ejectIso) throws IOException {
+        String k8sMasterConfig = readResourceFile("/conf/k8s-master.yml");
+        final String apiServerCert = "{{ k8s_master.apiserver.crt }}";
+        final String apiServerKey = "{{ k8s_master.apiserver.key }}";
+        final String caCert = "{{ k8s_master.ca.crt }}";
+        final String sshPubKey = "{{ k8s.ssh.pub.key }}";
+        final String clusterToken = "{{ k8s_master.cluster.token }}";
+        final String clusterInitArgsKey = "{{ k8s_master.cluster.initargs }}";
+        final String ejectIsoKey = "{{ k8s.eject.iso }}";
+        final List<String> addresses = new ArrayList<>();
+        addresses.add(masterIp);
+        if (!serverIp.equals(masterIp)) {
+            addresses.add(serverIp);
+        }
+        final Certificate certificate = caManager.issueCertificate(null, Arrays.asList(hostName, "kubernetes",
+                "kubernetes.default", "kubernetes.default.svc", "kubernetes.default.svc.cluster", "kubernetes.default.svc.cluster.local"),
+                addresses, 3650, null);
+        final String tlsClientCert = CertUtils.x509CertificateToPem(certificate.getClientCertificate());
+        final String tlsPrivateKey = CertUtils.privateKeyToPem(certificate.getPrivateKey());
+        final String tlsCaCert = CertUtils.x509CertificatesToPem(certificate.getCaCertificates());
+        k8sMasterConfig = k8sMasterConfig.replace(apiServerCert, tlsClientCert.replace("\n", "\n      "));
+        k8sMasterConfig = k8sMasterConfig.replace(apiServerKey, tlsPrivateKey.replace("\n", "\n      "));
+        k8sMasterConfig = k8sMasterConfig.replace(caCert, tlsCaCert.replace("\n", "\n      "));
+        String pubKey = "- \"" + configurationDao.getValue("ssh.publickey") + "\"";
+        String sshKeyPair = kubernetesCluster.getKeyPair();
+        if (!Strings.isNullOrEmpty(sshKeyPair)) {
+            SSHKeyPairVO sshkp = sshKeyPairDao.findByName(owner.getAccountId(), owner.getDomainId(), sshKeyPair);
+            if (sshkp != null) {
+                pubKey += "\n  - \"" + sshkp.getPublicKey() + "\"";
+            }
+        }
+        k8sMasterConfig = k8sMasterConfig.replace(sshPubKey, pubKey);
+        k8sMasterConfig = k8sMasterConfig.replace(clusterToken, KubernetesClusterUtil.generateClusterToken(kubernetesCluster));
+        String initArgs = "";
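+        // For HA-capable versions, kubeadm is initialized with a shared control-plane endpoint and a
+        // certificate key so that the additional master VMs can join as control-plane members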
+        if (haSupported) {
+            initArgs = String.format("--control-plane-endpoint %s:%d --upload-certs --certificate-key %s ",
+                    serverIp,
+                    CLUSTER_API_PORT,
+                    KubernetesClusterUtil.generateClusterHACertificateKey(kubernetesCluster));
+        }
+        initArgs += String.format("--apiserver-cert-extra-sans=%s", serverIp);
+        k8sMasterConfig = k8sMasterConfig.replace(clusterInitArgsKey, initArgs);
+        k8sMasterConfig = k8sMasterConfig.replace(ejectIsoKey, String.valueOf(ejectIso));
+        return k8sMasterConfig;
+    }
+
+    private UserVm createKubernetesMaster(final Network network, String serverIp) throws ManagementServerException,
+            ResourceUnavailableException, InsufficientCapacityException {
+        UserVm masterVm = null;
+        DataCenter zone = dataCenterDao.findById(kubernetesCluster.getZoneId());
+        ServiceOffering serviceOffering = serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId());
+        VirtualMachineTemplate template = templateDao.findById(kubernetesCluster.getTemplateId());
+        List<Long> networkIds = new ArrayList<Long>();
+        networkIds.add(kubernetesCluster.getNetworkId());
+        Pair<String, Map<Long, Network.IpAddresses>> ipAddresses = getKubernetesMasterIpAddresses(zone, network, owner);
+        String masterIp = ipAddresses.first();
+        Map<Long, Network.IpAddresses> requestedIps = ipAddresses.second();
+        if (Network.GuestType.Shared.equals(network.getGuestType()) && Strings.isNullOrEmpty(serverIp)) {
+            serverIp = masterIp;
+        }
+        Network.IpAddresses addrs = new Network.IpAddresses(masterIp, null);
+        long rootDiskSize = kubernetesCluster.getNodeRootDiskSize();
+        Map<String, String> customParameterMap = new HashMap<String, String>();
+        if (rootDiskSize > 0) {
+            customParameterMap.put("rootdisksize", String.valueOf(rootDiskSize));
+        }
+        String hostName = kubernetesClusterNodeNamePrefix + "-master";
+        if (kubernetesCluster.getMasterNodeCount() > 1) {
+            hostName += "-1";
+        }
+        hostName = getKubernetesClusterNodeAvailableName(hostName);
+        boolean haSupported = isKubernetesVersionSupportsHA();
+        String k8sMasterConfig = null;
+        try {
+            k8sMasterConfig = getKubernetesMasterConfig(masterIp, serverIp, hostName, haSupported, Hypervisor.HypervisorType.VMware.equals(template.getHypervisorType()));
+        } catch (IOException e) {
+            logAndThrow(Level.ERROR, "Failed to read Kubernetes master configuration file", e);
+        }
+        String base64UserData = Base64.encodeBase64String(k8sMasterConfig.getBytes(StringUtils.getPreferredCharset()));
+        masterVm = userVmService.createAdvancedVirtualMachine(zone, serviceOffering, template, networkIds, owner,
+                hostName, hostName, null, null, null,
+                null, BaseCmd.HTTPMethod.POST, base64UserData, kubernetesCluster.getKeyPair(),
+                requestedIps, addrs, null, null, null, customParameterMap, null, null, null, null);
+        if (LOGGER.isInfoEnabled()) {
+            LOGGER.info(String.format("Created master VM ID: %s, %s in the Kubernetes cluster ID: %s", masterVm.getUuid(), hostName, kubernetesCluster.getUuid()));
+        }
+        return masterVm;
+    }
+
+    private String getKubernetesAdditionalMasterConfig(final String joinIp, final boolean ejectIso) throws IOException {
+        String k8sMasterConfig = readResourceFile("/conf/k8s-master-add.yml");
+        final String joinIpKey = "{{ k8s_master.join_ip }}";
+        final String clusterTokenKey = "{{ k8s_master.cluster.token }}";
+        final String sshPubKey = "{{ k8s.ssh.pub.key }}";
+        final String clusterHACertificateKey = "{{ k8s_master.cluster.ha.certificate.key }}";
+        final String ejectIsoKey = "{{ k8s.eject.iso }}";
+        String pubKey = "- \"" + configurationDao.getValue("ssh.publickey") + "\"";
+        String sshKeyPair = kubernetesCluster.getKeyPair();
+        if (!Strings.isNullOrEmpty(sshKeyPair)) {
+            SSHKeyPairVO sshkp = sshKeyPairDao.findByName(owner.getAccountId(), owner.getDomainId(), sshKeyPair);
+            if (sshkp != null) {
+                pubKey += "\n  - \"" + sshkp.getPublicKey() + "\"";
+            }
+        }
+        k8sMasterConfig = k8sMasterConfig.replace(sshPubKey, pubKey);
+        k8sMasterConfig = k8sMasterConfig.replace(joinIpKey, joinIp);
+        k8sMasterConfig = k8sMasterConfig.replace(clusterTokenKey, KubernetesClusterUtil.generateClusterToken(kubernetesCluster));
+        k8sMasterConfig = k8sMasterConfig.replace(clusterHACertificateKey, KubernetesClusterUtil.generateClusterHACertificateKey(kubernetesCluster));
+        k8sMasterConfig = k8sMasterConfig.replace(ejectIsoKey, String.valueOf(ejectIso));
+        return k8sMasterConfig;
+    }
+
+    private UserVm createKubernetesAdditionalMaster(final String joinIp, final int additionalMasterNodeInstance) throws ManagementServerException,
+            ResourceUnavailableException, InsufficientCapacityException {
+        UserVm additionalMasterVm = null;
+        DataCenter zone = dataCenterDao.findById(kubernetesCluster.getZoneId());
+        ServiceOffering serviceOffering = serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId());
+        VirtualMachineTemplate template = templateDao.findById(kubernetesCluster.getTemplateId());
+        List<Long> networkIds = new ArrayList<Long>();
+        networkIds.add(kubernetesCluster.getNetworkId());
+        Network.IpAddresses addrs = new Network.IpAddresses(null, null);
+        long rootDiskSize = kubernetesCluster.getNodeRootDiskSize();
+        Map<String, String> customParameterMap = new HashMap<String, String>();
+        if (rootDiskSize > 0) {
+            customParameterMap.put("rootdisksize", String.valueOf(rootDiskSize));
+        }
+        String hostName = getKubernetesClusterNodeAvailableName(String.format("%s-master-%d", kubernetesClusterNodeNamePrefix, additionalMasterNodeInstance + 1));
+        String k8sMasterConfig = null;
+        try {
+            k8sMasterConfig = getKubernetesAdditionalMasterConfig(joinIp, Hypervisor.HypervisorType.VMware.equals(template.getHypervisorType()));
+        } catch (IOException e) {
+            logAndThrow(Level.ERROR, "Failed to read Kubernetes master configuration file", e);
+        }
+        String base64UserData = Base64.encodeBase64String(k8sMasterConfig.getBytes(StringUtils.getPreferredCharset()));
+        additionalMasterVm = userVmService.createAdvancedVirtualMachine(zone, serviceOffering, template, networkIds, owner,
+                hostName, hostName, null, null, null,
+                null, BaseCmd.HTTPMethod.POST, base64UserData, kubernetesCluster.getKeyPair(),
+                null, addrs, null, null, null, customParameterMap, null, null, null, null);
+        if (LOGGER.isInfoEnabled()) {
+            LOGGER.info(String.format("Created master VM ID: %s, %s in the Kubernetes cluster ID: %s", additionalMasterVm.getUuid(), hostName, kubernetesCluster.getUuid()));
+        }
+        return additionalMasterVm;
+    }
+
+    private UserVm provisionKubernetesClusterMasterVm(final Network network, final String publicIpAddress) throws
+            ManagementServerException, InsufficientCapacityException, ResourceUnavailableException {
+        UserVm k8sMasterVM = null;
+        k8sMasterVM = createKubernetesMaster(network, publicIpAddress);
+        addKubernetesClusterVm(kubernetesCluster.getId(), k8sMasterVM.getId());
+        startKubernetesVM(k8sMasterVM);
+        k8sMasterVM = userVmDao.findById(k8sMasterVM.getId());
+        if (k8sMasterVM == null) {
+            throw new ManagementServerException(String.format("Failed to provision master VM for Kubernetes cluster ID: %s" , kubernetesCluster.getUuid()));
+        }
+        if (LOGGER.isInfoEnabled()) {
+            LOGGER.info(String.format("Provisioned the master VM ID: %s in to the Kubernetes cluster ID: %s", k8sMasterVM.getUuid(), kubernetesCluster.getUuid()));
+        }
+        return k8sMasterVM;
+    }
+
+    private List<UserVm> provisionKubernetesClusterAdditionalMasterVms(final String publicIpAddress) throws
+            InsufficientCapacityException, ManagementServerException, ResourceUnavailableException {
+        List<UserVm> additionalMasters = new ArrayList<>();
+        if (kubernetesCluster.getMasterNodeCount() > 1) {
+            for (int i = 1; i < kubernetesCluster.getMasterNodeCount(); i++) {
+                UserVm vm = null;
+                vm = createKubernetesAdditionalMaster(publicIpAddress, i);
+                addKubernetesClusterVm(kubernetesCluster.getId(), vm.getId());
+                startKubernetesVM(vm);
+                vm = userVmDao.findById(vm.getId());
+                if (vm == null) {
+                    throw new ManagementServerException(String.format("Failed to provision additional master VM for Kubernetes cluster ID: %s" , kubernetesCluster.getUuid()));
+                }
+                additionalMasters.add(vm);
+                if (LOGGER.isInfoEnabled()) {
+                    LOGGER.info(String.format("Provisioned additional master VM ID: %s in to the Kubernetes cluster ID: %s", vm.getUuid(), kubernetesCluster.getUuid()));
+                }
+            }
+        }
+        return additionalMasters;
+    }
+
+    private Network startKubernetesClusterNetwork(final DeployDestination destination) throws ManagementServerException {
+        final ReservationContext context = new ReservationContextImpl(null, null, null, owner);
+        Network network = networkDao.findById(kubernetesCluster.getNetworkId());
+        if (network == null) {
+            String msg  = String.format("Network for Kubernetes cluster ID: %s not found", kubernetesCluster.getUuid());
+            LOGGER.warn(msg);
+            stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed);
+            throw new ManagementServerException(msg);
+        }
+        try {
+            networkMgr.startNetwork(network.getId(), destination, context);
+            if (LOGGER.isInfoEnabled()) {
+                LOGGER.info(String.format("Network ID: %s is started for the  Kubernetes cluster ID: %s", network.getUuid(), kubernetesCluster.getUuid()));
+            }
+        } catch (ConcurrentOperationException | ResourceUnavailableException |InsufficientCapacityException e) {
+            String msg = String.format("Failed to start Kubernetes cluster ID: %s as unable to start associated network ID: %s" , kubernetesCluster.getUuid(), network.getUuid());
+            LOGGER.error(msg, e);
+            stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed);
+            throw new ManagementServerException(msg, e);
+        }
+        return network;
+    }
+
+    private void provisionLoadBalancerRule(final IpAddress publicIp, final Network network,
+                                           final Account account, final List<Long> clusterVMIds, final int port) throws NetworkRuleConflictException,
+            InsufficientAddressCapacityException {
+        LoadBalancer lb = lbService.createPublicLoadBalancerRule(null, "api-lb", "LB rule for API access",
+                port, port, port, port,
+                publicIp.getId(), NetUtils.TCP_PROTO, "roundrobin", network.getId(),
+                account.getId(), false, NetUtils.TCP_PROTO, true);
+
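+        // Only the master node VMs (the first masterNodeCount entries of clusterVMIds) are assigned to the API load balancer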
+        Map<Long, List<String>> vmIdIpMap = new HashMap<>();
+        for (int i = 0; i < kubernetesCluster.getMasterNodeCount(); ++i) {
+            List<String> ips = new ArrayList<>();
+            Nic masterVmNic = networkModel.getNicInNetwork(clusterVMIds.get(i), kubernetesCluster.getNetworkId());
+            ips.add(masterVmNic.getIPv4Address());
+            vmIdIpMap.put(clusterVMIds.get(i), ips);
+        }
+        lbService.assignToLoadBalancer(lb.getId(), null, vmIdIpMap);
+    }
+
+    /**
+     * Sets up network rules for the Kubernetes cluster.
+     * Opens up firewall port CLUSTER_API_PORT, the secure port on which the Kubernetes
+     * API server is running, and creates a load balancing rule to forward public
+     * IP traffic to the master VMs' private IPs.
+     * Also opens up firewall ports NODES_DEFAULT_START_SSH_PORT to NODES_DEFAULT_START_SSH_PORT+n
+     * for SSH access and creates port-forwarding rules to forward public IP traffic to each node VM's private IP.
+     * @param network
+     * @param clusterVMs
+     * @throws ManagementServerException
+     */
+    private void setupKubernetesClusterNetworkRules(Network network, List<UserVm> clusterVMs) throws ManagementServerException {
+        if (!Network.GuestType.Isolated.equals(network.getGuestType())) {
+            if (LOGGER.isDebugEnabled()) {
+                LOGGER.debug(String.format("Network ID: %s for Kubernetes cluster ID: %s is not an isolated network, therefore, no need for network rules", network.getUuid(), kubernetesCluster.getUuid()));
+            }
+            return;
+        }
+        List<Long> clusterVMIds = new ArrayList<>();
+        for (UserVm vm : clusterVMs) {
+            clusterVMIds.add(vm.getId());
+        }
+        IpAddress publicIp = getSourceNatIp(network);
+        if (publicIp == null) {
+            throw new ManagementServerException(String.format("No source NAT IP addresses found for network ID: %s, Kubernetes cluster ID: %s", network.getUuid(), kubernetesCluster.getUuid()));
+        }
+
+        try {
+            provisionFirewallRules(publicIp, owner, CLUSTER_API_PORT, CLUSTER_API_PORT);
+            if (LOGGER.isInfoEnabled()) {
+                LOGGER.info(String.format("Provisioned firewall rule to open up port %d on %s for Kubernetes cluster ID: %s",
+                        CLUSTER_API_PORT, publicIp.getAddress().addr(), kubernetesCluster.getUuid()));
+            }
+        } catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException | NetworkRuleConflictException e) {
+            throw new ManagementServerException(String.format("Failed to provision firewall rules for API access for the Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), e);
+        }
+
+        try {
+            int endPort = CLUSTER_NODES_DEFAULT_START_SSH_PORT + clusterVMs.size() - 1;
+            provisionFirewallRules(publicIp, owner, CLUSTER_NODES_DEFAULT_START_SSH_PORT, endPort);
+            if (LOGGER.isInfoEnabled()) {
+                LOGGER.info(String.format("Provisioned firewall rule to open up port %d to %d on %s for Kubernetes cluster ID: %s", CLUSTER_NODES_DEFAULT_START_SSH_PORT, endPort, publicIp.getAddress().addr(), kubernetesCluster.getUuid()));
+            }
+        } catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException | NetworkRuleConflictException e) {
+            throw new ManagementServerException(String.format("Failed to provision firewall rules for SSH access for the Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), e);
+        }
+
+        // Load balancer rule for API access to the master node VMs
+        try {
+            provisionLoadBalancerRule(publicIp, network, owner, clusterVMIds, CLUSTER_API_PORT);
+        } catch (NetworkRuleConflictException | InsufficientAddressCapacityException e) {
+            throw new ManagementServerException(String.format("Failed to provision load balancer rule for API access for the Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), e);
+        }
+
+        // Port forwarding rules for SSH access on each node VM
+        try {
+            provisionSshPortForwardingRules(publicIp, network, owner, clusterVMIds, CLUSTER_NODES_DEFAULT_START_SSH_PORT);
+        } catch (ResourceUnavailableException | NetworkRuleConflictException e) {
+            throw new ManagementServerException(String.format("Failed to activate SSH port forwarding rules for the Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), e);
+        }
+    }
+
+    private void startKubernetesClusterVMs() {
+        List <UserVm> clusterVms = getKubernetesClusterVMs();
+        for (final UserVm vm : clusterVms) {
+            if (vm == null) {
+                logTransitStateAndThrow(Level.ERROR, String.format("Failed to start all VMs in Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);
+            }
+            try {
+                startKubernetesVM(vm);
+            } catch (ManagementServerException ex) {
+                LOGGER.warn(String.format("Failed to start VM ID: %s in Kubernetes cluster ID: %s due to ", vm.getUuid(), kubernetesCluster.getUuid()) + ex);
+                // dont bail out here. proceed further to stop the reset of the VM's
+            }
+        }
+        for (final UserVm userVm : clusterVms) {
+            UserVm vm = userVmDao.findById(userVm.getId());
+            if (vm == null || !vm.getState().equals(VirtualMachine.State.Running)) {
+                logTransitStateAndThrow(Level.ERROR, String.format("Failed to start all VMs in Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);
+            }
+        }
+    }
+
+    private boolean isKubernetesClusterKubeConfigAvailable(final long timeoutTime) {
+        if (Strings.isNullOrEmpty(publicIpAddress)) {
+            KubernetesClusterDetailsVO kubeConfigDetail = kubernetesClusterDetailsDao.findDetail(kubernetesCluster.getId(), "kubeConfigData");
+            if (kubeConfigDetail != null && !Strings.isNullOrEmpty(kubeConfigDetail.getValue())) {
+                return true;
+            }
+        }
+        String kubeConfig = KubernetesClusterUtil.getKubernetesClusterConfig(kubernetesCluster, publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, sshKeyFile, timeoutTime);
+        if (!Strings.isNullOrEmpty(kubeConfig)) {
+            final String masterVMPrivateIpAddress = getMasterVmPrivateIp();
+            if (!Strings.isNullOrEmpty(masterVMPrivateIpAddress)) {
+                kubeConfig = kubeConfig.replace(String.format("server: https://%s:%d", masterVMPrivateIpAddress, CLUSTER_API_PORT),
+                        String.format("server: https://%s:%d", publicIpAddress, CLUSTER_API_PORT));
+            }
+            kubernetesClusterDetailsDao.addDetail(kubernetesCluster.getId(), "kubeConfigData", Base64.encodeBase64String(kubeConfig.getBytes(StringUtils.getPreferredCharset())), false);
+            return true;
+        }
+        return false;
+    }
+
+    private boolean isKubernetesClusterDashboardServiceRunning(final boolean onCreate, final Long timeoutTime) {
+        if (!onCreate) {
+            KubernetesClusterDetailsVO dashboardServiceRunningDetail = kubernetesClusterDetailsDao.findDetail(kubernetesCluster.getId(), "dashboardServiceRunning");
+            if (dashboardServiceRunningDetail != null && Boolean.parseBoolean(dashboardServiceRunningDetail.getValue())) {
+                return true;
+            }
+        }
+        if (KubernetesClusterUtil.isKubernetesClusterDashboardServiceRunning(kubernetesCluster, publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, sshKeyFile, timeoutTime, 15000)) {
+            kubernetesClusterDetailsDao.addDetail(kubernetesCluster.getId(), "dashboardServiceRunning", String.valueOf(true), false);
+            return true;
+        }
+        return false;
+    }
+
+    private void updateKubernetesClusterEntryEndpoint() {
+        KubernetesClusterVO kubernetesClusterVO = kubernetesClusterDao.findById(kubernetesCluster.getId());
+        kubernetesClusterVO.setEndpoint(String.format("https://%s:%d/", publicIpAddress, CLUSTER_API_PORT));
+        kubernetesClusterDao.update(kubernetesCluster.getId(), kubernetesClusterVO);
+    }
+
+    public boolean startKubernetesClusterOnCreate() {
+        init();
+        if (LOGGER.isInfoEnabled()) {
+            LOGGER.info(String.format("Starting Kubernetes cluster ID: %s", kubernetesCluster.getUuid()));
+        }
+        final long startTimeoutTime = System.currentTimeMillis() + KubernetesClusterService.KubernetesClusterStartTimeout.value() * 1000;
+        stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.StartRequested);
+        DeployDestination dest = null;
+        try {
+            dest = plan();
+        } catch (InsufficientCapacityException e) {
+            logTransitStateAndThrow(Level.ERROR, String.format("Provisioning the cluster failed due to insufficient capacity in the Kubernetes cluster: %s", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed, e);
+        }
+        Network network = null;
+        try {
+            network = startKubernetesClusterNetwork(dest);
+        } catch (ManagementServerException e) {
+            logTransitStateAndThrow(Level.ERROR, String.format("Failed to start Kubernetes cluster ID: %s as its network cannot be started", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed, e);
+        }
+        Pair<String, Integer> publicIpSshPort = getKubernetesClusterServerIpSshPort(null);
+        publicIpAddress = publicIpSshPort.first();
+        if (Strings.isNullOrEmpty(publicIpAddress) &&
+                (Network.GuestType.Isolated.equals(network.getGuestType()) || kubernetesCluster.getMasterNodeCount() > 1)) { // Shared network, single-master cluster won't have an IP yet
+            logTransitStateAndThrow(Level.ERROR, String.format("Failed to start Kubernetes cluster ID: %s as no public IP found for the cluster" , kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed);
+        }
+        List<UserVm> clusterVMs = new ArrayList<>();
+        UserVm k8sMasterVM = null;
+        try {
+            k8sMasterVM = provisionKubernetesClusterMasterVm(network, publicIpAddress);
+        } catch (CloudRuntimeException | ManagementServerException | ResourceUnavailableException | InsufficientCapacityException e) {
+            logTransitStateAndThrow(Level.ERROR, String.format("Provisioning the master VM failed in the Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed, e);
+        }
+        clusterVMs.add(k8sMasterVM);
+        if (Strings.isNullOrEmpty(publicIpAddress)) {
+            publicIpSshPort = getKubernetesClusterServerIpSshPort(k8sMasterVM);
+            publicIpAddress = publicIpSshPort.first();
+            if (Strings.isNullOrEmpty(publicIpAddress)) {
+                logTransitStateAndThrow(Level.WARN, String.format("Failed to start Kubernetes cluster ID: %s as no public IP found for the cluster", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed);
+            }
+        }
+        try {
+            List<UserVm> additionalMasterVMs = provisionKubernetesClusterAdditionalMasterVms(publicIpAddress);
+            clusterVMs.addAll(additionalMasterVMs);
+        }  catch (CloudRuntimeException | ManagementServerException | ResourceUnavailableException | InsufficientCapacityException e) {
+            logTransitStateAndThrow(Level.ERROR, String.format("Provisioning additional master VM failed in the Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed, e);
+        }
+        try {
+            List<UserVm> nodeVMs = provisionKubernetesClusterNodeVms(kubernetesCluster.getNodeCount(), publicIpAddress);
+            clusterVMs.addAll(nodeVMs);
+        }  catch (CloudRuntimeException | ManagementServerException | ResourceUnavailableException | InsufficientCapacityException e) {
+            logTransitStateAndThrow(Level.ERROR, String.format("Provisioning node VM failed in the Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed, e);
+        }
+        if (LOGGER.isInfoEnabled()) {
+            LOGGER.info(String.format("Kubernetes cluster ID: %s VMs successfully provisioned", kubernetesCluster.getUuid()));
+        }
+        try {
+            setupKubernetesClusterNetworkRules(network, clusterVMs);
+        } catch (ManagementServerException e) {
+            logTransitStateAndThrow(Level.ERROR, String.format("Failed to setup Kubernetes cluster ID: %s, unable to setup network rules", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed, e);
+        }
+        attachIsoKubernetesVMs(clusterVMs);
+        if (!KubernetesClusterUtil.isKubernetesClusterMasterVmRunning(kubernetesCluster, publicIpAddress, publicIpSshPort.second(), startTimeoutTime)) {
+            String msg = String.format("Failed to setup Kubernetes cluster ID: %s in usable state as unable to access master node VMs of the cluster", kubernetesCluster.getUuid());
+            if (kubernetesCluster.getMasterNodeCount() > 1 && Network.GuestType.Shared.equals(network.getGuestType())) {
+                msg = String.format("%s. Make sure external load-balancer has port forwarding rules for SSH access on ports %d-%d and API access on port %d",
+                        msg,
+                        CLUSTER_NODES_DEFAULT_START_SSH_PORT,
+                        CLUSTER_NODES_DEFAULT_START_SSH_PORT + kubernetesCluster.getTotalNodeCount() - 1,
+                        CLUSTER_API_PORT);
+            }
+            logTransitStateDetachIsoAndThrow(Level.ERROR, msg, kubernetesCluster, clusterVMs, KubernetesCluster.Event.CreateFailed, null);
+        }
+        boolean k8sApiServerSetup = KubernetesClusterUtil.isKubernetesClusterServerRunning(kubernetesCluster, publicIpAddress, CLUSTER_API_PORT, startTimeoutTime, 15000);
+        if (!k8sApiServerSetup) {
+            logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to setup Kubernetes cluster ID: %s in usable state as unable to provision API endpoint for the cluster", kubernetesCluster.getUuid()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.CreateFailed, null);
+        }
+        sshPort = publicIpSshPort.second();
+        updateKubernetesClusterEntryEndpoint();
+        boolean readyNodesCountValid = KubernetesClusterUtil.validateKubernetesClusterReadyNodesCount(kubernetesCluster, publicIpAddress, sshPort,
+                CLUSTER_NODE_VM_USER, sshKeyFile, startTimeoutTime, 15000);
+        detachIsoKubernetesVMs(clusterVMs);
+        if (!readyNodesCountValid) {
+            logTransitStateAndThrow(Level.ERROR, String.format("Failed to setup Kubernetes cluster ID: %s as it does not have desired number of nodes in ready state", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed);
+        }
+        if (!isKubernetesClusterKubeConfigAvailable(startTimeoutTime)) {
+            logTransitStateAndThrow(Level.ERROR, String.format("Failed to setup Kubernetes cluster ID: %s in usable state as unable to retrieve kube-config for the cluster", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);
+        }
+        if (!isKubernetesClusterDashboardServiceRunning(true, startTimeoutTime)) {
+            logTransitStateAndThrow(Level.ERROR, String.format("Failed to setup Kubernetes cluster ID: %s in usable state as unable to get Dashboard service running for the cluster", kubernetesCluster.getUuid()), kubernetesCluster.getId(),KubernetesCluster.Event.OperationFailed);
+        }
+        stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded);
+        return true;
+    }
+
+    public boolean startStoppedKubernetesCluster() throws CloudRuntimeException {
+        init();
+        if (LOGGER.isInfoEnabled()) {
+            LOGGER.info(String.format("Starting Kubernetes cluster ID: %s", kubernetesCluster.getUuid()));
+        }
+        final long startTimeoutTime = System.currentTimeMillis() + KubernetesClusterService.KubernetesClusterStartTimeout.value() * 1000;
+        stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.StartRequested);
+        startKubernetesClusterVMs();
+        try {
+            InetAddress address = InetAddress.getByName(new URL(kubernetesCluster.getEndpoint()).getHost());
+        } catch (MalformedURLException | UnknownHostException ex) {
+            logTransitStateAndThrow(Level.ERROR, String.format("Kubernetes cluster ID: %s has invalid API endpoint. Can not verify if cluster is in ready state", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);
+        }
+        Pair<String, Integer> sshIpPort =  getKubernetesClusterServerIpSshPort(null);
+        publicIpAddress = sshIpPort.first();
+        sshPort = sshIpPort.second();
+        if (Strings.isNullOrEmpty(publicIpAddress)) {
+            logTransitStateAndThrow(Level.ERROR, String.format("Failed to start Kubernetes cluster ID: %s as no public IP found for the cluster" , kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);
+        }
+        if (!KubernetesClusterUtil.isKubernetesClusterServerRunning(kubernetesCluster, publicIpAddress, CLUSTER_API_PORT, startTimeoutTime, 15000)) {
+            logTransitStateAndThrow(Level.ERROR, String.format("Failed to start Kubernetes cluster ID: %s in usable state", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);
+        }
+        if (!isKubernetesClusterKubeConfigAvailable(startTimeoutTime)) {
+            logTransitStateAndThrow(Level.ERROR, String.format("Failed to start Kubernetes cluster ID: %s in usable state as unable to retrieve kube-config for the cluster", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);
+        }
+        if (!isKubernetesClusterDashboardServiceRunning(false, startTimeoutTime)) {
+            logTransitStateAndThrow(Level.ERROR, String.format("Failed to start Kubernetes cluster ID: %s in usable state as unable to get Dashboard service running for the cluster", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);
+        }
+        stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded);
+        if (LOGGER.isInfoEnabled()) {
+            LOGGER.info(String.format("Kubernetes cluster ID: %s successfully started", kubernetesCluster.getUuid()));
+        }
+        return true;
+    }
+
+    public boolean reconcileAlertCluster() {
+        init();
+        final long startTimeoutTime = System.currentTimeMillis() + 3 * 60 * 1000;
+        List<KubernetesClusterVmMapVO> vmMapVOList = getKubernetesClusterVMMaps();
+        if (CollectionUtils.isEmpty(vmMapVOList) || vmMapVOList.size() != kubernetesCluster.getTotalNodeCount()) {
+            return false;
+        }
+        Pair<String, Integer> sshIpPort =  getKubernetesClusterServerIpSshPort(null);
+        publicIpAddress = sshIpPort.first();
+        sshPort = sshIpPort.second();
+        if (Strings.isNullOrEmpty(publicIpAddress)) {
+            return false;
+        }
+        long actualNodeCount = 0;
+        try {
+            actualNodeCount = KubernetesClusterUtil.getKubernetesClusterReadyNodesCount(kubernetesCluster, publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, sshKeyFile);
+        } catch (Exception e) {
+            return false;
+        }
+        if (kubernetesCluster.getTotalNodeCount() != actualNodeCount) {
+            return false;
+        }
+        if (Strings.isNullOrEmpty(sshIpPort.first())) {
+            return false;
+        }
+        if (!KubernetesClusterUtil.isKubernetesClusterServerRunning(kubernetesCluster, sshIpPort.first(),
+                KubernetesClusterActionWorker.CLUSTER_API_PORT, startTimeoutTime, 0)) {
+            return false;
+        }
+        updateKubernetesClusterEntryEndpoint();
+        if (!isKubernetesClusterKubeConfigAvailable(startTimeoutTime)) {
+            return false;
+        }
+        if (!isKubernetesClusterDashboardServiceRunning(false, startTimeoutTime)) {
+            return false;
+        }
+        // Mark the cluster as running
+        stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.RecoveryRequested);
+        stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded);
+        return true;
+    }
+}
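
For reference, a minimal illustrative sketch (not part of the commit) of the endpoint rewrite
isKubernetesClusterKubeConfigAvailable() applies before storing the retrieved kube-config: the API
server address written by kubeadm (the master VM's private IP) is replaced with the cluster's
public endpoint so that the stored config can be used from outside the guest network.

    // Illustrative sketch only, not part of the commit
    final class KubeConfigRewrite {
        static String pointAtPublicEndpoint(String kubeConfig, String masterPrivateIp, String publicIp, int apiPort) {
            return kubeConfig.replace(
                    String.format("server: https://%s:%d", masterPrivateIp, apiPort),
                    String.format("server: https://%s:%d", publicIp, apiPort));
        }
    }
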
diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStopWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStopWorker.java
new file mode 100644
index 0000000..a8e1a2c
--- /dev/null
+++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStopWorker.java
@@ -0,0 +1,62 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloud.kubernetes.cluster.actionworkers;
+
+import java.util.List;
+
+import org.apache.log4j.Level;
+
+import com.cloud.exception.ConcurrentOperationException;
+import com.cloud.kubernetes.cluster.KubernetesCluster;
+import com.cloud.kubernetes.cluster.KubernetesClusterManagerImpl;
+import com.cloud.uservm.UserVm;
+import com.cloud.utils.exception.CloudRuntimeException;
+import com.cloud.vm.VirtualMachine;
+
+public class KubernetesClusterStopWorker extends KubernetesClusterActionWorker {
+    public KubernetesClusterStopWorker(final KubernetesCluster kubernetesCluster, final KubernetesClusterManagerImpl clusterManager) {
+        super(kubernetesCluster, clusterManager);
+    }
+
+    public boolean stop() throws CloudRuntimeException {
+        init();
+        if (LOGGER.isInfoEnabled()) {
+            LOGGER.info(String.format("Stopping Kubernetes cluster ID: %s", kubernetesCluster.getUuid()));
+        }
+        stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.StopRequested);
+        List<UserVm> clusterVMs = getKubernetesClusterVMs();
+        for (UserVm vm : clusterVMs) {
+            if (vm == null) {
+                logTransitStateAndThrow(Level.ERROR, String.format("Failed to find all VMs in Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);
+            }
+            try {
+                userVmService.stopVirtualMachine(vm.getId(), false);
+            } catch (ConcurrentOperationException ex) {
+                LOGGER.warn(String.format("Failed to stop VM ID: %s in Kubernetes cluster ID: %s", vm.getUuid(), kubernetesCluster.getUuid()), ex);
+            }
+        }
+        for (final UserVm userVm : clusterVMs) {
+            UserVm vm = userVmDao.findById(userVm.getId());
+            if (vm == null || !vm.getState().equals(VirtualMachine.State.Stopped)) {
+                logTransitStateAndThrow(Level.ERROR, String.format("Failed to stop all VMs in Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);
+            }
+        }
+        stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded);
+        return true;
+    }
+}
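
For reference, a minimal illustrative sketch (not part of the commit) of the two-phase pattern the
stop worker above shares with startKubernetesClusterVMs() in the start worker: the operation is
issued on every VM while individual failures are tolerated, and a second verification pass over the
final VM states decides whether the cluster-level operation succeeded.

    // Illustrative sketch only, not part of the commit
    import java.util.List;
    import java.util.function.Consumer;
    import java.util.function.Predicate;

    final class TwoPhaseVmOperation {
        static <T> boolean applyThenVerify(List<T> vms, Consumer<T> operation, Predicate<T> inDesiredState) {
            for (T vm : vms) {
                try {
                    operation.accept(vm); // e.g. a non-forced stop; failures here are tolerated
                } catch (RuntimeException e) {
                    // keep going; the verification pass below decides the overall result
                }
            }
            return vms.stream().allMatch(inDesiredState);
        }
    }
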
diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java
new file mode 100644
index 0000000..eb9058d
--- /dev/null
+++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java
@@ -0,0 +1,169 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloud.kubernetes.cluster.actionworkers;
+
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.log4j.Level;
+
+import com.cloud.hypervisor.Hypervisor;
+import com.cloud.kubernetes.cluster.KubernetesCluster;
+import com.cloud.kubernetes.cluster.KubernetesClusterManagerImpl;
+import com.cloud.kubernetes.cluster.KubernetesClusterService;
+import com.cloud.kubernetes.cluster.KubernetesClusterVO;
+import com.cloud.kubernetes.cluster.utils.KubernetesClusterUtil;
+import com.cloud.kubernetes.version.KubernetesSupportedVersion;
+import com.cloud.kubernetes.version.KubernetesVersionManagerImpl;
+import com.cloud.uservm.UserVm;
+import com.cloud.utils.Pair;
+import com.cloud.utils.exception.CloudRuntimeException;
+import com.cloud.utils.ssh.SshHelper;
+import com.google.common.base.Strings;
+
+public class KubernetesClusterUpgradeWorker extends KubernetesClusterActionWorker {
+
+    private List<UserVm> clusterVMs = new ArrayList<>();
+    private KubernetesSupportedVersion upgradeVersion;
+    private File upgradeScriptFile;
+    private long upgradeTimeoutTime;
+
+    public KubernetesClusterUpgradeWorker(final KubernetesCluster kubernetesCluster,
+                                          final KubernetesSupportedVersion upgradeVersion,
+                                          final KubernetesClusterManagerImpl clusterManager) {
+        super(kubernetesCluster, clusterManager);
+        this.upgradeVersion = upgradeVersion;
+    }
+
+    private void retrieveUpgradeScriptFile() {
+        try {
+            String upgradeScriptData = readResourceFile("/script/upgrade-kubernetes.sh");
+            upgradeScriptFile = File.createTempFile("upgrade-kubernetes", ".sh");
+            BufferedWriter upgradeScriptFileWriter = new BufferedWriter(new FileWriter(upgradeScriptFile));
+            upgradeScriptFileWriter.write(upgradeScriptData);
+            upgradeScriptFileWriter.close();
+        } catch (IOException e) {
+            logAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster ID: %s, unable to prepare upgrade script", kubernetesCluster.getUuid()), e);
+        }
+    }
+
+    private Pair<Boolean, String> runInstallScriptOnVM(final UserVm vm, final int index) throws Exception {
+        int nodeSshPort = sshPort == 22 ? sshPort : sshPort + index;
+        String nodeAddress = (index > 0 && sshPort == 22) ? vm.getPrivateIpAddress() : publicIpAddress;
+        SshHelper.scpTo(nodeAddress, nodeSshPort, CLUSTER_NODE_VM_USER, sshKeyFile, null,
+                "~/", upgradeScriptFile.getAbsolutePath(), "0755");
+        String cmdStr = String.format("sudo ./%s %s %s %s %s",
+                upgradeScriptFile.getName(),
+                upgradeVersion.getSemanticVersion(),
+                index == 0 ? "true" : "false",
+                KubernetesVersionManagerImpl.compareSemanticVersions(upgradeVersion.getSemanticVersion(), "1.15.0") < 0 ? "true" : "false",
+                Hypervisor.HypervisorType.VMware.equals(vm.getHypervisorType()));
+        return SshHelper.sshExecute(publicIpAddress, nodeSshPort, CLUSTER_NODE_VM_USER, sshKeyFile, null,
+                cmdStr,
+                10000, 10000, 10 * 60 * 1000);
+    }
+
+    private void upgradeKubernetesClusterNodes() {
+        Pair<Boolean, String> result = null;
+        for (int i = 0; i < clusterVMs.size(); ++i) {
+            UserVm vm = clusterVMs.get(i);
+            String hostName = vm.getHostName();
+            if (!Strings.isNullOrEmpty(hostName)) {
+                hostName = hostName.toLowerCase();
+            }
+            result = null;
+            if (LOGGER.isInfoEnabled()) {
+                LOGGER.info(String.format("Upgrading node on VM ID: %s in Kubernetes cluster ID: %s with Kubernetes version(%s) ID: %s",
+                        vm.getUuid(), kubernetesCluster.getUuid(), upgradeVersion.getSemanticVersion(), upgradeVersion.getUuid()));
+            }
+            try {
+                result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, sshKeyFile, null,
+                        String.format("sudo kubectl drain %s --ignore-daemonsets --delete-local-data", hostName),
+                        10000, 10000, 60000);
+            } catch (Exception e) {
+                logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster ID: %s, unable to drain Kubernetes node on VM ID: %s", kubernetesCluster.getUuid(), vm.getUuid()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, e);
+            }
+            if (!result.first()) {
+                logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster ID: %s, unable to drain Kubernetes node on VM ID: %s", kubernetesCluster.getUuid(), vm.getUuid()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, null);
+            }
+            if (System.currentTimeMillis() > upgradeTimeoutTime) {
+                logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster ID: %s, upgrade action timed out", kubernetesCluster.getUuid()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, null);
+            }
+            try {
+                result = runInstallScriptOnVM(vm, i);
+            } catch (Exception e) {
+                logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster ID: %s, unable to upgrade Kubernetes node on VM ID: %s", kubernetesCluster.getUuid(), vm.getUuid()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, e);
+            }
+            if (!result.first()) {
+                logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster ID: %s, unable to upgrade Kubernetes node on VM ID: %s", kubernetesCluster.getUuid(), vm.getUuid()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, null);
+            }
+            if (System.currentTimeMillis() > upgradeTimeoutTime) {
+                logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster ID: %s, upgrade action timed out", kubernetesCluster.getUuid()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, null);
+            }
+            if (!KubernetesClusterUtil.uncordonKubernetesClusterNode(kubernetesCluster, publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, getManagementServerSshPublicKeyFile(), vm, upgradeTimeoutTime, 15000)) {
+                logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster ID: %s, unable to uncordon Kubernetes node on VM ID: %s", kubernetesCluster.getUuid(), vm.getUuid()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, null);
+            }
+            if (i == 0) { // Wait for master to get in Ready state
+                if (!KubernetesClusterUtil.isKubernetesClusterNodeReady(kubernetesCluster, publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, getManagementServerSshPublicKeyFile(), hostName, upgradeTimeoutTime, 15000)) {
+                    logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster ID: %s, unable to get master Kubernetes node on VM ID: %s in ready state", kubernetesCluster.getUuid(), vm.getUuid()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, null);
+                }
+            }
+            if (LOGGER.isInfoEnabled()) {
+                LOGGER.info(String.format("Successfully upgraded node on VM ID: %s in Kubernetes cluster ID: %s with Kubernetes version(%s) ID: %s",
+                        vm.getUuid(), kubernetesCluster.getUuid(), upgradeVersion.getSemanticVersion(), upgradeVersion.getUuid()));
+            }
+        }
+    }
+
+    public boolean upgradeCluster() throws CloudRuntimeException {
+        init();
+        if (LOGGER.isInfoEnabled()) {
+            LOGGER.info(String.format("Upgrading Kubernetes cluster ID: %s", kubernetesCluster.getUuid()));
+        }
+        upgradeTimeoutTime = System.currentTimeMillis() + KubernetesClusterService.KubernetesClusterUpgradeTimeout.value() * 1000;
+        Pair<String, Integer> publicIpSshPort = getKubernetesClusterServerIpSshPort(null);
+        publicIpAddress = publicIpSshPort.first();
+        sshPort = publicIpSshPort.second();
+        if (Strings.isNullOrEmpty(publicIpAddress)) {
+            logAndThrow(Level.ERROR, String.format("Upgrade failed for Kubernetes cluster ID: %s, unable to retrieve associated public IP", kubernetesCluster.getUuid()));
+        }
+        clusterVMs = getKubernetesClusterVMs();
+        if (CollectionUtils.isEmpty(clusterVMs)) {
+            logAndThrow(Level.ERROR, String.format("Upgrade failed for Kubernetes cluster ID: %s, unable to retrieve VMs for cluster", kubernetesCluster.getUuid()));
+        }
+        retrieveUpgradeScriptFile();
+        stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.UpgradeRequested);
+        attachIsoKubernetesVMs(clusterVMs, upgradeVersion);
+        upgradeKubernetesClusterNodes();
+        detachIsoKubernetesVMs(clusterVMs);
+        KubernetesClusterVO kubernetesClusterVO = kubernetesClusterDao.findById(kubernetesCluster.getId());
+        kubernetesClusterVO.setKubernetesVersionId(upgradeVersion.getId());
+        boolean updated = kubernetesClusterDao.update(kubernetesCluster.getId(), kubernetesClusterVO);
+        if (!updated) {
+            stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);
+        } else {
+            stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded);
+        }
+        return updated;
+    }
+}
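
As an illustrative aside, the short sketch below mirrors the logic of runInstallScriptOnVM above for a hypothetical worker node reached through port-forwarding rules; the port, node index and target version are invented values, not taken from this change.

    import com.cloud.kubernetes.version.KubernetesVersionManagerImpl;

    public class UpgradeCommandSketch {
        public static void main(String[] args) {
            int sshPort = 2222;   // assumed base SSH port when nodes sit behind port-forwarding
            int index = 1;        // node 0 is the master; workers follow
            // Port 22 means nodes are reachable directly; otherwise one forwarded port per node.
            int nodeSshPort = sshPort == 22 ? sshPort : sshPort + index;          // -> 2223
            String upgradeVersion = "1.16.3";                                     // hypothetical target
            String cmdStr = String.format("sudo ./%s %s %s %s %s",
                    "upgrade-kubernetes.sh",
                    upgradeVersion,
                    index == 0 ? "true" : "false",                                // master node?
                    KubernetesVersionManagerImpl.compareSemanticVersions(upgradeVersion, "1.15.0") < 0 ? "true" : "false",
                    false);                                                       // VMware hypervisor?
            System.out.println(nodeSshPort + " -> " + cmdStr);
            // prints: 2223 -> sudo ./upgrade-kubernetes.sh 1.16.3 false false false
        }
    }
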
diff --git a/server/src/main/java/com/cloud/api/query/dao/NetworkOfferingJoinDao.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterDao.java
similarity index 54%
copy from server/src/main/java/com/cloud/api/query/dao/NetworkOfferingJoinDao.java
copy to plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterDao.java
index 362cabb..fe67323 100644
--- a/server/src/main/java/com/cloud/api/query/dao/NetworkOfferingJoinDao.java
+++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterDao.java
@@ -14,24 +14,21 @@
 // KIND, either express or implied.  See the License for the
 // specific language governing permissions and limitations
 // under the License.
-
-package com.cloud.api.query.dao;
+package com.cloud.kubernetes.cluster.dao;
 
 import java.util.List;
 
-import org.apache.cloudstack.api.response.NetworkOfferingResponse;
-
-import com.cloud.api.query.vo.NetworkOfferingJoinVO;
-import com.cloud.offering.NetworkOffering;
+import com.cloud.kubernetes.cluster.KubernetesCluster;
+import com.cloud.kubernetes.cluster.KubernetesClusterVO;
 import com.cloud.utils.db.GenericDao;
+import com.cloud.utils.fsm.StateDao;
 
-public interface NetworkOfferingJoinDao extends GenericDao<NetworkOfferingJoinVO, Long> {
-
-    List<NetworkOfferingJoinVO> findByDomainId(long domainId);
-
-    List<NetworkOfferingJoinVO> findByZoneId(long zoneId);
-
-    NetworkOfferingResponse newNetworkOfferingResponse(NetworkOffering nof);
+public interface KubernetesClusterDao extends GenericDao<KubernetesClusterVO, Long>,
+        StateDao<KubernetesCluster.State, KubernetesCluster.Event, KubernetesCluster> {
 
-    NetworkOfferingJoinVO newNetworkOfferingView(NetworkOffering nof);
+    List<KubernetesClusterVO> listByAccount(long accountId);
+    List<KubernetesClusterVO> findKubernetesClustersToGarbageCollect();
+    List<KubernetesClusterVO> findKubernetesClustersInState(KubernetesCluster.State state);
+    List<KubernetesClusterVO> listByNetworkId(long networkId);
+    List<KubernetesClusterVO> listAllByKubernetesVersion(long kubernetesVersionId);
 }
diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterDaoImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterDaoImpl.java
new file mode 100644
index 0000000..003286c
--- /dev/null
+++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterDaoImpl.java
@@ -0,0 +1,112 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.kubernetes.cluster.dao;
+
+import java.util.List;
+
+import org.springframework.stereotype.Component;
+
+import com.cloud.kubernetes.cluster.KubernetesCluster;
+import com.cloud.kubernetes.cluster.KubernetesClusterVO;
+import com.cloud.utils.db.GenericDaoBase;
+import com.cloud.utils.db.SearchBuilder;
+import com.cloud.utils.db.SearchCriteria;
+import com.cloud.utils.db.TransactionLegacy;
+
+@Component
+public class KubernetesClusterDaoImpl extends GenericDaoBase<KubernetesClusterVO, Long> implements KubernetesClusterDao {
+
+    private final SearchBuilder<KubernetesClusterVO> AccountIdSearch;
+    private final SearchBuilder<KubernetesClusterVO> GarbageCollectedSearch;
+    private final SearchBuilder<KubernetesClusterVO> StateSearch;
+    private final SearchBuilder<KubernetesClusterVO> SameNetworkSearch;
+    private final SearchBuilder<KubernetesClusterVO> KubernetesVersionSearch;
+
+    public KubernetesClusterDaoImpl() {
+        AccountIdSearch = createSearchBuilder();
+        AccountIdSearch.and("account", AccountIdSearch.entity().getAccountId(), SearchCriteria.Op.EQ);
+        AccountIdSearch.done();
+
+        GarbageCollectedSearch = createSearchBuilder();
+        GarbageCollectedSearch.and("gc", GarbageCollectedSearch.entity().isCheckForGc(), SearchCriteria.Op.EQ);
+        GarbageCollectedSearch.and("state", GarbageCollectedSearch.entity().getState(), SearchCriteria.Op.EQ);
+        GarbageCollectedSearch.done();
+
+        StateSearch = createSearchBuilder();
+        StateSearch.and("state", StateSearch.entity().getState(), SearchCriteria.Op.EQ);
+        StateSearch.done();
+
+        SameNetworkSearch = createSearchBuilder();
+        SameNetworkSearch.and("network_id", SameNetworkSearch.entity().getNetworkId(), SearchCriteria.Op.EQ);
+        SameNetworkSearch.done();
+
+        KubernetesVersionSearch = createSearchBuilder();
+        KubernetesVersionSearch.and("kubernetesVersionId", KubernetesVersionSearch.entity().getKubernetesVersionId(), SearchCriteria.Op.EQ);
+        KubernetesVersionSearch.done();
+    }
+
+    @Override
+    public List<KubernetesClusterVO> listByAccount(long accountId) {
+        SearchCriteria<KubernetesClusterVO> sc = AccountIdSearch.create();
+        sc.setParameters("account", accountId);
+        return listBy(sc, null);
+    }
+
+    @Override
+    public List<KubernetesClusterVO> findKubernetesClustersToGarbageCollect() {
+        SearchCriteria<KubernetesClusterVO> sc = GarbageCollectedSearch.create();
+        sc.setParameters("gc", true);
+        sc.setParameters("state", KubernetesCluster.State.Destroying);
+        return listBy(sc);
+    }
+
+    @Override
+    public List<KubernetesClusterVO> findKubernetesClustersInState(KubernetesCluster.State state) {
+        SearchCriteria<KubernetesClusterVO> sc = StateSearch.create();
+        sc.setParameters("state", state);
+        return listBy(sc);
+    }
+
+    @Override
+    public boolean updateState(KubernetesCluster.State currentState, KubernetesCluster.Event event, KubernetesCluster.State nextState,
+                               KubernetesCluster vo, Object data) {
+        // TODO: ensure this update is correct
+        TransactionLegacy txn = TransactionLegacy.currentTxn();
+        txn.start();
+
+        KubernetesClusterVO ccVo = (KubernetesClusterVO)vo;
+        ccVo.setState(nextState);
+        super.update(ccVo.getId(), ccVo);
+
+        txn.commit();
+        return true;
+    }
+
+    @Override
+    public List<KubernetesClusterVO> listByNetworkId(long networkId) {
+        SearchCriteria<KubernetesClusterVO> sc = SameNetworkSearch.create();
+        sc.setParameters("network_id", networkId);
+        return this.listBy(sc);
+    }
+
+    @Override
+    public List<KubernetesClusterVO> listAllByKubernetesVersion(long kubernetesVersionId) {
+        SearchCriteria<KubernetesClusterVO> sc = KubernetesVersionSearch.create();
+        sc.setParameters("kubernetesVersionId", kubernetesVersionId);
+        return this.listBy(sc);
+    }
+}
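
For context, a hypothetical caller of the DAO above might consume the garbage-collection query like the sketch below; the class and method names here are invented for illustration, and the real cleanup logic lives elsewhere in this change.

    import java.util.List;

    import com.cloud.kubernetes.cluster.KubernetesClusterVO;
    import com.cloud.kubernetes.cluster.dao.KubernetesClusterDao;

    public class KubernetesClusterGcSketch {
        private final KubernetesClusterDao kubernetesClusterDao;

        public KubernetesClusterGcSketch(final KubernetesClusterDao kubernetesClusterDao) {
            this.kubernetesClusterDao = kubernetesClusterDao;
        }

        public void runGcPass() {
            // Clusters flagged for GC that have already reached the Destroying state.
            List<KubernetesClusterVO> candidates = kubernetesClusterDao.findKubernetesClustersToGarbageCollect();
            for (KubernetesClusterVO cluster : candidates) {
                // A real implementation would hand each cluster to a destroy worker here.
                System.out.println("Would garbage-collect Kubernetes cluster " + cluster.getUuid());
            }
        }
    }
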
diff --git a/ui/plugins/plugins.js b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterDetailsDao.java
similarity index 68%
copy from ui/plugins/plugins.js
copy to plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterDetailsDao.java
index 6edfe88..52990eb 100644
--- a/ui/plugins/plugins.js
+++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterDetailsDao.java
@@ -14,10 +14,15 @@
 // KIND, either express or implied.  See the License for the
 // specific language governing permissions and limitations
 // under the License.
-(function($, cloudStack) {
-  cloudStack.plugins = [
-    //'testPlugin',
-    'cloudian',
-    'quota'
-  ];
-}(jQuery, cloudStack));
+package com.cloud.kubernetes.cluster.dao;
+
+
+import org.apache.cloudstack.resourcedetail.ResourceDetailsDao;
+
+import com.cloud.kubernetes.cluster.KubernetesClusterDetailsVO;
+import com.cloud.utils.db.GenericDao;
+
+
+public interface KubernetesClusterDetailsDao extends GenericDao<KubernetesClusterDetailsVO, Long>, ResourceDetailsDao<KubernetesClusterDetailsVO> {
+
+}
diff --git a/ui/plugins/plugins.js b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterDetailsDaoImpl.java
similarity index 57%
copy from ui/plugins/plugins.js
copy to plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterDetailsDaoImpl.java
index 6edfe88..66ef2ad 100644
--- a/ui/plugins/plugins.js
+++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterDetailsDaoImpl.java
@@ -14,10 +14,19 @@
 // KIND, either express or implied.  See the License for the
 // specific language governing permissions and limitations
 // under the License.
-(function($, cloudStack) {
-  cloudStack.plugins = [
-    //'testPlugin',
-    'cloudian',
-    'quota'
-  ];
-}(jQuery, cloudStack));
+package com.cloud.kubernetes.cluster.dao;
+
+import org.apache.cloudstack.resourcedetail.ResourceDetailsDaoBase;
+import org.springframework.stereotype.Component;
+
+import com.cloud.kubernetes.cluster.KubernetesClusterDetailsVO;
+
+
+@Component
+public class KubernetesClusterDetailsDaoImpl extends ResourceDetailsDaoBase<KubernetesClusterDetailsVO> implements KubernetesClusterDetailsDao {
+
+    @Override
+    public void addDetail(long resourceId, String key, String value, boolean display) {
+        super.addDetail(new KubernetesClusterDetailsVO(resourceId, key, value, display));
+    }
+}
diff --git a/ui/plugins/plugins.js b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterVmMapDao.java
similarity index 70%
copy from ui/plugins/plugins.js
copy to plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterVmMapDao.java
index 6edfe88..8b08dd3 100644
--- a/ui/plugins/plugins.js
+++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterVmMapDao.java
@@ -14,10 +14,13 @@
 // KIND, either express or implied.  See the License for the
 // specific language governing permissions and limitations
 // under the License.
-(function($, cloudStack) {
-  cloudStack.plugins = [
-    //'testPlugin',
-    'cloudian',
-    'quota'
-  ];
-}(jQuery, cloudStack));
+package com.cloud.kubernetes.cluster.dao;
+
+import com.cloud.kubernetes.cluster.KubernetesClusterVmMapVO;
+import com.cloud.utils.db.GenericDao;
+
+import java.util.List;
+
+public interface KubernetesClusterVmMapDao extends GenericDao<KubernetesClusterVmMapVO, Long> {
+    public List<KubernetesClusterVmMapVO> listByClusterId(long clusterId);
+}
diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterVmMapDaoImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterVmMapDaoImpl.java
new file mode 100644
index 0000000..0b86b2c
--- /dev/null
+++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterVmMapDaoImpl.java
@@ -0,0 +1,46 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.kubernetes.cluster.dao;
+
+import java.util.List;
+
+import org.springframework.stereotype.Component;
+
+import com.cloud.kubernetes.cluster.KubernetesClusterVmMapVO;
+import com.cloud.utils.db.GenericDaoBase;
+import com.cloud.utils.db.SearchBuilder;
+import com.cloud.utils.db.SearchCriteria;
+
+
+@Component
+public class KubernetesClusterVmMapDaoImpl extends GenericDaoBase<KubernetesClusterVmMapVO, Long> implements KubernetesClusterVmMapDao {
+
+    private final SearchBuilder<KubernetesClusterVmMapVO> clusterIdSearch;
+
+    public KubernetesClusterVmMapDaoImpl() {
+        clusterIdSearch = createSearchBuilder();
+        clusterIdSearch.and("clusterId", clusterIdSearch.entity().getClusterId(), SearchCriteria.Op.EQ);
+        clusterIdSearch.done();
+    }
+
+    @Override
+    public List<KubernetesClusterVmMapVO> listByClusterId(long clusterId) {
+        SearchCriteria<KubernetesClusterVmMapVO> sc = clusterIdSearch.create();
+        sc.setParameters("clusterId", clusterId);
+        return listBy(sc, null);
+    }
+}
diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtil.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtil.java
new file mode 100644
index 0000000..68cd916
--- /dev/null
+++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtil.java
@@ -0,0 +1,311 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloud.kubernetes.cluster.utils;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.net.Socket;
+import java.net.URL;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.log4j.Logger;
+
+import com.cloud.kubernetes.cluster.KubernetesCluster;
+import com.cloud.uservm.UserVm;
+import com.cloud.utils.Pair;
+import com.cloud.utils.StringUtils;
+import com.cloud.utils.ssh.SshHelper;
+import com.google.common.base.Strings;
+
+public class KubernetesClusterUtil {
+
+    protected static final Logger LOGGER = Logger.getLogger(KubernetesClusterUtil.class);
+
+    public static boolean isKubernetesClusterNodeReady(final KubernetesCluster kubernetesCluster, String ipAddress, int port,
+                                                       String user, File sshKeyFile, String nodeName) throws Exception {
+        Pair<Boolean, String> result = SshHelper.sshExecute(ipAddress, port,
+                user, sshKeyFile, null,
+                String.format("sudo kubectl get nodes | awk '{if ($1 == \"%s\" && $2 == \"Ready\") print $1}'", nodeName.toLowerCase()),
+                10000, 10000, 20000);
+        if (result.first() && nodeName.equals(result.second().trim())) {
+            return true;
+        }
+        if (LOGGER.isDebugEnabled()) {
+            LOGGER.debug(String.format("Failed to retrieve status for node: %s in Kubernetes cluster ID: %s. Output: %s", nodeName, kubernetesCluster.getUuid(), result.second()));
+        }
+        return false;
+    }
+
+    public static boolean isKubernetesClusterNodeReady(final KubernetesCluster kubernetesCluster, final String ipAddress, final int port,
+                                                       final String user, final File sshKeyFile, final String nodeName,
+                                                       final long timeoutTime, final int waitDuration) {
+        while (System.currentTimeMillis() < timeoutTime) {
+            boolean ready = false;
+            try {
+                ready = isKubernetesClusterNodeReady(kubernetesCluster, ipAddress, port, user, sshKeyFile, nodeName);
+            } catch (Exception e) {
+                LOGGER.warn(String.format("Failed to retrieve state of node: %s in Kubernetes cluster ID: %s", nodeName, kubernetesCluster.getUuid()), e);
+            }
+            if (ready) {
+                return true;
+            }
+            try {
+                Thread.sleep(waitDuration);
+            } catch (InterruptedException ie) {
+                LOGGER.error(String.format("Error while waiting for Kubernetes cluster ID: %s node: %s to become ready", kubernetesCluster.getUuid(), nodeName), ie);
+            }
+        }
+        return false;
+    }
+
+    /**
+     * Marks a given node in a given Kubernetes cluster as schedulable.
+     * The kubectl uncordon command is executed over SSH using the IP address and port of the cluster's
+     * host virtual machine or load balancer, and is retried with the given wait duration until the timeout is reached.
+     * Uncordoning is required after a node has been drained (usually during an upgrade).
+     * @param kubernetesCluster
+     * @param ipAddress
+     * @param port
+     * @param user
+     * @param sshKeyFile
+     * @param userVm
+     * @param timeoutTime
+     * @param waitDuration
+     * @return true if the node was successfully uncordoned before the timeout, false otherwise
+     */
+    public static boolean uncordonKubernetesClusterNode(final KubernetesCluster kubernetesCluster,
+                                                        final String ipAddress, final int port,
+                                                        final String user, final File sshKeyFile,
+                                                        final UserVm userVm, final long timeoutTime,
+                                                        final int waitDuration) {
+        String hostName = userVm.getHostName();
+        if (!Strings.isNullOrEmpty(hostName)) {
+            hostName = hostName.toLowerCase();
+        }
+        while (System.currentTimeMillis() < timeoutTime) {
+            Pair<Boolean, String> result = null;
+            try {
+                result = SshHelper.sshExecute(ipAddress, port, user, sshKeyFile, null,
+                        String.format("sudo kubectl uncordon %s", hostName),
+                        10000, 10000, 30000);
+                if (result.first()) {
+                    return true;
+                }
+            } catch (Exception e) {
+                LOGGER.warn(String.format("Failed to uncordon node: %s on VM ID: %s in Kubernetes cluster ID: %s", hostName, userVm.getUuid(), kubernetesCluster.getUuid()), e);
+            }
+            try {
+                Thread.sleep(waitDuration);
+            } catch (InterruptedException ie) {
+                LOGGER.warn(String.format("Error while waiting for uncordon Kubernetes cluster ID: %s node: %s on VM ID: %s", kubernetesCluster.getUuid(), hostName, userVm.getUuid()), ie);
+            }
+        }
+        return false;
+    }
+
+    public static boolean isKubernetesClusterAddOnServiceRunning(final KubernetesCluster kubernetesCluster, final String ipAddress,
+                                                                 final int port, final String user, final File sshKeyFile,
+                                                                 final String namespace, String serviceName) {
+        try {
+            String cmd = "sudo kubectl get pods --all-namespaces";
+            if (!Strings.isNullOrEmpty(namespace)) {
+                cmd = String.format("sudo kubectl get pods --namespace=%s", namespace);
+            }
+            Pair<Boolean, String> result = SshHelper.sshExecute(ipAddress, port, user,
+                    sshKeyFile, null, cmd,
+                    10000, 10000, 10000);
+            if (result.first() && !Strings.isNullOrEmpty(result.second())) {
+                String[] lines = result.second().split("\n");
+                for (String line : lines) {
+                    if (line.contains(serviceName) && line.contains("Running")) {
+                        if (LOGGER.isDebugEnabled()) {
+                            LOGGER.debug(String.format("Service : %s in namespace: %s for the Kubernetes cluster ID: %s is running", serviceName, namespace, kubernetesCluster.getUuid()));
+                        }
+                        return true;
+                    }
+                }
+            }
+        } catch (Exception e) {
+            LOGGER.warn(String.format("Unable to retrieve service: %s running status in namespace %s for Kubernetes cluster ID: %s", serviceName, namespace, kubernetesCluster.getUuid()), e);
+        }
+        return false;
+    }
+
+    public static boolean isKubernetesClusterDashboardServiceRunning(final KubernetesCluster kubernetesCluster, String ipAddress,
+                                                                     final int port, final String user, final File sshKeyFile,
+                                                                     final long timeoutTime, final long waitDuration) {
+        boolean running = false;
+        // Check if dashboard service is up running.
+        while (System.currentTimeMillis() < timeoutTime) {
+            if (LOGGER.isDebugEnabled()) {
+                LOGGER.debug(String.format("Checking dashboard service for the Kubernetes cluster ID: %s to come up", kubernetesCluster.getUuid()));
+            }
+            if (isKubernetesClusterAddOnServiceRunning(kubernetesCluster, ipAddress, port, user, sshKeyFile, "kubernetes-dashboard", "kubernetes-dashboard")) {
+                if (LOGGER.isInfoEnabled()) {
+                    LOGGER.info(String.format("Dashboard service for the Kubernetes cluster ID: %s is in running state", kubernetesCluster.getUuid()));
+                }
+                running = true;
+                break;
+            }
+            try {
+                Thread.sleep(waitDuration);
+            } catch (InterruptedException ex) {
+                LOGGER.error(String.format("Error while waiting for Kubernetes cluster: %s API dashboard service to be available", kubernetesCluster.getUuid()), ex);
+            }
+        }
+        return running;
+    }
+
+    public static String getKubernetesClusterConfig(final KubernetesCluster kubernetesCluster, final String ipAddress, final int port,
+                                                    final String user, final File sshKeyFile, final long timeoutTime) {
+        String kubeConfig = "";
+        while (System.currentTimeMillis() < timeoutTime) {
+            try {
+                Pair<Boolean, String> result = SshHelper.sshExecute(ipAddress, port, user,
+                        sshKeyFile, null, "sudo cat /etc/kubernetes/admin.conf",
+                        10000, 10000, 10000);
+
+                if (result.first() && !Strings.isNullOrEmpty(result.second())) {
+                    kubeConfig = result.second();
+                    break;
+                } else  {
+                    if (LOGGER.isInfoEnabled()) {
+                        LOGGER.info(String.format("Failed to retrieve kube-config file for Kubernetes cluster ID: %s. Output: %s", kubernetesCluster.getUuid(), result.second()));
+                    }
+                }
+            } catch (Exception e) {
+                LOGGER.warn(String.format("Failed to retrieve kube-config file for Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), e);
+            }
+        }
+        return kubeConfig;
+    }
+
+    public static int getKubernetesClusterReadyNodesCount(final KubernetesCluster kubernetesCluster, final String ipAddress,
+                                                          final int port, final String user, final File sshKeyFile) throws Exception {
+        Pair<Boolean, String> result = SshHelper.sshExecute(ipAddress, port,
+                user, sshKeyFile, null,
+                "sudo kubectl get nodes | awk '{if ($2 == \"Ready\") print $1}' | wc -l",
+                10000, 10000, 20000);
+        if (result.first()) {
+            return Integer.parseInt(result.second().trim().replace("\"", ""));
+        } else {
+            if (LOGGER.isDebugEnabled()) {
+                LOGGER.debug(String.format("Failed to retrieve ready nodes for Kubernetes cluster ID: %s. Output: %s", kubernetesCluster.getUuid(), result.second()));
+            }
+        }
+        return 0;
+    }
+
+    public static boolean isKubernetesClusterServerRunning(final KubernetesCluster kubernetesCluster, final String ipAddress,
+                                                           final int port, final long timeoutTime, final long waitDuration) {
+        boolean k8sApiServerSetup = false;
+        while (System.currentTimeMillis() < timeoutTime) {
+            try {
+                String versionOutput = IOUtils.toString(new URL(String.format("https://%s:%d/version", ipAddress, port)), StringUtils.getPreferredCharset());
+                if (!Strings.isNullOrEmpty(versionOutput)) {
+                    if (LOGGER.isInfoEnabled()) {
+                        LOGGER.info(String.format("Kubernetes cluster ID: %s API has been successfully provisioned, %s", kubernetesCluster.getUuid(), versionOutput));
+                    }
+                    k8sApiServerSetup = true;
+                    break;
+                }
+            } catch (Exception e) {
+                LOGGER.warn(String.format("API endpoint for Kubernetes cluster ID: %s not available", kubernetesCluster.getUuid()), e);
+            }
+            try {
+                Thread.sleep(waitDuration);
+            } catch (InterruptedException ie) {
+                LOGGER.error(String.format("Error while waiting for Kubernetes cluster ID: %s API endpoint to be available", kubernetesCluster.getUuid()), ie);
+            }
+        }
+        return k8sApiServerSetup;
+    }
+
+    public static boolean isKubernetesClusterMasterVmRunning(final KubernetesCluster kubernetesCluster, final String ipAddress,
+                                                             final int port, final long timeoutTime) {
+        boolean masterVmRunning = false;
+        while (!masterVmRunning && System.currentTimeMillis() < timeoutTime) {
+            try (Socket socket = new Socket()) {
+                socket.connect(new InetSocketAddress(ipAddress, port), 10000);
+                masterVmRunning = true;
+            } catch (IOException e) {
+                if (LOGGER.isInfoEnabled()) {
+                    LOGGER.info(String.format("Waiting for Kubernetes cluster ID: %s master node VMs to be accessible", kubernetesCluster.getUuid()));
+                }
+                try {
+                    Thread.sleep(10000);
+                } catch (InterruptedException ex) {
+                    LOGGER.warn(String.format("Error while waiting for Kubernetes cluster ID: %s master node VMs to be accessible", kubernetesCluster.getUuid()), ex);
+                }
+            }
+        }
+        return masterVmRunning;
+    }
+
+    public static boolean validateKubernetesClusterReadyNodesCount(final KubernetesCluster kubernetesCluster,
+                                                                   final String ipAddress, final int port,
+                                                                   final String user, final File sshKeyFile,
+                                                                   final long timeoutTime, final long waitDuration) {
+        while (System.currentTimeMillis() < timeoutTime) {
+            if (LOGGER.isDebugEnabled()) {
+                LOGGER.debug(String.format("Checking ready nodes for the Kubernetes cluster ID: %s with total %d provisioned nodes", kubernetesCluster.getUuid(), kubernetesCluster.getTotalNodeCount()));
+            }
+            try {
+                int nodesCount = KubernetesClusterUtil.getKubernetesClusterReadyNodesCount(kubernetesCluster, ipAddress, port,
+                        user, sshKeyFile);
+                if (nodesCount == kubernetesCluster.getTotalNodeCount()) {
+                    if (LOGGER.isInfoEnabled()) {
+                        LOGGER.info(String.format("Kubernetes cluster ID: %s has %d ready nodes now", kubernetesCluster.getUuid(), kubernetesCluster.getTotalNodeCount()));
+                    }
+                    return true;
+                } else {
+                    if (LOGGER.isDebugEnabled()) {
+                        LOGGER.debug(String.format("Kubernetes cluster ID: %s has total %d provisioned nodes while %d ready now", kubernetesCluster.getUuid(), kubernetesCluster.getTotalNodeCount(), nodesCount));
+                    }
+                }
+            } catch (Exception e) {
+                LOGGER.warn(String.format("Failed to retrieve ready node count for Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), e);
+            }
+            try {
+                Thread.sleep(waitDuration);
+            } catch (InterruptedException ex) {
+                LOGGER.warn(String.format("Error while waiting during Kubernetes cluster ID: %s ready node check", kubernetesCluster.getUuid()), ex);
+            }
+        }
+        return false;
+    }
+
+    public static String generateClusterToken(final KubernetesCluster kubernetesCluster) {
+        String token = kubernetesCluster.getUuid();
+        token = token.replaceAll("-", "");
+        token = token.substring(0, 22);
+        token = token.substring(0, 6) + "." + token.substring(6);
+        return token;
+    }
+
+    public static String generateClusterHACertificateKey(final KubernetesCluster kubernetesCluster) {
+        String uuid = kubernetesCluster.getUuid();
+        StringBuilder token = new StringBuilder(uuid.replaceAll("-", ""));
+        while (token.length() < 64) {
+            token.append(token);
+        }
+        return token.toString().substring(0, 64);
+    }
+}
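
To make the two token helpers above concrete: generateClusterToken turns the cluster UUID into the 6.16-character shape kubeadm expects for bootstrap tokens, and generateClusterHACertificateKey repeats the dash-less UUID until it has 64 characters. The sketch below walks through both for an invented UUID.

    public class ClusterTokenSketch {
        public static void main(String[] args) {
            String uuid = "8d5c30fa-2b21-4b9c-9e3a-1f2d4c5b6a70";   // invented example UUID
            // generateClusterToken: strip dashes, keep the first 22 characters, insert a dot after the 6th.
            String token = uuid.replaceAll("-", "").substring(0, 22);
            token = token.substring(0, 6) + "." + token.substring(6);
            System.out.println(token);                 // 8d5c30.fa2b214b9c9e3a1f
            // generateClusterHACertificateKey: repeat the dash-less UUID until at least 64 characters, then truncate.
            StringBuilder key = new StringBuilder(uuid.replaceAll("-", ""));
            while (key.length() < 64) {
                key.append(key);
            }
            System.out.println(key.substring(0, 64));  // the 32 characters above repeated twice
        }
    }
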
diff --git a/server/src/main/java/com/cloud/api/query/dao/NetworkOfferingJoinDao.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesSupportedVersion.java
similarity index 54%
copy from server/src/main/java/com/cloud/api/query/dao/NetworkOfferingJoinDao.java
copy to plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesSupportedVersion.java
index 362cabb..0cb430a 100644
--- a/server/src/main/java/com/cloud/api/query/dao/NetworkOfferingJoinDao.java
+++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesSupportedVersion.java
@@ -15,23 +15,35 @@
 // specific language governing permissions and limitations
 // under the License.
 
-package com.cloud.api.query.dao;
-
-import java.util.List;
-
-import org.apache.cloudstack.api.response.NetworkOfferingResponse;
-
-import com.cloud.api.query.vo.NetworkOfferingJoinVO;
-import com.cloud.offering.NetworkOffering;
-import com.cloud.utils.db.GenericDao;
-
-public interface NetworkOfferingJoinDao extends GenericDao<NetworkOfferingJoinVO, Long> {
-
-    List<NetworkOfferingJoinVO> findByDomainId(long domainId);
-
-    List<NetworkOfferingJoinVO> findByZoneId(long zoneId);
-
-    NetworkOfferingResponse newNetworkOfferingResponse(NetworkOffering nof);
-
-    NetworkOfferingJoinVO newNetworkOfferingView(NetworkOffering nof);
+package com.cloud.kubernetes.version;
+
+import org.apache.cloudstack.api.Identity;
+import org.apache.cloudstack.api.InternalIdentity;
+
+/**
+ * KubernetesSupportedVersion describes the properties of a supported Kubernetes version.
+ */
+public interface KubernetesSupportedVersion extends InternalIdentity, Identity {
+
+    public enum State {
+        Disabled, Enabled
+    }
+
+    long getId();
+    String getName();
+    String getSemanticVersion();
+    long getIsoId();
+    Long getZoneId();
+    State getState();
+
+    /**
+     * @return the minimum number of CPU cores required by this Kubernetes version.
+     */
+    int getMinimumCpu();
+
+    /**
+     * @return the minimum RAM size in megabytes required by this Kubernetes version.
+     */
+    int getMinimumRamSize();
 }
diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesSupportedVersionVO.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesSupportedVersionVO.java
new file mode 100644
index 0000000..3f66f94
--- /dev/null
+++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesSupportedVersionVO.java
@@ -0,0 +1,168 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloud.kubernetes.version;
+
+import java.util.Date;
+import java.util.UUID;
+
+import javax.persistence.Column;
+import javax.persistence.Entity;
+import javax.persistence.EnumType;
+import javax.persistence.Enumerated;
+import javax.persistence.GeneratedValue;
+import javax.persistence.GenerationType;
+import javax.persistence.Id;
+import javax.persistence.Table;
+
+import com.cloud.utils.db.GenericDao;
+
+@Entity
+@Table(name = "kubernetes_supported_version")
+public class KubernetesSupportedVersionVO implements KubernetesSupportedVersion {
+    @Id
+    @GeneratedValue(strategy = GenerationType.IDENTITY)
+    @Column(name = "id")
+    private long id;
+
+    @Column(name = "uuid")
+    private String uuid;
+
+    @Column(name = "name")
+    private String name;
+
+    @Column(name = "semantic_version")
+    private String semanticVersion;
+
+    @Column(name = "iso_id")
+    private long isoId;
+
+    @Column(name = "zone_id")
+    private Long zoneId;
+
+    @Column(name = "state")
+    @Enumerated(value = EnumType.STRING)
+    State state = State.Enabled;
+
+    @Column(name = "min_cpu")
+    private int minimumCpu;
+
+    @Column(name = "min_ram_size")
+    private int minimumRamSize;
+
+    @Column(name = GenericDao.CREATED_COLUMN)
+    Date created;
+
+    @Column(name = GenericDao.REMOVED_COLUMN)
+    Date removed;
+
+    public KubernetesSupportedVersionVO() {
+        this.uuid = UUID.randomUUID().toString();
+    }
+
+    public KubernetesSupportedVersionVO(String name, String semanticVersion, long isoId, Long zoneId,
+                                        int minimumCpu, int minimumRamSize) {
+        this.uuid = UUID.randomUUID().toString();
+        this.name = name;
+        this.semanticVersion = semanticVersion;
+        this.isoId = isoId;
+        this.zoneId = zoneId;
+        this.minimumCpu = minimumCpu;
+        this.minimumRamSize = minimumRamSize;
+    }
+
+    @Override
+    public long getId() {
+        return id;
+    }
+
+    @Override
+    public String getUuid() {
+        return uuid;
+    }
+
+    @Override
+    public String getName() {
+        return name;
+    }
+
+    public void setName(String name) {
+        this.name = name;
+    }
+
+    @Override
+    public String getSemanticVersion() {
+        return semanticVersion;
+    }
+
+    public void setSemanticVersion(String semanticVersion) {
+        this.semanticVersion = semanticVersion;
+    }
+
+    @Override
+    public long getIsoId() {
+        return isoId;
+    }
+
+    public void setIsoId(long isoId) {
+        this.isoId = isoId;
+    }
+
+    @Override
+    public Long getZoneId() {
+        return zoneId;
+    }
+
+    public void setZoneId(Long zoneId) {
+        this.zoneId = zoneId;
+    }
+
+    @Override
+    public State getState() {
+        return this.state;
+    }
+
+    public void setState(State state) {
+        this.state = state;
+    }
+
+    @Override
+    public int getMinimumCpu() {
+        return minimumCpu;
+    }
+
+    public void setMinimumCpu(int minimumCpu) {
+        this.minimumCpu = minimumCpu;
+    }
+
+    @Override
+    public int getMinimumRamSize() {
+        return minimumRamSize;
+    }
+
+    public void setMinimumRamSize(int minimumRamSize) {
+        this.minimumRamSize = minimumRamSize;
+    }
+
+    public Date getCreated() {
+        return created;
+    }
+
+    public Date getRemoved() {
+        return removed;
+    }
+}
diff --git a/ui/plugins/plugins.js b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionEventTypes.java
similarity index 68%
copy from ui/plugins/plugins.js
copy to plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionEventTypes.java
index 6edfe88..4c979ba 100644
--- a/ui/plugins/plugins.js
+++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionEventTypes.java
@@ -14,10 +14,11 @@
 // KIND, either express or implied.  See the License for the
 // specific language governing permissions and limitations
 // under the License.
-(function($, cloudStack) {
-  cloudStack.plugins = [
-    //'testPlugin',
-    'cloudian',
-    'quota'
-  ];
-}(jQuery, cloudStack));
+
+package com.cloud.kubernetes.version;
+
+public class KubernetesVersionEventTypes {
+    public static final String EVENT_KUBERNETES_VERSION_ADD = "KUBERNETES.VERSION.ADD";
+    public static final String EVENT_KUBERNETES_VERSION_DELETE = "KUBERNETES.VERSION.DELETE";
+    public static final String EVENT_KUBERNETES_VERSION_UPDATE = "KUBERNETES.VERSION.UPDATE";
+}
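
These constants follow the usual CloudStack pattern of tagging service actions with an event type. A hypothetical sketch of how such a constant is typically consumed (the class and method below are invented for illustration only):

    import com.cloud.event.ActionEvent;
    import com.cloud.kubernetes.version.KubernetesVersionEventTypes;

    public class VersionEventSketch {
        @ActionEvent(eventType = KubernetesVersionEventTypes.EVENT_KUBERNETES_VERSION_ADD,
                eventDescription = "adding Kubernetes supported version")
        public void addVersion() {
            // Illustrative only: shows how the event-type constant would be attached to an action
            // so that CloudStack records an event when the method runs.
        }
    }
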
diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionManagerImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionManagerImpl.java
new file mode 100644
index 0000000..4eefc3f
--- /dev/null
+++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionManagerImpl.java
@@ -0,0 +1,388 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloud.kubernetes.version;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import javax.inject.Inject;
+
+import org.apache.cloudstack.api.ApiConstants;
+import org.apache.cloudstack.api.command.admin.kubernetes.version.AddKubernetesSupportedVersionCmd;
+import org.apache.cloudstack.api.command.admin.kubernetes.version.DeleteKubernetesSupportedVersionCmd;
+import org.apache.cloudstack.api.command.admin.kubernetes.version.UpdateKubernetesSupportedVersionCmd;
+import org.apache.cloudstack.api.command.user.iso.DeleteIsoCmd;
+import org.apache.cloudstack.api.command.user.iso.RegisterIsoCmd;
+import org.apache.cloudstack.api.command.user.kubernetes.version.ListKubernetesSupportedVersionsCmd;
+import org.apache.cloudstack.api.response.KubernetesSupportedVersionResponse;
+import org.apache.cloudstack.api.response.ListResponse;
+import org.apache.log4j.Logger;
+
+import com.cloud.api.query.dao.TemplateJoinDao;
+import com.cloud.api.query.vo.TemplateJoinVO;
+import com.cloud.dc.DataCenterVO;
+import com.cloud.dc.dao.DataCenterDao;
+import com.cloud.event.ActionEvent;
+import com.cloud.exception.InvalidParameterValueException;
+import com.cloud.exception.ResourceAllocationException;
+import com.cloud.kubernetes.cluster.KubernetesClusterService;
+import com.cloud.kubernetes.cluster.KubernetesClusterVO;
+import com.cloud.kubernetes.cluster.dao.KubernetesClusterDao;
+import com.cloud.kubernetes.version.dao.KubernetesSupportedVersionDao;
+import com.cloud.storage.VMTemplateVO;
+import com.cloud.storage.dao.VMTemplateDao;
+import com.cloud.storage.dao.VMTemplateZoneDao;
+import com.cloud.template.TemplateApiService;
+import com.cloud.template.VirtualMachineTemplate;
+import com.cloud.user.AccountManager;
+import com.cloud.utils.component.ComponentContext;
+import com.cloud.utils.component.ManagerBase;
+import com.cloud.utils.exception.CloudRuntimeException;
+import com.google.common.base.Strings;
+
+public class KubernetesVersionManagerImpl extends ManagerBase implements KubernetesVersionService {
+    public static final Logger LOGGER = Logger.getLogger(KubernetesVersionManagerImpl.class.getName());
+
+    @Inject
+    private KubernetesSupportedVersionDao kubernetesSupportedVersionDao;
+    @Inject
+    private KubernetesClusterDao kubernetesClusterDao;
+    @Inject
+    private AccountManager accountManager;
+    @Inject
+    private VMTemplateDao templateDao;
+    @Inject
+    private TemplateJoinDao templateJoinDao;
+    @Inject
+    private VMTemplateZoneDao templateZoneDao;
+    @Inject
+    private DataCenterDao dataCenterDao;
+    @Inject
+    private TemplateApiService templateService;
+
+    private KubernetesSupportedVersionResponse createKubernetesSupportedVersionResponse(final KubernetesSupportedVersion kubernetesSupportedVersion) {
+        KubernetesSupportedVersionResponse response = new KubernetesSupportedVersionResponse();
+        response.setObjectName("kubernetessupportedversion");
+        response.setId(kubernetesSupportedVersion.getUuid());
+        response.setName(kubernetesSupportedVersion.getName());
+        response.setSemanticVersion(kubernetesSupportedVersion.getSemanticVersion());
+        if (kubernetesSupportedVersion.getState() != null) {
+            response.setState(kubernetesSupportedVersion.getState().toString());
+        }
+        response.setMinimumCpu(kubernetesSupportedVersion.getMinimumCpu());
+        response.setMinimumRamSize(kubernetesSupportedVersion.getMinimumRamSize());
+        DataCenterVO zone = dataCenterDao.findById(kubernetesSupportedVersion.getZoneId());
+        if (zone != null) {
+            response.setZoneId(zone.getUuid());
+            response.setZoneName(zone.getName());
+        }
+        response.setSupportsHA(compareSemanticVersions(kubernetesSupportedVersion.getSemanticVersion(),
+                KubernetesClusterService.MIN_KUBERNETES_VERSION_HA_SUPPORT) >= 0);
+        TemplateJoinVO template = templateJoinDao.findById(kubernetesSupportedVersion.getIsoId());
+        if (template != null) {
+            response.setIsoId(template.getUuid());
+            response.setIsoName(template.getName());
+            response.setIsoState(template.getState().toString());
+        }
+        return response;
+    }
+
+    private ListResponse<KubernetesSupportedVersionResponse> createKubernetesSupportedVersionListResponse(List<KubernetesSupportedVersionVO> versions) {
+        List<KubernetesSupportedVersionResponse> responseList = new ArrayList<>();
+        for (KubernetesSupportedVersionVO version : versions) {
+            responseList.add(createKubernetesSupportedVersionResponse(version));
+        }
+        ListResponse<KubernetesSupportedVersionResponse> response = new ListResponse<>();
+        response.setResponses(responseList);
+        return response;
+    }
+
+    private static boolean isSemanticVersion(final String version) {
+        if (!version.matches("[0-9]+(\\.[0-9]+)*")) {
+            return false;
+        }
+        String[] parts = version.split("\\.");
+        return parts.length >= 3;
+    }
+
+    private List <KubernetesSupportedVersionVO> filterKubernetesSupportedVersions(List <KubernetesSupportedVersionVO> versions, final String minimumSemanticVersion) {
+        if (!Strings.isNullOrEmpty(minimumSemanticVersion)) {
+            for (int i = versions.size() - 1; i >= 0; --i) {
+                KubernetesSupportedVersionVO version = versions.get(i);
+                try {
+                    if (compareSemanticVersions(minimumSemanticVersion, version.getSemanticVersion()) > 0) {
+                        versions.remove(i);
+                    }
+                } catch (IllegalArgumentException e) {
+                    LOGGER.warn(String.format("Unable to compare Kubernetes version for supported version ID: %s with %s", version.getUuid(), minimumSemanticVersion));
+                    versions.remove(i);
+                }
+            }
+        }
+        return versions;
+    }
+
+    private VirtualMachineTemplate registerKubernetesVersionIso(final Long zoneId, final String versionName, final String isoUrl, final String isoChecksum) throws IllegalAccessException, NoSuchFieldException,
+            IllegalArgumentException, ResourceAllocationException {
+        String isoName = String.format("%s-Kubernetes-Binaries-ISO", versionName);
+        RegisterIsoCmd registerIsoCmd = new RegisterIsoCmd();
+        registerIsoCmd = ComponentContext.inject(registerIsoCmd);
+        registerIsoCmd.setIsoName(isoName);
+        registerIsoCmd.setPublic(true);
+        if (zoneId != null) {
+            registerIsoCmd.setZoneId(zoneId);
+        }
+        registerIsoCmd.setDisplayText(isoName);
+        registerIsoCmd.setBootable(false);
+        registerIsoCmd.setUrl(isoUrl);
+        if (!Strings.isNullOrEmpty(isoChecksum)) {
+            registerIsoCmd.setChecksum(isoChecksum);
+        }
+        registerIsoCmd.setAccountName(accountManager.getSystemAccount().getAccountName());
+        registerIsoCmd.setDomainId(accountManager.getSystemAccount().getDomainId());
+        return templateService.registerIso(registerIsoCmd);
+    }
+
+    private void deleteKubernetesVersionIso(long templateId) throws IllegalAccessException, NoSuchFieldException,
+            IllegalArgumentException {
+        DeleteIsoCmd deleteIsoCmd = new DeleteIsoCmd();
+        deleteIsoCmd = ComponentContext.inject(deleteIsoCmd);
+        deleteIsoCmd.setId(templateId);
+        templateService.deleteIso(deleteIsoCmd);
+    }
+
+    public static int compareSemanticVersions(String v1, String v2) throws IllegalArgumentException {
+        if (Strings.isNullOrEmpty(v1) || Strings.isNullOrEmpty(v2)) {
+            throw new IllegalArgumentException(String.format("Invalid version comparison with versions %s, %s", v1, v2));
+        }
+        if (!isSemanticVersion(v1)) {
+            throw new IllegalArgumentException(String.format("Invalid version format, %s", v1));
+        }
+        if (!isSemanticVersion(v2)) {
+            throw new IllegalArgumentException(String.format("Invalid version format, %s", v2));
+        }
+        String[] thisParts = v1.split("\\.");
+        String[] thatParts = v2.split("\\.");
+        int length = Math.max(thisParts.length, thatParts.length);
+        for (int i = 0; i < length; i++) {
+            int thisPart = i < thisParts.length ? Integer.parseInt(thisParts[i]) : 0;
+            int thatPart = i < thatParts.length ? Integer.parseInt(thatParts[i]) : 0;
+            if (thisPart < thatPart) {
+                return -1;
+            }
+            if (thisPart > thatPart) {
+                return 1;
+            }
+        }
+        return 0;
+    }
+
+    /**
+     * Returns whether a Kubernetes cluster can be upgraded from a given currentVersion to upgradeVersion.
+     * Kubernetes clusters can only be upgraded from one MINOR version to the next MINOR version, or between PATCH versions of the same MINOR.
+     * That is, MINOR versions cannot be skipped during an upgrade.
+     * For example, you can upgrade from 1.y to 1.y+1, but not from 1.y to 1.y+2.
+     * @param currentVersion the cluster's current semantic version
+     * @param upgradeVersion the semantic version to upgrade to
+     * @return true if the upgrade path is allowed
+     * @throws IllegalArgumentException if either version is malformed or the upgrade path is not allowed
+     */
+    public static boolean canUpgradeKubernetesVersion(final String currentVersion, final String upgradeVersion) throws IllegalArgumentException {
+        int versionDiff = compareSemanticVersions(upgradeVersion, currentVersion);
+        if (versionDiff == 0) {
+            throw new IllegalArgumentException(String.format("Kubernetes clusters can not be upgraded, current version: %s, upgrade version: %s", currentVersion, upgradeVersion));
+        } else if (versionDiff < 0) {
+            throw new IllegalArgumentException(String.format("Kubernetes clusters can not be downgraded, current version: %s, upgrade version: %s", currentVersion, upgradeVersion));
+        }
+        String[] thisParts = currentVersion.split("\\.");
+        String[] thatParts = upgradeVersion.split("\\.");
+        int majorVerDiff = Integer.parseInt(thatParts[0]) - Integer.parseInt(thisParts[0]);
+        int minorVerDiff = Integer.parseInt(thatParts[1]) - Integer.parseInt(thisParts[1]);
+
+        if (majorVerDiff != 0 || minorVerDiff > 1) {
+            throw new IllegalArgumentException(String.format("Kubernetes clusters can be upgraded between next minor or patch version releases, current version: %s, upgrade version: %s", currentVersion, upgradeVersion));
+        }
+        return true;
+    }
+
+    @Override
+    public ListResponse<KubernetesSupportedVersionResponse> listKubernetesSupportedVersions(final ListKubernetesSupportedVersionsCmd cmd) {
+        if (!KubernetesClusterService.KubernetesServiceEnabled.value()) {
+            throw new CloudRuntimeException("Kubernetes Service plugin is disabled");
+        }
+        final Long versionId = cmd.getId();
+        final Long zoneId = cmd.getZoneId();
+        String minimumSemanticVersion = cmd.getMinimumSemanticVersion();
+        final Long minimumKubernetesVersionId = cmd.getMinimumKubernetesVersionId();
+        if (!Strings.isNullOrEmpty(minimumSemanticVersion) && minimumKubernetesVersionId != null) {
+            throw new CloudRuntimeException(String.format("Both parameters %s and %s can not be passed together", ApiConstants.MIN_SEMANTIC_VERSION, ApiConstants.MIN_KUBERNETES_VERSION_ID));
+        }
+        if (minimumKubernetesVersionId != null) {
+            KubernetesSupportedVersionVO minVersion = kubernetesSupportedVersionDao.findById(minimumKubernetesVersionId);
+            if (minVersion == null) {
+                throw new InvalidParameterValueException(String.format("Invalid %s passed", ApiConstants.MIN_KUBERNETES_VERSION_ID));
+            }
+            minimumSemanticVersion = minVersion.getSemanticVersion();
+        }
+        List <KubernetesSupportedVersionVO> versions = new ArrayList<>();
+        if (versionId != null) {
+            KubernetesSupportedVersionVO version = kubernetesSupportedVersionDao.findById(versionId);
+            if (version != null && (zoneId == null || version.getZoneId() == null || version.getZoneId().equals(zoneId))) {
+                versions.add(version);
+            }
+        } else {
+            if (zoneId == null) {
+                versions = kubernetesSupportedVersionDao.listAll();
+            } else {
+                versions = kubernetesSupportedVersionDao.listAllInZone(zoneId);
+            }
+        }
+        versions = filterKubernetesSupportedVersions(versions, minimumSemanticVersion);
+
+        return createKubernetesSupportedVersionListResponse(versions);
+    }
+
+    @Override
+    @ActionEvent(eventType = KubernetesVersionEventTypes.EVENT_KUBERNETES_VERSION_ADD, eventDescription = "Adding Kubernetes supported version")
+    public KubernetesSupportedVersionResponse addKubernetesSupportedVersion(final AddKubernetesSupportedVersionCmd cmd) {
+        if (!KubernetesClusterService.KubernetesServiceEnabled.value()) {
+            throw new CloudRuntimeException("Kubernetes Service plugin is disabled");
+        }
+        String name = cmd.getName();
+        final String semanticVersion = cmd.getSemanticVersion();
+        final Long zoneId = cmd.getZoneId();
+        final String isoUrl = cmd.getUrl();
+        final String isoChecksum = cmd.getChecksum();
+        final Integer minimumCpu = cmd.getMinimumCpu();
+        final Integer minimumRamSize = cmd.getMinimumRamSize();
+        if (minimumCpu == null || minimumCpu < KubernetesClusterService.MIN_KUBERNETES_CLUSTER_NODE_CPU) {
+            throw new InvalidParameterValueException(String.format("Invalid value for %s parameter", ApiConstants.MIN_CPU_NUMBER));
+        }
+        if (minimumRamSize == null || minimumRamSize < KubernetesClusterService.MIN_KUBERNETES_CLUSTER_NODE_RAM_SIZE) {
+            throw new InvalidParameterValueException(String.format("Invalid value for %s parameter", ApiConstants.MIN_MEMORY));
+        }
+        if (compareSemanticVersions(semanticVersion, MIN_KUBERNETES_VERSION) < 0) {
+            throw new InvalidParameterValueException(String.format("New supported Kubernetes version cannot be added as %s is minimum version supported by Kubernetes Service", MIN_KUBERNETES_VERSION));
+        }
+        if (zoneId != null && dataCenterDao.findById(zoneId) == null) {
+            throw new InvalidParameterValueException("Invalid zone specified");
+        }
+        if (Strings.isNullOrEmpty(isoUrl)) {
+            throw new InvalidParameterValueException(String.format("Invalid URL for ISO specified, %s", isoUrl));
+        }
+        if (Strings.isNullOrEmpty(name)) {
+            name = String.format("v%s", semanticVersion);
+            if (zoneId != null) {
+                name = String.format("%s-%s", name, dataCenterDao.findById(zoneId).getName());
+            }
+        }
+
+        VMTemplateVO template = null;
+        try {
+            VirtualMachineTemplate vmTemplate = registerKubernetesVersionIso(zoneId, name, isoUrl, isoChecksum);
+            template = templateDao.findById(vmTemplate.getId());
+        } catch (IllegalAccessException | NoSuchFieldException | IllegalArgumentException | ResourceAllocationException ex) {
+            LOGGER.error(String.format("Unable to register binaries ISO for supported kubernetes version, %s, with url: %s", name, isoUrl), ex);
+            throw new CloudRuntimeException(String.format("Unable to register binaries ISO for supported kubernetes version, %s, with url: %s", name, isoUrl));
+        }
+
+        KubernetesSupportedVersionVO supportedVersionVO = new KubernetesSupportedVersionVO(name, semanticVersion, template.getId(), zoneId, minimumCpu, minimumRamSize);
+        supportedVersionVO = kubernetesSupportedVersionDao.persist(supportedVersionVO);
+
+        return createKubernetesSupportedVersionResponse(supportedVersionVO);
+    }
+
+    @Override
+    @ActionEvent(eventType = KubernetesVersionEventTypes.EVENT_KUBERNETES_VERSION_DELETE, eventDescription = "Deleting Kubernetes supported version", async = true)
+    public boolean deleteKubernetesSupportedVersion(final DeleteKubernetesSupportedVersionCmd cmd) {
+        if (!KubernetesClusterService.KubernetesServiceEnabled.value()) {
+            throw new CloudRuntimeException("Kubernetes Service plugin is disabled");
+        }
+        final Long versionId = cmd.getId();
+        KubernetesSupportedVersion version = kubernetesSupportedVersionDao.findById(versionId);
+        if (version == null) {
+            throw new InvalidParameterValueException("Invalid Kubernetes version id specified");
+        }
+        List<KubernetesClusterVO> clusters = kubernetesClusterDao.listAllByKubernetesVersion(versionId);
+        if (clusters.size() > 0) {
+            throw new CloudRuntimeException(String.format("Unable to delete Kubernetes version ID: %s. Existing clusters currently using the version.", version.getUuid()));
+        }
+
+        VMTemplateVO template = templateDao.findByIdIncludingRemoved(version.getIsoId());
+        if (template == null) {
+            LOGGER.warn(String.format("Unable to find ISO associated with supported Kubernetes version ID: %s", version.getUuid()));
+        }
+        if (template != null && template.getRemoved() == null) { // Delete ISO
+            try {
+                deleteKubernetesVersionIso(template.getId());
+            } catch (IllegalAccessException | NoSuchFieldException | IllegalArgumentException ex) {
+                LOGGER.error(String.format("Unable to delete binaries ISO ID: %s associated with supported kubernetes version ID: %s", template.getUuid(), version.getUuid()), ex);
+                throw new CloudRuntimeException(String.format("Unable to delete binaries ISO ID: %s associated with supported kubernetes version ID: %s", template.getUuid(), version.getUuid()));
+            }
+        }
+        return kubernetesSupportedVersionDao.remove(version.getId());
+    }
+
+    @Override
+    @ActionEvent(eventType = KubernetesVersionEventTypes.EVENT_KUBERNETES_VERSION_UPDATE, eventDescription = "Updating Kubernetes supported version")
+    public KubernetesSupportedVersionResponse updateKubernetesSupportedVersion(final UpdateKubernetesSupportedVersionCmd cmd) {
+        if (!KubernetesClusterService.KubernetesServiceEnabled.value()) {
+            throw new CloudRuntimeException("Kubernetes Service plugin is disabled");
+        }
+        final Long versionId = cmd.getId();
+        KubernetesSupportedVersion.State state = null;
+        KubernetesSupportedVersionVO version = kubernetesSupportedVersionDao.findById(versionId);
+        if (version == null) {
+            throw new InvalidParameterValueException("Invalid Kubernetes version id specified");
+        }
+        try {
+            state = KubernetesSupportedVersion.State.valueOf(cmd.getState());
+        } catch (IllegalArgumentException iae) {
+            throw new InvalidParameterValueException(String.format("Invalid value for %s parameter", ApiConstants.STATE));
+        }
+        if (!state.equals(version.getState())) {
+            version = kubernetesSupportedVersionDao.createForUpdate(version.getId());
+            version.setState(state);
+            if (!kubernetesSupportedVersionDao.update(version.getId(), version)) {
+                throw new CloudRuntimeException(String.format("Failed to update Kubernetes supported version ID: %s", version.getUuid()));
+            }
+            version = kubernetesSupportedVersionDao.findById(versionId);
+        }
+        return createKubernetesSupportedVersionResponse(version);
+    }
+
+    @Override
+    public List<Class<?>> getCommands() {
+        List<Class<?>> cmdList = new ArrayList<Class<?>>();
+        if (!KubernetesClusterService.KubernetesServiceEnabled.value()) {
+            return cmdList;
+        }
+        cmdList.add(AddKubernetesSupportedVersionCmd.class);
+        cmdList.add(ListKubernetesSupportedVersionsCmd.class);
+        cmdList.add(DeleteKubernetesSupportedVersionCmd.class);
+        cmdList.add(UpdateKubernetesSupportedVersionCmd.class);
+        return cmdList;
+    }
+}
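
A note on the version helpers above: compareSemanticVersions() and canUpgradeKubernetesVersion() are public static, so the upgrade rule they enforce is easy to demonstrate. The sketch below is illustration only and not part of the patch; it assumes the enclosing manager implementation class is com.cloud.kubernetes.version.KubernetesVersionManagerImpl and that the helpers are reachable from the caller.

    // Illustration only, not part of the patch. Assumes the static helpers shown
    // above belong to com.cloud.kubernetes.version.KubernetesVersionManagerImpl
    // and are visible to the caller.
    import com.cloud.kubernetes.version.KubernetesVersionManagerImpl;

    public class KubernetesVersionUpgradePathExample {
        public static void main(String[] args) {
            // Patch release within the same minor version: allowed
            System.out.println(KubernetesVersionManagerImpl.canUpgradeKubernetesVersion("1.16.0", "1.16.3")); // true
            // Next minor version: allowed
            System.out.println(KubernetesVersionManagerImpl.canUpgradeKubernetesVersion("1.15.2", "1.16.0")); // true
            // Skipping a minor version, downgrading, or passing a two-part version all throw
            String[][] rejected = {{"1.14.0", "1.16.0"}, {"1.16.0", "1.15.2"}, {"1.16", "1.16.3"}};
            for (String[] pair : rejected) {
                try {
                    KubernetesVersionManagerImpl.canUpgradeKubernetesVersion(pair[0], pair[1]);
                } catch (IllegalArgumentException e) {
                    System.out.println("rejected: " + e.getMessage());
                }
            }
        }
    }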
diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionService.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionService.java
new file mode 100644
index 0000000..8e4cd03
--- /dev/null
+++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionService.java
@@ -0,0 +1,36 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloud.kubernetes.version;
+
+import org.apache.cloudstack.api.command.admin.kubernetes.version.AddKubernetesSupportedVersionCmd;
+import org.apache.cloudstack.api.command.admin.kubernetes.version.DeleteKubernetesSupportedVersionCmd;
+import org.apache.cloudstack.api.command.admin.kubernetes.version.UpdateKubernetesSupportedVersionCmd;
+import org.apache.cloudstack.api.command.user.kubernetes.version.ListKubernetesSupportedVersionsCmd;
+import org.apache.cloudstack.api.response.KubernetesSupportedVersionResponse;
+import org.apache.cloudstack.api.response.ListResponse;
+
+import com.cloud.utils.component.PluggableService;
+import com.cloud.utils.exception.CloudRuntimeException;
+
+public interface KubernetesVersionService extends PluggableService {
+    static final String MIN_KUBERNETES_VERSION = "1.11.0";
+    ListResponse<KubernetesSupportedVersionResponse> listKubernetesSupportedVersions(ListKubernetesSupportedVersionsCmd cmd);
+    KubernetesSupportedVersionResponse addKubernetesSupportedVersion(AddKubernetesSupportedVersionCmd cmd) throws CloudRuntimeException;
+    boolean deleteKubernetesSupportedVersion(DeleteKubernetesSupportedVersionCmd cmd) throws CloudRuntimeException;
+    KubernetesSupportedVersionResponse updateKubernetesSupportedVersion(UpdateKubernetesSupportedVersionCmd cmd) throws CloudRuntimeException;
+}
diff --git a/ui/plugins/plugins.js b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/dao/KubernetesSupportedVersionDao.java
similarity index 69%
copy from ui/plugins/plugins.js
copy to plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/dao/KubernetesSupportedVersionDao.java
index 6edfe88..69de862 100644
--- a/ui/plugins/plugins.js
+++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/dao/KubernetesSupportedVersionDao.java
@@ -14,10 +14,14 @@
 // KIND, either express or implied.  See the License for the
 // specific language governing permissions and limitations
 // under the License.
-(function($, cloudStack) {
-  cloudStack.plugins = [
-    //'testPlugin',
-    'cloudian',
-    'quota'
-  ];
-}(jQuery, cloudStack));
+
+package com.cloud.kubernetes.version.dao;
+
+import java.util.List;
+
+import com.cloud.kubernetes.version.KubernetesSupportedVersionVO;
+import com.cloud.utils.db.GenericDao;
+
+public interface KubernetesSupportedVersionDao extends GenericDao<KubernetesSupportedVersionVO, Long> {
+    List<KubernetesSupportedVersionVO> listAllInZone(long dataCenterId);
+}
diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/dao/KubernetesSupportedVersionDaoImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/dao/KubernetesSupportedVersionDaoImpl.java
new file mode 100644
index 0000000..5dd6eff
--- /dev/null
+++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/dao/KubernetesSupportedVersionDaoImpl.java
@@ -0,0 +1,42 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloud.kubernetes.version.dao;
+
+import java.util.List;
+
+import org.springframework.stereotype.Component;
+
+import com.cloud.kubernetes.version.KubernetesSupportedVersionVO;
+import com.cloud.utils.db.GenericDaoBase;
+import com.cloud.utils.db.SearchCriteria;
+
+@Component
+public class KubernetesSupportedVersionDaoImpl extends GenericDaoBase<KubernetesSupportedVersionVO, Long> implements KubernetesSupportedVersionDao {
+    public KubernetesSupportedVersionDaoImpl() {
+    }
+
+    @Override
+    public List<KubernetesSupportedVersionVO> listAllInZone(long dataCenterId) {
+        SearchCriteria<KubernetesSupportedVersionVO> sc = createSearchCriteria();
+        SearchCriteria<KubernetesSupportedVersionVO> scc = createSearchCriteria();
+        scc.addOr("zoneId", SearchCriteria.Op.EQ, dataCenterId);
+        scc.addOr("zoneId", SearchCriteria.Op.NULL);
+        sc.addAnd("zoneId", SearchCriteria.Op.SC, scc);
+        return listBy(sc);
+    }
+}
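
A note on listAllInZone() above: the nested OR criteria return both versions registered for the given zone and versions with a NULL zone id, so zone-agnostic versions are always included alongside zone-specific ones. A minimal, hypothetical consumer is sketched below; the wrapper class and method are illustrative only and not part of the patch.

    // Hypothetical consumer of the DAO above; illustration only, not part of the patch.
    import java.util.List;

    import javax.inject.Inject;

    import org.springframework.stereotype.Component;

    import com.cloud.kubernetes.version.KubernetesSupportedVersionVO;
    import com.cloud.kubernetes.version.dao.KubernetesSupportedVersionDao;

    @Component
    public class KubernetesVersionLookupExample {
        @Inject
        private KubernetesSupportedVersionDao kubernetesSupportedVersionDao;

        // Returns versions usable in the given zone: rows whose zone id equals the
        // zone plus rows with a NULL zone id (versions available in every zone).
        public List<KubernetesSupportedVersionVO> usableVersions(long zoneId) {
            return kubernetesSupportedVersionDao.listAllInZone(zoneId);
        }
    }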
diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/admin/kubernetes/version/AddKubernetesSupportedVersionCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/admin/kubernetes/version/AddKubernetesSupportedVersionCmd.java
new file mode 100644
index 0000000..a85e6ee
--- /dev/null
+++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/admin/kubernetes/version/AddKubernetesSupportedVersionCmd.java
@@ -0,0 +1,153 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.cloudstack.api.command.admin.kubernetes.version;
+
+import javax.inject.Inject;
+
+import org.apache.cloudstack.acl.RoleType;
+import org.apache.cloudstack.api.APICommand;
+import org.apache.cloudstack.api.ApiConstants;
+import org.apache.cloudstack.api.ApiErrorCode;
+import org.apache.cloudstack.api.BaseCmd;
+import org.apache.cloudstack.api.Parameter;
+import org.apache.cloudstack.api.ResponseObject;
+import org.apache.cloudstack.api.ServerApiException;
+import org.apache.cloudstack.api.command.admin.AdminCmd;
+import org.apache.cloudstack.api.response.KubernetesSupportedVersionResponse;
+import org.apache.cloudstack.api.response.ZoneResponse;
+import org.apache.cloudstack.context.CallContext;
+import org.apache.log4j.Logger;
+
+import com.cloud.exception.ConcurrentOperationException;
+import com.cloud.exception.InvalidParameterValueException;
+import com.cloud.kubernetes.version.KubernetesSupportedVersion;
+import com.cloud.kubernetes.version.KubernetesVersionService;
+import com.cloud.utils.exception.CloudRuntimeException;
+import com.google.common.base.Strings;
+
+@APICommand(name = AddKubernetesSupportedVersionCmd.APINAME,
+        description = "Add a supported Kubernetes version",
+        responseObject = KubernetesSupportedVersionResponse.class,
+        responseView = ResponseObject.ResponseView.Full,
+        entityType = {KubernetesSupportedVersion.class},
+        authorized = {RoleType.Admin})
+public class AddKubernetesSupportedVersionCmd extends BaseCmd implements AdminCmd {
+    public static final Logger LOGGER = Logger.getLogger(AddKubernetesSupportedVersionCmd.class.getName());
+    public static final String APINAME = "addKubernetesSupportedVersion";
+
+    @Inject
+    private KubernetesVersionService kubernetesVersionService;
+
+    /////////////////////////////////////////////////////
+    //////////////// API parameters /////////////////////
+    /////////////////////////////////////////////////////
+    @Parameter(name = ApiConstants.NAME, type = CommandType.STRING,
+            description = "the name of the Kubernetes supported version")
+    private String name;
+
+    @Parameter(name = ApiConstants.SEMANTIC_VERSION, type = CommandType.STRING, required = true,
+            description = "the semantic version of the Kubernetes version")
+    private String semanticVersion;
+
+    @Parameter(name = ApiConstants.ZONE_ID, type = CommandType.UUID,
+            entityType = ZoneResponse.class,
+            description = "the ID of the zone in which Kubernetes supported version will be available")
+    private Long zoneId;
+
+    @Parameter(name = ApiConstants.URL, type = CommandType.STRING,
+            description = "the URL of the binaries ISO for Kubernetes supported version")
+    private String url;
+
+    @Parameter(name = ApiConstants.CHECKSUM, type = CommandType.STRING,
+            description = "the checksum value of the binaries ISO. " + ApiConstants.CHECKSUM_PARAMETER_PREFIX_DESCRIPTION)
+    private String checksum;
+
+    @Parameter(name = ApiConstants.MIN_CPU_NUMBER, type = CommandType.INTEGER, required = true,
+            description = "the minimum number of CPUs to be set with the Kubernetes version")
+    private Integer minimumCpu;
+
+    @Parameter(name = ApiConstants.MIN_MEMORY, type = CommandType.INTEGER, required = true,
+            description = "the minimum RAM size in MB to be set with the Kubernetes version")
+    private Integer minimumRamSize;
+
+    /////////////////////////////////////////////////////
+    /////////////////// Accessors ///////////////////////
+    /////////////////////////////////////////////////////
+
+
+    public String getName() {
+        return name;
+    }
+
+    public String getSemanticVersion() {
+        if (Strings.isNullOrEmpty(semanticVersion)) {
+            throw new InvalidParameterValueException("Version cannot be null or empty");
+        }
+        if (!semanticVersion.matches("[0-9]+(\\.[0-9]+)*")) {
+            throw new IllegalArgumentException("Invalid version format. A semantic version is needed");
+        }
+        return semanticVersion;
+    }
+
+    public Long getZoneId() {
+        return zoneId;
+    }
+
+    public String getUrl() {
+        return url;
+    }
+
+    public String getChecksum() {
+        return checksum;
+    }
+
+    public Integer getMinimumCpu() {
+        return minimumCpu;
+    }
+
+    public Integer getMinimumRamSize() {
+        return minimumRamSize;
+    }
+
+    @Override
+    public String getCommandName() {
+        return APINAME.toLowerCase() + "response";
+    }
+
+    @Override
+    public long getEntityOwnerId() {
+        return CallContext.current().getCallingAccountId();
+    }
+
+    /////////////////////////////////////////////////////
+    /////////////// API Implementation///////////////////
+    /////////////////////////////////////////////////////
+    @Override
+    public void execute() throws ServerApiException, ConcurrentOperationException {
+        try {
+            KubernetesSupportedVersionResponse response = kubernetesVersionService.addKubernetesSupportedVersion(this);
+            if (response == null) {
+                throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to add Kubernetes supported version");
+            }
+            response.setResponseName(getCommandName());
+            setResponseObject(response);
+        } catch (CloudRuntimeException ex) {
+            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage());
+        }
+    }
+}
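
The semanticversion parameter is validated in two places: getSemanticVersion() above only enforces the digits-and-dots pattern, while the version service later requires a three-part MAJOR.MINOR.PATCH value of at least 1.11.0. A standalone sketch of what the command-level regex accepts (illustration only, not part of the patch):

    // Illustration only: strings accepted or rejected by the regex used in
    // getSemanticVersion() above. The three-part requirement and the minimum
    // supported version are enforced later by the version service.
    public class SemanticVersionFormatExample {
        private static final String PATTERN = "[0-9]+(\\.[0-9]+)*";

        public static void main(String[] args) {
            System.out.println("1.16.3".matches(PATTERN));     // true
            System.out.println("1.16".matches(PATTERN));       // true here, but rejected later (needs MAJOR.MINOR.PATCH)
            System.out.println("v1.16.3".matches(PATTERN));    // false: leading "v" not allowed
            System.out.println("1.16.3-rc1".matches(PATTERN)); // false: pre-release suffixes not allowed
        }
    }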
diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/DeleteIsoCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/admin/kubernetes/version/DeleteKubernetesSupportedVersionCmd.java
similarity index 51%
copy from api/src/main/java/org/apache/cloudstack/api/command/user/iso/DeleteIsoCmd.java
copy to plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/admin/kubernetes/version/DeleteKubernetesSupportedVersionCmd.java
index 103e922..0248914 100644
--- a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/DeleteIsoCmd.java
+++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/admin/kubernetes/version/DeleteKubernetesSupportedVersionCmd.java
@@ -14,109 +14,91 @@
 // KIND, either express or implied.  See the License for the
 // specific language governing permissions and limitations
 // under the License.
-package org.apache.cloudstack.api.command.user.iso;
 
-import org.apache.log4j.Logger;
+package org.apache.cloudstack.api.command.admin.kubernetes.version;
+
+import javax.inject.Inject;
 
+import org.apache.cloudstack.acl.RoleType;
 import org.apache.cloudstack.api.APICommand;
-import org.apache.cloudstack.api.ApiCommandJobType;
 import org.apache.cloudstack.api.ApiConstants;
 import org.apache.cloudstack.api.ApiErrorCode;
 import org.apache.cloudstack.api.BaseAsyncCmd;
 import org.apache.cloudstack.api.Parameter;
 import org.apache.cloudstack.api.ServerApiException;
+import org.apache.cloudstack.api.command.admin.AdminCmd;
+import org.apache.cloudstack.api.response.KubernetesSupportedVersionResponse;
 import org.apache.cloudstack.api.response.SuccessResponse;
-import org.apache.cloudstack.api.response.TemplateResponse;
-import org.apache.cloudstack.api.response.ZoneResponse;
 import org.apache.cloudstack.context.CallContext;
+import org.apache.log4j.Logger;
+
+import com.cloud.exception.ConcurrentOperationException;
+import com.cloud.kubernetes.version.KubernetesSupportedVersion;
+import com.cloud.kubernetes.version.KubernetesVersionEventTypes;
+import com.cloud.kubernetes.version.KubernetesVersionService;
+import com.cloud.utils.exception.CloudRuntimeException;
 
-import com.cloud.event.EventTypes;
-import com.cloud.template.VirtualMachineTemplate;
-import com.cloud.user.Account;
+@APICommand(name = DeleteKubernetesSupportedVersionCmd.APINAME,
+        description = "Deletes a Kubernetes cluster",
+        responseObject = SuccessResponse.class,
+        entityType = {KubernetesSupportedVersion.class},
+        authorized = {RoleType.Admin})
+public class DeleteKubernetesSupportedVersionCmd extends BaseAsyncCmd implements AdminCmd {
+    public static final Logger LOGGER = Logger.getLogger(DeleteKubernetesSupportedVersionCmd.class.getName());
+    public static final String APINAME = "deleteKubernetesSupportedVersion";
 
-@APICommand(name = "deleteIso", description = "Deletes an ISO file.", responseObject = SuccessResponse.class,
-        requestHasSensitiveInfo = false, responseHasSensitiveInfo = false)
-public class DeleteIsoCmd extends BaseAsyncCmd {
-    public static final Logger s_logger = Logger.getLogger(DeleteIsoCmd.class.getName());
-    private static final String s_name = "deleteisoresponse";
+    @Inject
+    private KubernetesVersionService kubernetesVersionService;
 
     /////////////////////////////////////////////////////
     //////////////// API parameters /////////////////////
     /////////////////////////////////////////////////////
-
-    @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = TemplateResponse.class, required = true, description = "the ID of the ISO file")
+    @Parameter(name = ApiConstants.ID, type = CommandType.UUID,
+            entityType = KubernetesSupportedVersionResponse.class,
+            description = "the ID of the Kubernetes supported version",
+            required = true)
     private Long id;
 
-    @Parameter(name = ApiConstants.ZONE_ID,
-               type = CommandType.UUID,
-               entityType = ZoneResponse.class,
-               description = "the ID of the zone of the ISO file. If not specified, the ISO will be deleted from all the zones")
-    private Long zoneId;
-
     /////////////////////////////////////////////////////
     /////////////////// Accessors ///////////////////////
     /////////////////////////////////////////////////////
-
     public Long getId() {
         return id;
     }
 
-    public Long getZoneId() {
-        return zoneId;
-    }
-
-    /////////////////////////////////////////////////////
-    /////////////// API Implementation///////////////////
-    /////////////////////////////////////////////////////
-
     @Override
     public String getCommandName() {
-        return s_name;
-    }
-
-    public static String getStaticName() {
-        return s_name;
+        return APINAME.toLowerCase() + "response";
     }
 
     @Override
     public long getEntityOwnerId() {
-        VirtualMachineTemplate iso = _entityMgr.findById(VirtualMachineTemplate.class, getId());
-        if (iso != null) {
-            return iso.getAccountId();
-        }
-
-        return Account.ACCOUNT_ID_SYSTEM;
+        return CallContext.current().getCallingAccountId();
     }
 
     @Override
     public String getEventType() {
-        return EventTypes.EVENT_ISO_DELETE;
+        return KubernetesVersionEventTypes.EVENT_KUBERNETES_VERSION_DELETE;
     }
 
     @Override
     public String getEventDescription() {
-        return "Deleting ISO " + getId();
-    }
-
-    @Override
-    public ApiCommandJobType getInstanceType() {
-        return ApiCommandJobType.Iso;
-    }
-
-    @Override
-    public Long getInstanceId() {
-        return getId();
+        return "Deleting Kubernetes supported version " + getId();
     }
 
+    /////////////////////////////////////////////////////
+    /////////////// API Implementation///////////////////
+    /////////////////////////////////////////////////////
     @Override
-    public void execute() {
-        CallContext.current().setEventDetails("ISO Id: " + getId());
-        boolean result = _templateService.deleteIso(this);
-        if (result) {
+    public void execute() throws ServerApiException, ConcurrentOperationException {
+        try {
+            if (!kubernetesVersionService.deleteKubernetesSupportedVersion(this)) {
+                throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to delete Kubernetes supported version ID: %d", getId()));
+            }
             SuccessResponse response = new SuccessResponse(getCommandName());
-            this.setResponseObject(response);
-        } else {
-            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to delete ISO");
+            setResponseObject(response);
+        } catch (CloudRuntimeException ex) {
+            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage());
         }
     }
 }
diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/admin/kubernetes/version/UpdateKubernetesSupportedVersionCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/admin/kubernetes/version/UpdateKubernetesSupportedVersionCmd.java
new file mode 100644
index 0000000..bf888c5
--- /dev/null
+++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/admin/kubernetes/version/UpdateKubernetesSupportedVersionCmd.java
@@ -0,0 +1,103 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.cloudstack.api.command.admin.kubernetes.version;
+
+import javax.inject.Inject;
+
+import org.apache.cloudstack.acl.RoleType;
+import org.apache.cloudstack.api.APICommand;
+import org.apache.cloudstack.api.ApiConstants;
+import org.apache.cloudstack.api.ApiErrorCode;
+import org.apache.cloudstack.api.BaseCmd;
+import org.apache.cloudstack.api.Parameter;
+import org.apache.cloudstack.api.ResponseObject;
+import org.apache.cloudstack.api.ServerApiException;
+import org.apache.cloudstack.api.command.admin.AdminCmd;
+import org.apache.cloudstack.api.response.KubernetesSupportedVersionResponse;
+import org.apache.log4j.Logger;
+
+import com.cloud.exception.ConcurrentOperationException;
+import com.cloud.kubernetes.version.KubernetesSupportedVersion;
+import com.cloud.kubernetes.version.KubernetesVersionService;
+import com.cloud.utils.exception.CloudRuntimeException;
+
+@APICommand(name = UpdateKubernetesSupportedVersionCmd.APINAME,
+        description = "Update a supported Kubernetes version",
+        responseObject = KubernetesSupportedVersionResponse.class,
+        responseView = ResponseObject.ResponseView.Full,
+        entityType = {KubernetesSupportedVersion.class},
+        authorized = {RoleType.Admin})
+public class UpdateKubernetesSupportedVersionCmd extends BaseCmd implements AdminCmd {
+    public static final Logger LOGGER = Logger.getLogger(UpdateKubernetesSupportedVersionCmd.class.getName());
+    public static final String APINAME = "updateKubernetesSupportedVersion";
+
+    @Inject
+    private KubernetesVersionService kubernetesVersionService;
+
+    /////////////////////////////////////////////////////
+    //////////////// API parameters /////////////////////
+    /////////////////////////////////////////////////////
+    @Parameter(name = ApiConstants.ID, type = BaseCmd.CommandType.UUID,
+            entityType = KubernetesSupportedVersionResponse.class,
+            description = "the ID of the Kubernetes supported version",
+            required = true)
+    private Long id;
+
+    @Parameter(name = ApiConstants.STATE, type = CommandType.STRING,
+            description = "the enabled or disabled state of the Kubernetes supported version",
+            required = true)
+    private String state;
+
+    /////////////////////////////////////////////////////
+    /////////////////// Accessors ///////////////////////
+    /////////////////////////////////////////////////////
+    public Long getId() {
+        return id;
+    }
+
+    public String getState() {
+        return state;
+    }
+
+    @Override
+    public String getCommandName() {
+        return APINAME.toLowerCase() + "response";
+    }
+
+    @Override
+    public long getEntityOwnerId() {
+        return 0;
+    }
+
+    /////////////////////////////////////////////////////
+    /////////////// API Implementation///////////////////
+    /////////////////////////////////////////////////////
+    @Override
+    public void execute() throws ServerApiException, ConcurrentOperationException {
+        try {
+            KubernetesSupportedVersionResponse response = kubernetesVersionService.updateKubernetesSupportedVersion(this);
+            if (response == null) {
+                throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to update Kubernetes supported version");
+            }
+            response.setResponseName(getCommandName());
+            setResponseObject(response);
+        } catch (CloudRuntimeException ex) {
+            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage());
+        }
+    }
+}
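
A behavioural note on the state parameter: the version service resolves it with KubernetesSupportedVersion.State.valueOf(), and Enum.valueOf() is case-sensitive, so the supplied string must match the enum constant exactly or the API reports an invalid state parameter. The sketch below is illustration only; the constant names Enabled and Disabled are assumptions taken from the parameter description, and only the case-sensitivity of valueOf() is certain.

    // Illustration only, not part of the patch. The real enum lives in
    // KubernetesSupportedVersion; the constant names below are assumed from the
    // parameter description above.
    public class VersionStateParseExample {
        enum State { Enabled, Disabled }

        public static void main(String[] args) {
            System.out.println(State.valueOf("Enabled"));  // parses fine
            try {
                State.valueOf("enabled");                  // wrong case: IllegalArgumentException
            } catch (IllegalArgumentException e) {
                System.out.println("rejected: " + e.getMessage());
            }
        }
    }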
diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/CreateKubernetesClusterCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/CreateKubernetesClusterCmd.java
new file mode 100644
index 0000000..32b07c4
--- /dev/null
+++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/CreateKubernetesClusterCmd.java
@@ -0,0 +1,297 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.api.command.user.kubernetes.cluster;
+
+import javax.inject.Inject;
+
+import org.apache.cloudstack.acl.RoleType;
+import org.apache.cloudstack.acl.SecurityChecker.AccessType;
+import org.apache.cloudstack.api.ACL;
+import org.apache.cloudstack.api.APICommand;
+import org.apache.cloudstack.api.ApiCommandJobType;
+import org.apache.cloudstack.api.ApiConstants;
+import org.apache.cloudstack.api.ApiErrorCode;
+import org.apache.cloudstack.api.BaseAsyncCreateCmd;
+import org.apache.cloudstack.api.Parameter;
+import org.apache.cloudstack.api.ResponseObject.ResponseView;
+import org.apache.cloudstack.api.ServerApiException;
+import org.apache.cloudstack.api.response.DomainResponse;
+import org.apache.cloudstack.api.response.KubernetesClusterResponse;
+import org.apache.cloudstack.api.response.KubernetesSupportedVersionResponse;
+import org.apache.cloudstack.api.response.NetworkResponse;
+import org.apache.cloudstack.api.response.ProjectResponse;
+import org.apache.cloudstack.api.response.ServiceOfferingResponse;
+import org.apache.cloudstack.api.response.ZoneResponse;
+import org.apache.cloudstack.context.CallContext;
+import org.apache.log4j.Logger;
+
+import com.cloud.kubernetes.cluster.KubernetesCluster;
+import com.cloud.kubernetes.cluster.KubernetesClusterEventTypes;
+import com.cloud.kubernetes.cluster.KubernetesClusterService;
+import com.cloud.utils.exception.CloudRuntimeException;
+
+@APICommand(name = CreateKubernetesClusterCmd.APINAME,
+        description = "Creates a Kubernetes cluster",
+        responseObject = KubernetesClusterResponse.class,
+        responseView = ResponseView.Restricted,
+        entityType = {KubernetesCluster.class},
+        requestHasSensitiveInfo = false,
+        responseHasSensitiveInfo = true,
+        authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User})
+public class CreateKubernetesClusterCmd extends BaseAsyncCreateCmd {
+    public static final Logger LOGGER = Logger.getLogger(CreateKubernetesClusterCmd.class.getName());
+    public static final String APINAME = "createKubernetesCluster";
+
+    @Inject
+    public KubernetesClusterService kubernetesClusterService;
+
+    /////////////////////////////////////////////////////
+    //////////////// API parameters /////////////////////
+    /////////////////////////////////////////////////////
+
+    @Parameter(name = ApiConstants.NAME, type = CommandType.STRING, required = true, description = "name for the Kubernetes cluster")
+    private String name;
+
+    @Parameter(name = ApiConstants.DESCRIPTION, type = CommandType.STRING, required = true, description = "description for the Kubernetes cluster")
+    private String description;
+
+    @ACL(accessType = AccessType.UseEntry)
+    @Parameter(name = ApiConstants.ZONE_ID, type = CommandType.UUID, entityType = ZoneResponse.class, required = true,
+            description = "availability zone in which Kubernetes cluster to be launched")
+    private Long zoneId;
+
+    @Parameter(name = ApiConstants.KUBERNETES_VERSION_ID, type = CommandType.UUID, entityType = KubernetesSupportedVersionResponse.class, required = true,
+            description = "Kubernetes version with which cluster to be launched")
+    private Long kubernetesVersionId;
+
+    @ACL(accessType = AccessType.UseEntry)
+    @Parameter(name = ApiConstants.SERVICE_OFFERING_ID, type = CommandType.UUID, entityType = ServiceOfferingResponse.class,
+            required = true, description = "the ID of the service offering for the virtual machines in the cluster.")
+    private Long serviceOfferingId;
+
+    @ACL(accessType = AccessType.UseEntry)
+    @Parameter(name = ApiConstants.ACCOUNT, type = CommandType.STRING, description = "an optional account for the" +
+            " Kubernetes cluster. Must be used with domainId.")
+    private String accountName;
+
+    @ACL(accessType = AccessType.UseEntry)
+    @Parameter(name = ApiConstants.DOMAIN_ID, type = CommandType.UUID, entityType = DomainResponse.class,
+            description = "an optional domainId for the virtual machine. If the account parameter is used, domainId must also be used.")
+    private Long domainId;
+
+    @ACL(accessType = AccessType.UseEntry)
+    @Parameter(name = ApiConstants.PROJECT_ID, type = CommandType.UUID, entityType = ProjectResponse.class,
+            description = "Deploy cluster for the project")
+    private Long projectId;
+
+    @ACL(accessType = AccessType.UseEntry)
+    @Parameter(name = ApiConstants.NETWORK_ID, type = CommandType.UUID, entityType = NetworkResponse.class,
+            description = "Network in which Kubernetes cluster is to be launched")
+    private Long networkId;
+
+    @ACL(accessType = AccessType.UseEntry)
+    @Parameter(name = ApiConstants.SSH_KEYPAIR, type = CommandType.STRING,
+            description = "name of the ssh key pair used to login to the virtual machines")
+    private String sshKeyPairName;
+
+    @Parameter(name = ApiConstants.MASTER_NODES, type = CommandType.LONG,
+            description = "number of Kubernetes cluster master nodes, default is 1")
+    private Long masterNodes;
+
+    @Parameter(name = ApiConstants.EXTERNAL_LOAD_BALANCER_IP_ADDRESS, type = CommandType.STRING,
+            description = "external load balancer IP address to be used when a HA Kubernetes cluster is deployed on a shared network")
+    private String externalLoadBalancerIpAddress;
+
+    @Parameter(name = ApiConstants.SIZE, type = CommandType.LONG,
+            required = true, description = "number of Kubernetes cluster worker nodes")
+    private Long clusterSize;
+
+    @Parameter(name = ApiConstants.DOCKER_REGISTRY_USER_NAME, type = CommandType.STRING,
+            description = "user name for the private Docker image registry")
+    private String dockerRegistryUserName;
+
+    @Parameter(name = ApiConstants.DOCKER_REGISTRY_PASSWORD, type = CommandType.STRING,
+            description = "password for the private Docker image registry")
+    private String dockerRegistryPassword;
+
+    @Parameter(name = ApiConstants.DOCKER_REGISTRY_URL, type = CommandType.STRING,
+            description = "URL for the private Docker image registry")
+    private String dockerRegistryUrl;
+
+    @Parameter(name = ApiConstants.DOCKER_REGISTRY_EMAIL, type = CommandType.STRING,
+            description = "email of the private Docker image registry user")
+    private String dockerRegistryEmail;
+
+    @Parameter(name = ApiConstants.NODE_ROOT_DISK_SIZE, type = CommandType.LONG,
+            description = "root disk size of root disk for each node")
+    private Long nodeRootDiskSize;
+
+    /////////////////////////////////////////////////////
+    /////////////////// Accessors ///////////////////////
+    /////////////////////////////////////////////////////
+
+    public String getAccountName() {
+        if (accountName == null) {
+            return CallContext.current().getCallingAccount().getAccountName();
+        }
+        return accountName;
+    }
+
+    public String getDisplayName() {
+        return description;
+    }
+
+    public Long getDomainId() {
+        if (domainId == null) {
+            return CallContext.current().getCallingAccount().getDomainId();
+        }
+        return domainId;
+    }
+
+    public Long getServiceOfferingId() {
+        return serviceOfferingId;
+    }
+
+    public Long getZoneId() {
+        return zoneId;
+    }
+
+    public Long getKubernetesVersionId() {
+        return kubernetesVersionId;
+    }
+
+    public Long getNetworkId() { return networkId;}
+
+    public String getName() {
+        return name;
+    }
+
+    public String getSSHKeyPairName() {
+        return sshKeyPairName;
+    }
+
+    public Long getMasterNodes() {
+        if (masterNodes == null) {
+            return 1L;
+        }
+        return masterNodes;
+    }
+
+    public String getExternalLoadBalancerIpAddress() {
+        return externalLoadBalancerIpAddress;
+    }
+
+    public Long getClusterSize() {
+        return clusterSize;
+    }
+
+    public String getDockerRegistryUserName() {
+        return dockerRegistryUserName;
+    }
+
+    public String getDockerRegistryPassword() {
+        return dockerRegistryPassword;
+    }
+
+    public String getDockerRegistryUrl() {
+        return dockerRegistryUrl;
+    }
+
+    public String getDockerRegistryEmail() {
+        return dockerRegistryEmail;
+    }
+
+    public Long getNodeRootDiskSize() {
+        return nodeRootDiskSize;
+    }
+
+    /////////////////////////////////////////////////////
+    /////////////// API Implementation///////////////////
+    /////////////////////////////////////////////////////
+
+    @Override
+    public String getCommandName() {
+        return APINAME.toLowerCase() + "response";
+    }
+
+    public static String getResultObjectName() {
+        return "kubernetescluster";
+    }
+
+    @Override
+    public long getEntityOwnerId() {
+        Long accountId = _accountService.finalyzeAccountId(accountName, domainId, projectId, true);
+        if (accountId == null) {
+            return CallContext.current().getCallingAccount().getId();
+        }
+
+        return accountId;
+    }
+
+    @Override
+    public String getEventType() {
+        return KubernetesClusterEventTypes.EVENT_KUBERNETES_CLUSTER_CREATE;
+    }
+
+    @Override
+    public String getCreateEventType() {
+        return KubernetesClusterEventTypes.EVENT_KUBERNETES_CLUSTER_CREATE;
+    }
+
+    @Override
+    public String getCreateEventDescription() {
+        return "creating Kubernetes cluster";
+    }
+
+    @Override
+    public String getEventDescription() {
+        return "creating Kubernetes cluster. Cluster Id: " + getEntityId();
+    }
+
+    @Override
+    public ApiCommandJobType getInstanceType() {
+        return ApiCommandJobType.VirtualMachine;
+    }
+
+    @Override
+    public void execute() {
+        try {
+            if (!kubernetesClusterService.startKubernetesCluster(getEntityId(), true)) {
+                throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to start Kubernetes cluster");
+            }
+            KubernetesClusterResponse response = kubernetesClusterService.createKubernetesClusterResponse(getEntityId());
+            response.setResponseName(getCommandName());
+            setResponseObject(response);
+        } catch (CloudRuntimeException e) {
+            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage());
+        }
+    }
+
+    @Override
+    public void create() throws CloudRuntimeException {
+        try {
+            KubernetesCluster cluster = kubernetesClusterService.createKubernetesCluster(this);
+            if (cluster == null) {
+                throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to create Kubernetes cluster");
+            }
+            setEntityId(cluster.getId());
+            setEntityUuid(cluster.getUuid());
+        } catch (CloudRuntimeException e) {
+            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage());
+        }
+    }
+}
diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/DeleteIsoCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/DeleteKubernetesClusterCmd.java
similarity index 51%
copy from api/src/main/java/org/apache/cloudstack/api/command/user/iso/DeleteIsoCmd.java
copy to plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/DeleteKubernetesClusterCmd.java
index 103e922..4f32138 100644
--- a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/DeleteIsoCmd.java
+++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/DeleteKubernetesClusterCmd.java
@@ -14,45 +14,51 @@
 // KIND, either express or implied.  See the License for the
 // specific language governing permissions and limitations
 // under the License.
-package org.apache.cloudstack.api.command.user.iso;
+package org.apache.cloudstack.api.command.user.kubernetes.cluster;
 
-import org.apache.log4j.Logger;
+import javax.inject.Inject;
 
+import org.apache.cloudstack.acl.RoleType;
 import org.apache.cloudstack.api.APICommand;
-import org.apache.cloudstack.api.ApiCommandJobType;
 import org.apache.cloudstack.api.ApiConstants;
 import org.apache.cloudstack.api.ApiErrorCode;
 import org.apache.cloudstack.api.BaseAsyncCmd;
 import org.apache.cloudstack.api.Parameter;
 import org.apache.cloudstack.api.ServerApiException;
+import org.apache.cloudstack.api.response.KubernetesClusterResponse;
 import org.apache.cloudstack.api.response.SuccessResponse;
-import org.apache.cloudstack.api.response.TemplateResponse;
-import org.apache.cloudstack.api.response.ZoneResponse;
 import org.apache.cloudstack.context.CallContext;
+import org.apache.log4j.Logger;
+
+import com.cloud.exception.ConcurrentOperationException;
+import com.cloud.kubernetes.cluster.KubernetesCluster;
+import com.cloud.kubernetes.cluster.KubernetesClusterEventTypes;
+import com.cloud.kubernetes.cluster.KubernetesClusterService;
+import com.cloud.utils.exception.CloudRuntimeException;
 
-import com.cloud.event.EventTypes;
-import com.cloud.template.VirtualMachineTemplate;
-import com.cloud.user.Account;
+@APICommand(name = DeleteKubernetesClusterCmd.APINAME,
+        description = "Deletes a Kubernetes cluster",
+        responseObject = SuccessResponse.class,
+        entityType = {KubernetesCluster.class},
+        authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User})
+public class DeleteKubernetesClusterCmd extends BaseAsyncCmd {
+    public static final Logger LOGGER = Logger.getLogger(DeleteKubernetesClusterCmd.class.getName());
+    public static final String APINAME = "deleteKubernetesCluster";
 
-@APICommand(name = "deleteIso", description = "Deletes an ISO file.", responseObject = SuccessResponse.class,
-        requestHasSensitiveInfo = false, responseHasSensitiveInfo = false)
-public class DeleteIsoCmd extends BaseAsyncCmd {
-    public static final Logger s_logger = Logger.getLogger(DeleteIsoCmd.class.getName());
-    private static final String s_name = "deleteisoresponse";
+    @Inject
+    public KubernetesClusterService kubernetesClusterService;
 
     /////////////////////////////////////////////////////
     //////////////// API parameters /////////////////////
     /////////////////////////////////////////////////////
 
-    @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = TemplateResponse.class, required = true, description = "the ID of the ISO file")
+    @Parameter(name = ApiConstants.ID,
+            type = CommandType.UUID,
+            entityType = KubernetesClusterResponse.class,
+            required = true,
+            description = "the ID of the Kubernetes cluster")
     private Long id;
 
-    @Parameter(name = ApiConstants.ZONE_ID,
-               type = CommandType.UUID,
-               entityType = ZoneResponse.class,
-               description = "the ID of the zone of the ISO file. If not specified, the ISO will be deleted from all the zones")
-    private Long zoneId;
-
     /////////////////////////////////////////////////////
     /////////////////// Accessors ///////////////////////
     /////////////////////////////////////////////////////
@@ -61,62 +67,43 @@ public class DeleteIsoCmd extends BaseAsyncCmd {
         return id;
     }
 
-    public Long getZoneId() {
-        return zoneId;
-    }
-
     /////////////////////////////////////////////////////
     /////////////// API Implementation///////////////////
     /////////////////////////////////////////////////////
 
     @Override
-    public String getCommandName() {
-        return s_name;
+    public void execute() throws ServerApiException, ConcurrentOperationException {
+        try {
+            if (!kubernetesClusterService.deleteKubernetesCluster(id)) {
+                throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to delete Kubernetes cluster ID: %d", getId()));
+            }
+            SuccessResponse response = new SuccessResponse(getCommandName());
+            setResponseObject(response);
+        } catch (CloudRuntimeException e) {
+            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage());
+        }
     }
 
-    public static String getStaticName() {
-        return s_name;
+    @Override
+    public String getCommandName() {
+        return APINAME.toLowerCase() + "response";
     }
 
     @Override
     public long getEntityOwnerId() {
-        VirtualMachineTemplate iso = _entityMgr.findById(VirtualMachineTemplate.class, getId());
-        if (iso != null) {
-            return iso.getAccountId();
-        }
-
-        return Account.ACCOUNT_ID_SYSTEM;
+        return CallContext.current().getCallingAccount().getId();
     }
 
+
     @Override
     public String getEventType() {
-        return EventTypes.EVENT_ISO_DELETE;
+        return KubernetesClusterEventTypes.EVENT_KUBERNETES_CLUSTER_DELETE;
     }
 
     @Override
     public String getEventDescription() {
-        return "Deleting ISO " + getId();
-    }
-
-    @Override
-    public ApiCommandJobType getInstanceType() {
-        return ApiCommandJobType.Iso;
-    }
-
-    @Override
-    public Long getInstanceId() {
-        return getId();
+        KubernetesCluster cluster = _entityMgr.findById(KubernetesCluster.class, getId());
+        return String.format("Deleting Kubernetes cluster ID: %s", cluster.getUuid());
     }
 
-    @Override
-    public void execute() {
-        CallContext.current().setEventDetails("ISO Id: " + getId());
-        boolean result = _templateService.deleteIso(this);
-        if (result) {
-            SuccessResponse response = new SuccessResponse(getCommandName());
-            this.setResponseObject(response);
-        } else {
-            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to delete ISO");
-        }
-    }
 }
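
A minimal usage sketch (assuming the command constant resolves to deleteKubernetesCluster, as the file name suggests; authentication parameters omitted, UUID is a placeholder):

    GET /client/api?command=deleteKubernetesCluster&id=<cluster-uuid>&response=json

The call is asynchronous; on success the job result carries a SuccessResponse wrapped as deletekubernetesclusterresponse.
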
diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/GetKubernetesClusterConfigCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/GetKubernetesClusterConfigCmd.java
new file mode 100644
index 0000000..c88f0eb
--- /dev/null
+++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/GetKubernetesClusterConfigCmd.java
@@ -0,0 +1,98 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.api.command.user.kubernetes.cluster;
+
+import javax.inject.Inject;
+
+import org.apache.cloudstack.acl.RoleType;
+import org.apache.cloudstack.api.APICommand;
+import org.apache.cloudstack.api.ApiConstants;
+import org.apache.cloudstack.api.ApiErrorCode;
+import org.apache.cloudstack.api.BaseCmd;
+import org.apache.cloudstack.api.Parameter;
+import org.apache.cloudstack.api.ResponseObject;
+import org.apache.cloudstack.api.ServerApiException;
+import org.apache.cloudstack.api.response.KubernetesClusterConfigResponse;
+import org.apache.cloudstack.api.response.KubernetesClusterResponse;
+import org.apache.cloudstack.context.CallContext;
+import org.apache.log4j.Logger;
+
+import com.cloud.kubernetes.cluster.KubernetesClusterService;
+import com.cloud.user.Account;
+import com.cloud.utils.exception.CloudRuntimeException;
+
+
+@APICommand(name = GetKubernetesClusterConfigCmd.APINAME,
+        description = "Get Kubernetes cluster config",
+        responseObject = KubernetesClusterConfigResponse.class,
+        responseView = ResponseObject.ResponseView.Restricted,
+        requestHasSensitiveInfo = false,
+        responseHasSensitiveInfo = true,
+        authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User})
+public class GetKubernetesClusterConfigCmd extends BaseCmd {
+    public static final Logger LOGGER = Logger.getLogger(GetKubernetesClusterConfigCmd.class.getName());
+    public static final String APINAME = "getKubernetesClusterConfig";
+
+    @Inject
+    public KubernetesClusterService kubernetesClusterService;
+
+    /////////////////////////////////////////////////////
+    //////////////// API parameters /////////////////////
+    /////////////////////////////////////////////////////
+    @Parameter(name = ApiConstants.ID, type = CommandType.UUID,
+            entityType = KubernetesClusterResponse.class,
+            description = "the ID of the Kubernetes cluster")
+    private Long id;
+
+    /////////////////////////////////////////////////////
+    /////////////////// Accessors ///////////////////////
+    /////////////////////////////////////////////////////
+
+    public Long getId() {
+        return id;
+    }
+
+    /////////////////////////////////////////////////////
+    /////////////// API Implementation///////////////////
+    /////////////////////////////////////////////////////
+
+    @Override
+    public long getEntityOwnerId() {
+        Account account = CallContext.current().getCallingAccount();
+        if (account != null) {
+            return account.getId();
+        }
+
+        return Account.ACCOUNT_ID_SYSTEM; // no account info given, parent this command to SYSTEM so ERROR events are tracked
+    }
+
+    @Override
+    public String getCommandName() {
+        return APINAME.toLowerCase() + "response";
+    }
+
+    @Override
+    public void execute() throws ServerApiException {
+        try {
+            KubernetesClusterConfigResponse response = kubernetesClusterService.getKubernetesClusterConfig(this);
+            response.setResponseName(getCommandName());
+            setResponseObject(response);
+        } catch (CloudRuntimeException e) {
+            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage());
+        }
+    }
+}
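
A minimal invocation sketch for the new command against the standard /client/api endpoint (auth parameters omitted, UUID is a placeholder):

    GET /client/api?command=getKubernetesClusterConfig&id=<cluster-uuid>&response=json

The cluster config is returned in the configdata field of the getkubernetesclusterconfigresponse wrapper, per the response class added further below.
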
diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ListKubernetesClustersCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ListKubernetesClustersCmd.java
new file mode 100644
index 0000000..ef960d5
--- /dev/null
+++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ListKubernetesClustersCmd.java
@@ -0,0 +1,100 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.api.command.user.kubernetes.cluster;
+
+import javax.inject.Inject;
+
+import org.apache.cloudstack.acl.RoleType;
+import org.apache.cloudstack.api.APICommand;
+import org.apache.cloudstack.api.ApiConstants;
+import org.apache.cloudstack.api.ApiErrorCode;
+import org.apache.cloudstack.api.BaseListProjectAndAccountResourcesCmd;
+import org.apache.cloudstack.api.Parameter;
+import org.apache.cloudstack.api.ResponseObject.ResponseView;
+import org.apache.cloudstack.api.ServerApiException;
+import org.apache.cloudstack.api.response.KubernetesClusterResponse;
+import org.apache.cloudstack.api.response.ListResponse;
+import org.apache.log4j.Logger;
+
+import com.cloud.kubernetes.cluster.KubernetesClusterService;
+import com.cloud.utils.exception.CloudRuntimeException;
+
+@APICommand(name = ListKubernetesClustersCmd.APINAME,
+        description = "Lists Kubernetes clusters",
+        responseObject = KubernetesClusterResponse.class,
+        responseView = ResponseView.Restricted,
+        requestHasSensitiveInfo = false,
+        responseHasSensitiveInfo = true,
+        authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User})
+public class ListKubernetesClustersCmd extends BaseListProjectAndAccountResourcesCmd {
+    public static final Logger LOGGER = Logger.getLogger(ListKubernetesClustersCmd.class.getName());
+    public static final String APINAME = "listKubernetesClusters";
+
+    @Inject
+    public KubernetesClusterService kubernetesClusterService;
+
+    /////////////////////////////////////////////////////
+    //////////////// API parameters /////////////////////
+    /////////////////////////////////////////////////////
+    @Parameter(name = ApiConstants.ID, type = CommandType.UUID,
+            entityType = KubernetesClusterResponse.class,
+            description = "the ID of the Kubernetes cluster")
+    private Long id;
+
+    @Parameter(name = ApiConstants.STATE, type = CommandType.STRING, description = "state of the Kubernetes cluster")
+    private String state;
+
+    @Parameter(name = ApiConstants.NAME, type = CommandType.STRING, description = "name of the Kubernetes cluster" +
+            " (a substring match is made against the parameter value, data for all matching Kubernetes clusters will be returned)")
+    private String name;
+
+    /////////////////////////////////////////////////////
+    /////////////////// Accessors ///////////////////////
+    /////////////////////////////////////////////////////
+
+    public Long getId() {
+        return id;
+    }
+
+    public String getState() {
+        return state;
+    }
+
+    public String getName() {
+        return name;
+    }
+
+    /////////////////////////////////////////////////////
+    /////////////// API Implementation///////////////////
+    /////////////////////////////////////////////////////
+
+    @Override
+    public String getCommandName() {
+        return APINAME.toLowerCase() + "response";
+    }
+
+    @Override
+    public void execute() throws ServerApiException {
+        try {
+            ListResponse<KubernetesClusterResponse> response = kubernetesClusterService.listKubernetesClusters(this);
+            response.setResponseName(getCommandName());
+            setResponseObject(response);
+        } catch (CloudRuntimeException e) {
+            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage());
+        }
+    }
+}
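
A filtering sketch using the substring name match described above (auth parameters omitted):

    GET /client/api?command=listKubernetesClusters&name=<name-substring>&state=<state>&response=json

The usual list parameters inherited from BaseListProjectAndAccountResourcesCmd (page/pagesize, account/domainid/projectid, etc.) should apply as well.
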
diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ScaleKubernetesClusterCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ScaleKubernetesClusterCmd.java
new file mode 100644
index 0000000..90ccfa4
--- /dev/null
+++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ScaleKubernetesClusterCmd.java
@@ -0,0 +1,128 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.api.command.user.kubernetes.cluster;
+
+import javax.inject.Inject;
+
+import org.apache.cloudstack.acl.RoleType;
+import org.apache.cloudstack.acl.SecurityChecker;
+import org.apache.cloudstack.api.ACL;
+import org.apache.cloudstack.api.APICommand;
+import org.apache.cloudstack.api.ApiConstants;
+import org.apache.cloudstack.api.ApiErrorCode;
+import org.apache.cloudstack.api.BaseAsyncCmd;
+import org.apache.cloudstack.api.Parameter;
+import org.apache.cloudstack.api.ResponseObject;
+import org.apache.cloudstack.api.ServerApiException;
+import org.apache.cloudstack.api.response.KubernetesClusterResponse;
+import org.apache.cloudstack.api.response.ServiceOfferingResponse;
+import org.apache.cloudstack.context.CallContext;
+import org.apache.log4j.Logger;
+
+import com.cloud.exception.ConcurrentOperationException;
+import com.cloud.kubernetes.cluster.KubernetesCluster;
+import com.cloud.kubernetes.cluster.KubernetesClusterEventTypes;
+import com.cloud.kubernetes.cluster.KubernetesClusterService;
+import com.cloud.utils.exception.CloudRuntimeException;
+
+@APICommand(name = ScaleKubernetesClusterCmd.APINAME,
+        description = "Scales a created, running or stopped Kubernetes cluster",
+        responseObject = KubernetesClusterResponse.class,
+        responseView = ResponseObject.ResponseView.Restricted,
+        entityType = {KubernetesCluster.class},
+        requestHasSensitiveInfo = false,
+        responseHasSensitiveInfo = true,
+        authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User})
+public class ScaleKubernetesClusterCmd extends BaseAsyncCmd {
+    public static final Logger LOGGER = Logger.getLogger(ScaleKubernetesClusterCmd.class.getName());
+    public static final String APINAME = "scaleKubernetesCluster";
+
+    @Inject
+    public KubernetesClusterService kubernetesClusterService;
+
+    /////////////////////////////////////////////////////
+    //////////////// API parameters /////////////////////
+    /////////////////////////////////////////////////////
+    @Parameter(name = ApiConstants.ID, type = CommandType.UUID,
+            entityType = KubernetesClusterResponse.class,
+            description = "the ID of the Kubernetes cluster")
+    private Long id;
+
+    @ACL(accessType = SecurityChecker.AccessType.UseEntry)
+    @Parameter(name = ApiConstants.SERVICE_OFFERING_ID, type = CommandType.UUID, entityType = ServiceOfferingResponse.class,
+            description = "the ID of the service offering for the virtual machines in the cluster.")
+    private Long serviceOfferingId;
+
+    @Parameter(name = ApiConstants.SIZE, type = CommandType.LONG,
+            description = "number of Kubernetes cluster nodes")
+    private Long clusterSize;
+
+    /////////////////////////////////////////////////////
+    /////////////////// Accessors ///////////////////////
+    /////////////////////////////////////////////////////
+
+    public Long getId() {
+        return id;
+    }
+
+    public Long getServiceOfferingId() {
+        return serviceOfferingId;
+    }
+
+    public Long getClusterSize() {
+        return clusterSize;
+    }
+
+    @Override
+    public String getEventType() {
+        return KubernetesClusterEventTypes.EVENT_KUBERNETES_CLUSTER_SCALE;
+    }
+
+    @Override
+    public String getEventDescription() {
+        KubernetesCluster cluster = _entityMgr.findById(KubernetesCluster.class, getId());
+        return String.format("Scaling Kubernetes cluster ID: %s", cluster.getUuid());
+    }
+
+    @Override
+    public String getCommandName() {
+        return APINAME.toLowerCase() + "response";
+    }
+
+    @Override
+    public long getEntityOwnerId() {
+        return CallContext.current().getCallingAccount().getId();
+    }
+
+    /////////////////////////////////////////////////////
+    /////////////// API Implementation///////////////////
+    /////////////////////////////////////////////////////
+
+    @Override
+    public void execute() throws ServerApiException, ConcurrentOperationException {
+        try {
+            if (!kubernetesClusterService.scaleKubernetesCluster(this)) {
+                throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to scale Kubernetes cluster ID: %d", getId()));
+            }
+            final KubernetesClusterResponse response = kubernetesClusterService.createKubernetesClusterResponse(getId());
+            response.setResponseName(getCommandName());
+            setResponseObject(response);
+        } catch (CloudRuntimeException ex) {
+            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage());
+        }
+    }
+}
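
Both size and serviceofferingid are optional in the annotations above, so a scale-out-only sketch could look like this (assuming ApiConstants.SIZE and ApiConstants.SERVICE_OFFERING_ID map to "size" and "serviceofferingid"; auth parameters omitted):

    GET /client/api?command=scaleKubernetesCluster&id=<cluster-uuid>&size=5&response=json

Validating which parameter combinations are acceptable is left to KubernetesClusterService.scaleKubernetesCluster().
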
diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/StartKubernetesClusterCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/StartKubernetesClusterCmd.java
new file mode 100644
index 0000000..1ce2fe0
--- /dev/null
+++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/StartKubernetesClusterCmd.java
@@ -0,0 +1,120 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.api.command.user.kubernetes.cluster;
+
+import javax.inject.Inject;
+
+import org.apache.cloudstack.acl.RoleType;
+import org.apache.cloudstack.api.APICommand;
+import org.apache.cloudstack.api.ApiConstants;
+import org.apache.cloudstack.api.ApiErrorCode;
+import org.apache.cloudstack.api.BaseAsyncCmd;
+import org.apache.cloudstack.api.Parameter;
+import org.apache.cloudstack.api.ResponseObject;
+import org.apache.cloudstack.api.ServerApiException;
+import org.apache.cloudstack.api.response.KubernetesClusterResponse;
+import org.apache.cloudstack.context.CallContext;
+import org.apache.log4j.Logger;
+
+import com.cloud.exception.ConcurrentOperationException;
+import com.cloud.kubernetes.cluster.KubernetesCluster;
+import com.cloud.kubernetes.cluster.KubernetesClusterEventTypes;
+import com.cloud.kubernetes.cluster.KubernetesClusterService;
+import com.cloud.utils.exception.CloudRuntimeException;
+
+@APICommand(name = StartKubernetesClusterCmd.APINAME, description = "Starts a stopped Kubernetes cluster",
+        responseObject = KubernetesClusterResponse.class,
+        responseView = ResponseObject.ResponseView.Restricted,
+        entityType = {KubernetesCluster.class},
+        requestHasSensitiveInfo = false,
+        responseHasSensitiveInfo = true,
+        authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User})
+public class StartKubernetesClusterCmd extends BaseAsyncCmd {
+    public static final Logger LOGGER = Logger.getLogger(StartKubernetesClusterCmd.class.getName());
+    public static final String APINAME = "startKubernetesCluster";
+
+    @Inject
+    public KubernetesClusterService kubernetesClusterService;
+
+    /////////////////////////////////////////////////////
+    //////////////// API parameters /////////////////////
+    /////////////////////////////////////////////////////
+    @Parameter(name = ApiConstants.ID, type = CommandType.UUID,
+            entityType = KubernetesClusterResponse.class, required = true,
+            description = "the ID of the Kubernetes cluster")
+    private Long id;
+
+    /////////////////////////////////////////////////////
+    /////////////////// Accessors ///////////////////////
+    /////////////////////////////////////////////////////
+
+    public Long getId() {
+        return id;
+    }
+
+    @Override
+    public String getEventType() {
+        return KubernetesClusterEventTypes.EVENT_KUBERNETES_CLUSTER_START;
+    }
+
+    @Override
+    public String getEventDescription() {
+        KubernetesCluster cluster = _entityMgr.findById(KubernetesCluster.class, getId());
+        return String.format("Starting Kubernetes cluster ID: %s", cluster.getUuid());
+    }
+
+    @Override
+    public String getCommandName() {
+        return APINAME.toLowerCase() + "response";
+    }
+
+    @Override
+    public long getEntityOwnerId() {
+        return CallContext.current().getCallingAccount().getId();
+    }
+
+    /////////////////////////////////////////////////////
+    /////////////// API Implementation///////////////////
+    /////////////////////////////////////////////////////
+
+    public KubernetesCluster validateRequest() {
+        if (getId() == null || getId() < 1L) {
+            throw new ServerApiException(ApiErrorCode.PARAM_ERROR, "Invalid Kubernetes cluster ID provided");
+        }
+        final KubernetesCluster kubernetesCluster = kubernetesClusterService.findById(getId());
+        if (kubernetesCluster == null) {
+            throw new ServerApiException(ApiErrorCode.PARAM_ERROR, "Given Kubernetes cluster was not found");
+        }
+        return kubernetesCluster;
+    }
+
+    @Override
+    public void execute() throws ServerApiException, ConcurrentOperationException {
+        final KubernetesCluster kubernetesCluster = validateRequest();
+        try {
+            if (!kubernetesClusterService.startKubernetesCluster(kubernetesCluster.getId(), false)) {
+                throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to start Kubernetes cluster ID: %d", getId()));
+            }
+            final KubernetesClusterResponse response = kubernetesClusterService.createKubernetesClusterResponse(kubernetesCluster.getId());
+            response.setResponseName(getCommandName());
+            setResponseObject(response);
+        } catch (CloudRuntimeException ex) {
+            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage());
+        }
+    }
+
+}
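
Invocation sketch (auth parameters omitted); validateRequest() above rejects missing or unknown cluster IDs before the service call is made:

    GET /client/api?command=startKubernetesCluster&id=<cluster-uuid>&response=json
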
diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/StopKubernetesClusterCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/StopKubernetesClusterCmd.java
new file mode 100644
index 0000000..ba2649f
--- /dev/null
+++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/StopKubernetesClusterCmd.java
@@ -0,0 +1,108 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.api.command.user.kubernetes.cluster;
+
+import javax.inject.Inject;
+
+import org.apache.cloudstack.acl.RoleType;
+import org.apache.cloudstack.api.APICommand;
+import org.apache.cloudstack.api.ApiConstants;
+import org.apache.cloudstack.api.ApiErrorCode;
+import org.apache.cloudstack.api.BaseAsyncCmd;
+import org.apache.cloudstack.api.Parameter;
+import org.apache.cloudstack.api.ResponseObject;
+import org.apache.cloudstack.api.ServerApiException;
+import org.apache.cloudstack.api.response.KubernetesClusterResponse;
+import org.apache.cloudstack.api.response.SuccessResponse;
+import org.apache.cloudstack.context.CallContext;
+import org.apache.log4j.Logger;
+
+import com.cloud.exception.ConcurrentOperationException;
+import com.cloud.kubernetes.cluster.KubernetesCluster;
+import com.cloud.kubernetes.cluster.KubernetesClusterEventTypes;
+import com.cloud.kubernetes.cluster.KubernetesClusterService;
+import com.cloud.utils.exception.CloudRuntimeException;
+
+@APICommand(name = StopKubernetesClusterCmd.APINAME, description = "Stops a running Kubernetes cluster",
+        responseObject = SuccessResponse.class,
+        responseView = ResponseObject.ResponseView.Restricted,
+        entityType = {KubernetesCluster.class},
+        requestHasSensitiveInfo = false,
+        responseHasSensitiveInfo = true,
+        authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User})
+public class StopKubernetesClusterCmd extends BaseAsyncCmd {
+    public static final Logger LOGGER = Logger.getLogger(StopKubernetesClusterCmd.class.getName());
+    public static final String APINAME = "stopKubernetesCluster";
+
+    @Inject
+    public KubernetesClusterService kubernetesClusterService;
+
+    /////////////////////////////////////////////////////
+    //////////////// API parameters /////////////////////
+    /////////////////////////////////////////////////////
+    @Parameter(name = ApiConstants.ID, type = CommandType.UUID,
+            entityType = KubernetesClusterResponse.class, required = true,
+            description = "the ID of the Kubernetes cluster")
+    private Long id;
+
+    /////////////////////////////////////////////////////
+    /////////////////// Accessors ///////////////////////
+    /////////////////////////////////////////////////////
+
+    public Long getId() {
+        return id;
+    }
+
+    @Override
+    public String getEventType() {
+        return KubernetesClusterEventTypes.EVENT_KUBERNETES_CLUSTER_STOP;
+    }
+
+    @Override
+    public String getEventDescription() {
+        KubernetesCluster cluster = _entityMgr.findById(KubernetesCluster.class, getId());
+        return String.format("Stopping Kubernetes cluster ID: %s", cluster.getUuid());
+    }
+
+    @Override
+    public String getCommandName() {
+        return APINAME.toLowerCase() + "response";
+    }
+
+    @Override
+    public long getEntityOwnerId() {
+        return CallContext.current().getCallingAccount().getId();
+    }
+
+    /////////////////////////////////////////////////////
+    /////////////// API Implementation///////////////////
+    /////////////////////////////////////////////////////
+
+    @Override
+    public void execute() throws ServerApiException, ConcurrentOperationException {
+        try {
+            if (!kubernetesClusterService.stopKubernetesCluster(getId())) {
+                throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to stop Kubernetes cluster ID: %d", getId()));
+            }
+            final SuccessResponse response = new SuccessResponse(getCommandName());
+            setResponseObject(response);
+        } catch (CloudRuntimeException ex) {
+            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage());
+        }
+    }
+
+}
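
Invocation sketch (auth parameters omitted):

    GET /client/api?command=stopKubernetesCluster&id=<cluster-uuid>&response=json

Note that, unlike the other lifecycle commands, this one returns a plain SuccessResponse rather than a KubernetesClusterResponse.
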
diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/UpgradeKubernetesClusterCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/UpgradeKubernetesClusterCmd.java
new file mode 100644
index 0000000..2c99b00
--- /dev/null
+++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/UpgradeKubernetesClusterCmd.java
@@ -0,0 +1,118 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.cloudstack.api.command.user.kubernetes.cluster;
+
+import javax.inject.Inject;
+
+import org.apache.cloudstack.acl.RoleType;
+import org.apache.cloudstack.api.APICommand;
+import org.apache.cloudstack.api.ApiConstants;
+import org.apache.cloudstack.api.ApiErrorCode;
+import org.apache.cloudstack.api.BaseAsyncCmd;
+import org.apache.cloudstack.api.Parameter;
+import org.apache.cloudstack.api.ResponseObject;
+import org.apache.cloudstack.api.ServerApiException;
+import org.apache.cloudstack.api.response.KubernetesClusterResponse;
+import org.apache.cloudstack.api.response.KubernetesSupportedVersionResponse;
+import org.apache.cloudstack.context.CallContext;
+import org.apache.log4j.Logger;
+
+import com.cloud.exception.ConcurrentOperationException;
+import com.cloud.kubernetes.cluster.KubernetesCluster;
+import com.cloud.kubernetes.cluster.KubernetesClusterEventTypes;
+import com.cloud.kubernetes.cluster.KubernetesClusterService;
+import com.cloud.utils.exception.CloudRuntimeException;
+
+@APICommand(name = UpgradeKubernetesClusterCmd.APINAME, description = "Upgrades a running Kubernetes cluster",
+        responseObject = KubernetesClusterResponse.class,
+        responseView = ResponseObject.ResponseView.Restricted,
+        entityType = {KubernetesCluster.class},
+        requestHasSensitiveInfo = false,
+        responseHasSensitiveInfo = true,
+        authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User})
+public class UpgradeKubernetesClusterCmd extends BaseAsyncCmd {
+    public static final Logger LOGGER = Logger.getLogger(UpgradeKubernetesClusterCmd.class.getName());
+    public static final String APINAME = "upgradeKubernetesCluster";
+
+    @Inject
+    public KubernetesClusterService kubernetesClusterService;
+
+    /////////////////////////////////////////////////////
+    //////////////// API parameters /////////////////////
+    /////////////////////////////////////////////////////
+    @Parameter(name = ApiConstants.ID, type = CommandType.UUID,
+            entityType = KubernetesClusterResponse.class, required = true,
+            description = "the ID of the Kubernetes cluster")
+    private Long id;
+
+    @Parameter(name = ApiConstants.KUBERNETES_VERSION_ID, type = CommandType.UUID,
+            entityType = KubernetesSupportedVersionResponse.class, required = true,
+            description = "the ID of the Kubernetes version for upgrade")
+    private Long kubernetesVersionId;
+
+    /////////////////////////////////////////////////////
+    /////////////////// Accessors ///////////////////////
+    /////////////////////////////////////////////////////
+
+    public Long getId() {
+        return id;
+    }
+
+    public Long getKubernetesVersionId() {
+        return kubernetesVersionId;
+    }
+
+    @Override
+    public String getEventType() {
+        return KubernetesClusterEventTypes.EVENT_KUBERNETES_CLUSTER_UPGRADE;
+    }
+
+    @Override
+    public String getEventDescription() {
+        KubernetesCluster cluster = _entityMgr.findById(KubernetesCluster.class, getId());
+        return String.format("Upgrading Kubernetes cluster ID: %s", cluster.getUuid());
+    }
+
+    @Override
+    public String getCommandName() {
+        return APINAME.toLowerCase() + "response";
+    }
+
+    @Override
+    public long getEntityOwnerId() {
+        return CallContext.current().getCallingAccount().getId();
+    }
+
+    /////////////////////////////////////////////////////
+    /////////////// API Implementation///////////////////
+    /////////////////////////////////////////////////////
+
+    @Override
+    public void execute() throws ServerApiException, ConcurrentOperationException {
+        try {
+            if (!kubernetesClusterService.upgradeKubernetesCluster(this)) {
+                throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to upgrade Kubernetes cluster ID: %d", getId()));
+            }
+            final KubernetesClusterResponse response = kubernetesClusterService.createKubernetesClusterResponse(getId());
+            response.setResponseName(getCommandName());
+            setResponseObject(response);
+        } catch (CloudRuntimeException ex) {
+            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage());
+        }
+    }
+}
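
Invocation sketch, assuming ApiConstants.KUBERNETES_VERSION_ID maps to "kubernetesversionid" (auth parameters omitted):

    GET /client/api?command=upgradeKubernetesCluster&id=<cluster-uuid>&kubernetesversionid=<version-uuid>&response=json
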
diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/version/ListKubernetesSupportedVersionsCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/version/ListKubernetesSupportedVersionsCmd.java
new file mode 100644
index 0000000..efa029a
--- /dev/null
+++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/version/ListKubernetesSupportedVersionsCmd.java
@@ -0,0 +1,109 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.cloudstack.api.command.user.kubernetes.version;
+
+import javax.inject.Inject;
+
+import org.apache.cloudstack.acl.RoleType;
+import org.apache.cloudstack.api.APICommand;
+import org.apache.cloudstack.api.ApiConstants;
+import org.apache.cloudstack.api.BaseListCmd;
+import org.apache.cloudstack.api.Parameter;
+import org.apache.cloudstack.api.ResponseObject;
+import org.apache.cloudstack.api.ServerApiException;
+import org.apache.cloudstack.api.response.KubernetesSupportedVersionResponse;
+import org.apache.cloudstack.api.response.ListResponse;
+import org.apache.cloudstack.api.response.ZoneResponse;
+import org.apache.log4j.Logger;
+
+import com.cloud.exception.ConcurrentOperationException;
+import com.cloud.kubernetes.version.KubernetesVersionService;
+import com.google.common.base.Strings;
+
+@APICommand(name = ListKubernetesSupportedVersionsCmd.APINAME,
+        description = "Lists container clusters",
+        responseObject = KubernetesSupportedVersionResponse.class,
+        responseView = ResponseObject.ResponseView.Restricted,
+        authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User})
+public class ListKubernetesSupportedVersionsCmd extends BaseListCmd {
+    public static final Logger LOGGER = Logger.getLogger(ListKubernetesSupportedVersionsCmd.class.getName());
+    public static final String APINAME = "listKubernetesSupportedVersions";
+
+    @Inject
+    private KubernetesVersionService kubernetesVersionService;
+
+    /////////////////////////////////////////////////////
+    //////////////// API parameters /////////////////////
+    /////////////////////////////////////////////////////
+    @Parameter(name = ApiConstants.ID, type = CommandType.UUID,
+            entityType = KubernetesSupportedVersionResponse.class,
+            description = "the ID of the Kubernetes supported version")
+    private Long id;
+
+    @Parameter(name = ApiConstants.ZONE_ID, type = CommandType.UUID,
+            entityType = ZoneResponse.class,
+            description = "the ID of the zone in which Kubernetes supported version will be available")
+    private Long zoneId;
+
+    @Parameter(name = ApiConstants.MIN_SEMANTIC_VERSION, type = CommandType.STRING,
+            description = "the minimum semantic version for the Kubernetes supported version to be listed")
+    private String minimumSemanticVersion;
+
+    @Parameter(name = ApiConstants.MIN_KUBERNETES_VERSION_ID, type = CommandType.UUID,
+            entityType = KubernetesSupportedVersionResponse.class,
+            description = "the ID of the minimum Kubernetes supported version")
+    private Long minimumKubernetesVersionId;
+
+    /////////////////////////////////////////////////////
+    /////////////////// Accessors ///////////////////////
+    /////////////////////////////////////////////////////
+    public Long getId() {
+        return id;
+    }
+
+    public Long getZoneId() {
+        return zoneId;
+    }
+
+    public String getMinimumSemanticVersion() {
+        if (!Strings.isNullOrEmpty(minimumSemanticVersion) &&
+                !minimumSemanticVersion.matches("[0-9]+(\\.[0-9]+)*")) {
+            throw new IllegalArgumentException("Invalid version format");
+        }
+        return minimumSemanticVersion;
+    }
+
+    public Long getMinimumKubernetesVersionId() {
+        return minimumKubernetesVersionId;
+    }
+
+    @Override
+    public String getCommandName() {
+        return APINAME.toLowerCase() + "response";
+    }
+
+    /////////////////////////////////////////////////////
+    /////////////// API Implementation///////////////////
+    /////////////////////////////////////////////////////
+    @Override
+    public void execute() throws ServerApiException, ConcurrentOperationException {
+        ListResponse<KubernetesSupportedVersionResponse> response = kubernetesVersionService.listKubernetesSupportedVersions(this);
+        response.setResponseName(getCommandName());
+        setResponseObject(response);
+    }
+}
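
Listing sketch, assuming ApiConstants.MIN_SEMANTIC_VERSION maps to "minimumsemanticversion" (auth parameters omitted):

    GET /client/api?command=listKubernetesSupportedVersions&zoneid=<zone-uuid>&minimumsemanticversion=1.16.0&response=json

getMinimumSemanticVersion() only accepts dotted numeric versions (regex [0-9]+(\.[0-9]+)*), e.g. 1.16.0.
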
diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesClusterConfigResponse.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesClusterConfigResponse.java
new file mode 100644
index 0000000..0308518
--- /dev/null
+++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesClusterConfigResponse.java
@@ -0,0 +1,61 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.api.response;
+
+import org.apache.cloudstack.api.ApiConstants;
+import org.apache.cloudstack.api.BaseResponse;
+
+import com.cloud.serializer.Param;
+import com.google.gson.annotations.SerializedName;
+
+public class KubernetesClusterConfigResponse extends BaseResponse {
+    @SerializedName(ApiConstants.ID)
+    @Param(description = "the id of the container cluster")
+    private String id;
+
+    @SerializedName(ApiConstants.NAME)
+    @Param(description = "Name of the container cluster")
+    private String name;
+
+    @SerializedName("configdata")
+    @Param(description = "the config data of the cluster")
+    private String configData;
+
+    public String getId() {
+        return id;
+    }
+
+    public void setId(String id) {
+        this.id = id;
+    }
+
+    public String getName() {
+        return name;
+    }
+
+    public void setName(String name) {
+        this.name = name;
+    }
+
+    public String getConfigData() {
+        return configData;
+    }
+
+    public void setConfigData(String configData) {
+        this.configData = configData;
+    }
+}
diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesClusterResponse.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesClusterResponse.java
new file mode 100644
index 0000000..2c6fc81
--- /dev/null
+++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesClusterResponse.java
@@ -0,0 +1,329 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.api.response;
+
+import java.util.List;
+
+import org.apache.cloudstack.api.ApiConstants;
+import org.apache.cloudstack.api.BaseResponse;
+import org.apache.cloudstack.api.EntityReference;
+
+import com.cloud.kubernetes.cluster.KubernetesCluster;
+import com.cloud.serializer.Param;
+import com.google.gson.annotations.SerializedName;
+
+@SuppressWarnings("unused")
+@EntityReference(value = {KubernetesCluster.class})
+public class KubernetesClusterResponse extends BaseResponse implements ControlledEntityResponse {
+    @SerializedName(ApiConstants.ID)
+    @Param(description = "the id of the Kubernetes cluster")
+    private String id;
+
+    @SerializedName(ApiConstants.NAME)
+    @Param(description = "the name of the Kubernetes cluster")
+    private String name;
+
+    @SerializedName(ApiConstants.DESCRIPTION)
+    @Param(description = "the description of the Kubernetes cluster")
+    private String description;
+
+    @SerializedName(ApiConstants.ZONE_ID)
+    @Param(description = "the name of the zone of the Kubernetes cluster")
+    private String zoneId;
+
+    @SerializedName(ApiConstants.ZONE_NAME)
+    @Param(description = "the name of the zone of the Kubernetes cluster")
+    private String zoneName;
+
+    @SerializedName(ApiConstants.SERVICE_OFFERING_ID)
+    @Param(description = "the ID of the service offering of the Kubernetes cluster")
+    private String serviceOfferingId;
+
+    @SerializedName("serviceofferingname")
+    @Param(description = "the name of the service offering of the Kubernetes cluster")
+    private String serviceOfferingName;
+
+    @SerializedName(ApiConstants.TEMPLATE_ID)
+    @Param(description = "the ID of the template of the Kubernetes cluster")
+    private String templateId;
+
+    @SerializedName(ApiConstants.NETWORK_ID)
+    @Param(description = "the ID of the network of the Kubernetes cluster")
+    private String networkId;
+
+    @SerializedName(ApiConstants.ASSOCIATED_NETWORK_NAME)
+    @Param(description = "the name of the network of the Kubernetes cluster")
+    private String associatedNetworkName;
+
+    @SerializedName(ApiConstants.KUBERNETES_VERSION_ID)
+    @Param(description = "the ID of the Kubernetes version for the Kubernetes cluster")
+    private String kubernetesVersionId;
+
+    @SerializedName(ApiConstants.KUBERNETES_VERSION_NAME)
+    @Param(description = "the name of the Kubernetes version for the Kubernetes cluster")
+    private String kubernetesVersionName;
+
+    @SerializedName(ApiConstants.ACCOUNT)
+    @Param(description = "the account associated with the Kubernetes cluster")
+    private String accountName;
+
+    @SerializedName(ApiConstants.PROJECT_ID)
+    @Param(description = "the project id of the Kubernetes cluster")
+    private String projectId;
+
+    @SerializedName(ApiConstants.PROJECT)
+    @Param(description = "the project name of the Kubernetes cluster")
+    private String projectName;
+
+    @SerializedName(ApiConstants.DOMAIN_ID)
+    @Param(description = "the ID of the domain in which the Kubernetes cluster exists")
+    private String domainId;
+
+    @SerializedName(ApiConstants.DOMAIN)
+    @Param(description = "the name of the domain in which the Kubernetes cluster exists")
+    private String domainName;
+
+    @SerializedName(ApiConstants.SSH_KEYPAIR)
+    @Param(description = "keypair details")
+    private String keypair;
+
+    @SerializedName(ApiConstants.MASTER_NODES)
+    @Param(description = "the master nodes count for the Kubernetes cluster")
+    private Long masterNodes;
+
+    @SerializedName(ApiConstants.SIZE)
+    @Param(description = "the size (worker nodes count) of the Kubernetes cluster")
+    private Long clusterSize;
+
+    @SerializedName(ApiConstants.STATE)
+    @Param(description = "the state of the Kubernetes cluster")
+    private String state;
+
+    @SerializedName(ApiConstants.CPU_NUMBER)
+    @Param(description = "the cpu cores of the Kubernetes cluster")
+    private String cores;
+
+    @SerializedName(ApiConstants.MEMORY)
+    @Param(description = "the memory the Kubernetes cluster")
+    private String memory;
+
+    @SerializedName(ApiConstants.END_POINT)
+    @Param(description = "URL end point for the Kubernetes cluster")
+    private String endpoint;
+
+    @SerializedName(ApiConstants.CONSOLE_END_POINT)
+    @Param(description = "URL end point for the Kubernetes cluster dashboard UI")
+    private String consoleEndpoint;
+
+    @SerializedName(ApiConstants.VIRTUAL_MACHINE_IDS)
+    @Param(description = "the list of virtualmachine IDs associated with this Kubernetes cluster")
+    private List<String> virtualMachineIds;
+
+    public KubernetesClusterResponse() {
+    }
+
+    public String getName() {
+        return name;
+    }
+
+    public void setName(String name) {
+        this.name = name;
+    }
+
+    public String getDescription() {
+        return description;
+    }
+
+    public void setDescription(String description) {
+        this.description = description;
+    }
+
+    public String getZoneId() {
+        return zoneId;
+    }
+
+    public void setZoneId(String zoneId) {
+        this.zoneId = zoneId;
+    }
+
+    public String getZoneName() {
+        return zoneName;
+    }
+
+    public void setZoneName(String zoneName) {
+        this.zoneName = zoneName;
+    }
+
+    public String getServiceOfferingId() {
+        return serviceOfferingId;
+    }
+
+    public void setServiceOfferingId(String serviceOfferingId) {
+        this.serviceOfferingId = serviceOfferingId;
+    }
+
+    public String getTemplateId() {
+        return templateId;
+    }
+
+    public void setTemplateId(String templateId) {
+        this.templateId = templateId;
+    }
+
+    public String getNetworkId() {
+        return networkId;
+    }
+
+    public void setNetworkId(String networkId) {
+        this.networkId = networkId;
+    }
+
+    public String getAssociatedNetworkName() {
+        return associatedNetworkName;
+    }
+
+    public void setAssociatedNetworkName(String associatedNetworkName) {
+        this.associatedNetworkName = associatedNetworkName;
+    }
+
+    public String getKubernetesVersionId() {
+        return kubernetesVersionId;
+    }
+
+    public void setKubernetesVersionId(String kubernetesVersionId) {
+        this.kubernetesVersionId = kubernetesVersionId;
+    }
+
+    public String getKubernetesVersionName() {
+        return kubernetesVersionName;
+    }
+
+    public void setKubernetesVersionName(String kubernetesVersionName) {
+        this.kubernetesVersionName = kubernetesVersionName;
+    }
+
+    public String getProjectId() {
+        return projectId;
+    }
+
+    @Override
+    public void setAccountName(String accountName) {
+        this.accountName = accountName;
+    }
+
+    @Override
+    public void setProjectId(String projectId) {
+        this.projectId = projectId;
+    }
+
+    @Override
+    public void setProjectName(String projectName) {
+        this.projectName = projectName;
+    }
+
+    @Override
+    public void setDomainId(String domainId) {
+        this.domainId = domainId;
+    }
+
+    @Override
+    public void setDomainName(String domainName) {
+        this.domainName = domainName;
+    }
+
+    public String getKeypair() {
+        return keypair;
+    }
+
+    public void setKeypair(String keypair) {
+        this.keypair = keypair;
+    }
+
+    public Long getMasterNodes() {
+        return masterNodes;
+    }
+
+    public void setMasterNodes(Long masterNodes) {
+        this.masterNodes = masterNodes;
+    }
+
+    public Long getClusterSize() {
+        return clusterSize;
+    }
+
+    public void setClusterSize(Long clusterSize) {
+        this.clusterSize = clusterSize;
+    }
+
+    public String getCores() {
+        return cores;
+    }
+
+    public void setCores(String cores) {
+        this.cores = cores;
+    }
+
+    public String getMemory() {
+        return memory;
+    }
+
+    public void setMemory(String memory) {
+        this.memory = memory;
+    }
+
+    public String getState() {
+        return state;
+    }
+
+    public void setState(String state) {
+        this.state = state;
+    }
+
+    public String getEndpoint() {
+        return endpoint;
+    }
+
+    public void setEndpoint(String endpoint) {
+        this.endpoint = endpoint;
+    }
+
+    public String getId() {
+        return this.id;
+    }
+
+    public void setId(String id) {
+        this.id = id;
+    }
+
+    public String getServiceOfferingName() {
+        return serviceOfferingName;
+    }
+
+    public void setServiceOfferingName(String serviceOfferingName) {
+        this.serviceOfferingName = serviceOfferingName;
+    }
+
+    public void setVirtualMachineIds(List<String> virtualMachineIds) {
+        this.virtualMachineIds = virtualMachineIds;
+    }
+
+    public List<String> getVirtualMachineIds() {
+        return virtualMachineIds;
+    }
+}
diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesSupportedVersionResponse.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesSupportedVersionResponse.java
new file mode 100644
index 0000000..4deb50d
--- /dev/null
+++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesSupportedVersionResponse.java
@@ -0,0 +1,174 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.cloudstack.api.response;
+
+import org.apache.cloudstack.api.ApiConstants;
+import org.apache.cloudstack.api.BaseResponse;
+import org.apache.cloudstack.api.EntityReference;
+
+import com.cloud.kubernetes.version.KubernetesSupportedVersion;
+import com.cloud.serializer.Param;
+import com.google.gson.annotations.SerializedName;
+
+@SuppressWarnings("unused")
+@EntityReference(value = {KubernetesSupportedVersion.class})
+public class KubernetesSupportedVersionResponse extends BaseResponse {
+    @SerializedName(ApiConstants.ID)
+    @Param(description = "the id of the Kubernetes supported version")
+    private String id;
+
+    @SerializedName(ApiConstants.NAME)
+    @Param(description = "Name of the Kubernetes supported version")
+    private String name;
+
+    @SerializedName(ApiConstants.SEMANTIC_VERSION)
+    @Param(description = "Kubernetes semantic version")
+    private String semanticVersion;
+
+    @SerializedName(ApiConstants.ISO_ID)
+    @Param(description = "the id of the binaries ISO for Kubernetes supported version")
+    private String isoId;
+
+    @SerializedName(ApiConstants.ISO_NAME)
+    @Param(description = "the name of the binaries ISO for Kubernetes supported version")
+    private String isoName;
+
+    @SerializedName(ApiConstants.ISO_STATE)
+    @Param(description = "the state of the binaries ISO for Kubernetes supported version")
+    private String isoState;
+
+    @SerializedName(ApiConstants.ZONE_ID)
+    @Param(description = "the id of the zone in which Kubernetes supported version is available")
+    private String zoneId;
+
+    @SerializedName(ApiConstants.ZONE_NAME)
+    @Param(description = "the name of the zone in which Kubernetes supported version is available")
+    private String zoneName;
+
+    @SerializedName(ApiConstants.SUPPORTS_HA)
+    @Param(description = "whether Kubernetes supported version supports HA, multi-master")
+    private Boolean supportsHA;
+
+    @SerializedName(ApiConstants.STATE)
+    @Param(description = "the enabled or disabled state of the Kubernetes supported version")
+    private String state;
+
+    @SerializedName(ApiConstants.MIN_CPU_NUMBER)
+    @Param(description = "the minimum number of CPUs needed for the Kubernetes supported version")
+    private Integer minimumCpu;
+
+    @SerializedName(ApiConstants.MIN_MEMORY)
+    @Param(description = "the minimum RAM size in MB needed for the Kubernetes supported version")
+    private Integer minimumRamSize;
+
+    public String getId() {
+        return id;
+    }
+
+    public void setId(String id) {
+        this.id = id;
+    }
+
+    public String getName() {
+        return name;
+    }
+
+    public void setName(String name) {
+        this.name = name;
+    }
+
+    public String getSemanticVersion() {
+        return semanticVersion;
+    }
+
+    public void setSemanticVersion(String semanticVersion) {
+        this.semanticVersion = semanticVersion;
+    }
+
+    public String getIsoId() {
+        return isoId;
+    }
+
+    public void setIsoId(String isoId) {
+        this.isoId = isoId;
+    }
+
+    public String getIsoName() {
+        return isoName;
+    }
+
+    public void setIsoName(String isoName) {
+        this.isoName = isoName;
+    }
+
+    public String getIsoState() {
+        return isoState;
+    }
+
+    public void setIsoState(String isoState) {
+        this.isoState = isoState;
+    }
+
+    public String getZoneId() {
+        return zoneId;
+    }
+
+    public void setZoneId(String zoneId) {
+        this.zoneId = zoneId;
+    }
+
+    public String getZoneName() {
+        return zoneName;
+    }
+
+    public void setZoneName(String zoneName) {
+        this.zoneName = zoneName;
+    }
+
+    public Boolean isSupportsHA() {
+        return supportsHA;
+    }
+
+    public void setSupportsHA(Boolean supportsHA) {
+        this.supportsHA = supportsHA;
+    }
+
+    public String getState() {
+        return state;
+    }
+
+    public void setState(String state) {
+        this.state = state;
+    }
+
+    public Integer getMinimumCpu() {
+        return minimumCpu;
+    }
+
+    public void setMinimumCpu(Integer minimumCpu) {
+        this.minimumCpu = minimumCpu;
+    }
+
+    public Integer getMinimumRamSize() {
+        return minimumRamSize;
+    }
+
+    public void setMinimumRamSize(Integer minimumRamSize) {
+        this.minimumRamSize = minimumRamSize;
+    }
+}
diff --git a/plugins/integrations/kubernetes-service/src/main/resources/META-INF/cloudstack/kubernetes-service/module.properties b/plugins/integrations/kubernetes-service/src/main/resources/META-INF/cloudstack/kubernetes-service/module.properties
new file mode 100644
index 0000000..e6f02da
--- /dev/null
+++ b/plugins/integrations/kubernetes-service/src/main/resources/META-INF/cloudstack/kubernetes-service/module.properties
@@ -0,0 +1,18 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+name=kubernetes-service
+parent=compute
diff --git a/plugins/integrations/kubernetes-service/src/main/resources/META-INF/cloudstack/kubernetes-service/spring-kubernetes-service-context.xml b/plugins/integrations/kubernetes-service/src/main/resources/META-INF/cloudstack/kubernetes-service/spring-kubernetes-service-context.xml
new file mode 100644
index 0000000..12f2a46
--- /dev/null
+++ b/plugins/integrations/kubernetes-service/src/main/resources/META-INF/cloudstack/kubernetes-service/spring-kubernetes-service-context.xml
@@ -0,0 +1,37 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements. See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership. The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License. You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied. See the License for the
+  specific language governing permissions and limitations
+  under the License.
+-->
+<beans xmlns="http://www.springframework.org/schema/beans"
+       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+       xmlns:context="http://www.springframework.org/schema/context"
+       xmlns:aop="http://www.springframework.org/schema/aop"
+       xsi:schemaLocation="http://www.springframework.org/schema/beans
+                      http://www.springframework.org/schema/beans/spring-beans-3.0.xsd
+                      http://www.springframework.org/schema/aop http://www.springframework.org/schema/aop/spring-aop-3.0.xsd
+                      http://www.springframework.org/schema/context
+                      http://www.springframework.org/schema/context/spring-context-3.0.xsd"
+                      >
+
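+    <!-- Registers the Kubernetes version/cluster DAOs and managers of this plugin with the CloudStack Spring module context -->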
+    <bean id="kubernetesSupportedVersionDaoImpl" class="com.cloud.kubernetes.version.dao.KubernetesSupportedVersionDaoImpl" />
+    <bean id="kubernetesVersionManagerImpl" class="com.cloud.kubernetes.version.KubernetesVersionManagerImpl" />
+    <bean id="kubernetesClusterDaoImpl" class="com.cloud.kubernetes.cluster.dao.KubernetesClusterDaoImpl" />
+    <bean id="kubernetesClusterDetailsDaoImpl" class="com.cloud.kubernetes.cluster.dao.KubernetesClusterDetailsDaoImpl" />
+    <bean id="kubernetesClusterVmMapDaoImpl" class="com.cloud.kubernetes.cluster.dao.KubernetesClusterVmMapDaoImpl" />
+    <bean id="kubernetesClusterManagerImpl" class="com.cloud.kubernetes.cluster.KubernetesClusterManagerImpl" />
+
+</beans>
diff --git a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master-add.yml b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master-add.yml
new file mode 100644
index 0000000..787ea97
--- /dev/null
+++ b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master-add.yml
@@ -0,0 +1,237 @@
+#cloud-config
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+---
+ssh_authorized_keys:
+  {{ k8s.ssh.pub.key }}
+
+write-files:
+  - path: /opt/bin/setup-kube-system
+    permissions: 0700
+    owner: root:root
+    content: |
+      #!/bin/bash -e
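+      # Installs CNI plugins, crictl, kubeadm/kubelet/kubectl and pre-packaged docker images,
+      # preferring the attached binaries ISO; an online download is attempted only when ATTEMPT_ONLINE_INSTALL=true.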
+
+      if [[ -f "/home/core/success" ]]; then
+      echo "Already provisioned!"
+      exit 0
+      fi
+
+      ISO_MOUNT_DIR=/mnt/k8sdisk
+      BINARIES_DIR=${ISO_MOUNT_DIR}/
+      K8S_CONFIG_SCRIPTS_COPY_DIR=/tmp/k8sconfigscripts/
+      ATTEMPT_ONLINE_INSTALL=false
+      setup_complete=false
+
+      OFFLINE_INSTALL_ATTEMPT_SLEEP=15
+      MAX_OFFLINE_INSTALL_ATTEMPTS=100
+      offline_attempts=1
+      MAX_SETUP_CRUCIAL_CMD_ATTEMPTS=3
+      EJECT_ISO_FROM_OS={{ k8s.eject.iso }}
+      crucial_cmd_attempts=1
+      iso_drive_path=""
+      while true; do
+        if (( "$offline_attempts" > "$MAX_OFFLINE_INSTALL_ATTEMPTS" )); then
+          echo "Warning: Offline install timed out!"
+          break
+        fi
+        set +e
+        output=`blkid -o device -t TYPE=iso9660`
+        set -e
+        if [ "$output" != "" ]; then
+          while read -r line; do
+            if [ ! -d "${ISO_MOUNT_DIR}" ]; then
+              mkdir "${ISO_MOUNT_DIR}"
+            fi
+            retval=0
+            set +e
+            mount -o ro "${line}" "${ISO_MOUNT_DIR}"
+            retval=$?
+            set -e
+            if [ $retval -eq 0 ]; then
+              if [ -d "$BINARIES_DIR" ]; then
+                iso_drive_path="${line}"
+                break
+              else
+                umount "${line}" && rmdir "${ISO_MOUNT_DIR}"
+              fi
+            fi
+          done <<< "$output"
+        fi
+        if [ -d "$BINARIES_DIR" ]; then
+          break
+        fi
+        echo "Waiting for Binaries directory $BINARIES_DIR to be available, sleeping for $OFFLINE_INSTALL_ATTEMPT_SLEEP seconds, attempt: $offline_attempts"
+        sleep $OFFLINE_INSTALL_ATTEMPT_SLEEP
+        offline_attempts=$[$offline_attempts + 1]
+      done
+
+      if [[ "$PATH" != *:/opt/bin && "$PATH" != *:/opt/bin:* ]]; then
+        export PATH=$PATH:/opt/bin
+      fi
+
+      if [ -d "$BINARIES_DIR" ]; then
+        ### Binaries available offline ###
+        echo "Installing binaries from ${BINARIES_DIR}"
+        mkdir -p /opt/cni/bin
+        tar -f "${BINARIES_DIR}/cni/cni-plugins-amd64.tgz" -C /opt/cni/bin -xz
+
+        mkdir -p /opt/bin
+        tar -f "${BINARIES_DIR}/cri-tools/crictl-linux-amd64.tar.gz" -C /opt/bin -xz
+
+        mkdir -p /opt/bin
+        cd /opt/bin
+        cp -a ${BINARIES_DIR}/k8s/{kubeadm,kubelet,kubectl} /opt/bin
+        chmod +x {kubeadm,kubelet,kubectl}
+
+        sed "s:/usr/bin:/opt/bin:g" ${BINARIES_DIR}/kubelet.service > /etc/systemd/system/kubelet.service
+        mkdir -p /etc/systemd/system/kubelet.service.d
+        sed "s:/usr/bin:/opt/bin:g" ${BINARIES_DIR}/10-kubeadm.conf > /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
+
+        output=`ls ${BINARIES_DIR}/docker/`
+        if [ "$output" != "" ]; then
+          while read -r line; do
+            crucial_cmd_attempts=1
+            while true; do
+              if (( "$crucial_cmd_attempts" > "$MAX_SETUP_CRUCIAL_CMD_ATTEMPTS" )); then
+                echo "Loading docker image ${BINARIES_DIR}/docker/$line failed!"
+                break;
+              fi
+              retval=0
+              set +e
+              docker load < "${BINARIES_DIR}/docker/$line"
+              retval=$?
+              set -e
+              if [ $retval -eq 0 ]; then
+                break;
+              fi
+              crucial_cmd_attempts=$[$crucial_cmd_attempts + 1]
+            done
+          done <<< "$output"
+          setup_complete=true
+        fi
+        umount "${ISO_MOUNT_DIR}" && rmdir "${ISO_MOUNT_DIR}"
+        if [ "$EJECT_ISO_FROM_OS" = true ] && [ "$iso_drive_path" != "" ]; then
+          eject "${iso_drive_path}"
+        fi
+      fi
+      if [ "$setup_complete" = false ] && [ "$ATTEMPT_ONLINE_INSTALL" = true ]; then
+        ###  Binaries not available offline ###
+        RELEASE="v1.16.3"
+        CNI_VERSION="v0.7.5"
+        CRICTL_VERSION="v1.16.0"
+        echo "Warning: ${BINARIES_DIR} not found. Will get binaries and docker images from Internet."
+        mkdir -p /opt/cni/bin
+        curl -L "https://github.com/containernetworking/plugins/releases/download/${CNI_VERSION}/cni-plugins-amd64-${CNI_VERSION}.tgz" | tar -C /opt/cni/bin -xz
+
+        mkdir -p /opt/bin
+        curl -L "https://github.com/kubernetes-incubator/cri-tools/releases/download/${CRICTL_VERSION}/crictl-${CRICTL_VERSION}-linux-amd64.tar.gz" | tar -C /opt/bin -xz
+
+        mkdir -p /opt/bin
+        cd /opt/bin
+        curl -L --remote-name-all https://storage.googleapis.com/kubernetes-release/release/${RELEASE}/bin/linux/amd64/{kubeadm,kubelet,kubectl}
+        chmod +x {kubeadm,kubelet,kubectl}
+
+        curl -sSL "https://raw.githubusercontent.com/kubernetes/kubernetes/${RELEASE}/build/debs/kubelet.service" | sed "s:/usr/bin:/opt/bin:g" > /etc/systemd/system/kubelet.service
+        mkdir -p /etc/systemd/system/kubelet.service.d
+        curl -sSL "https://raw.githubusercontent.com/kubernetes/kubernetes/${RELEASE}/build/debs/10-kubeadm.conf" | sed "s:/usr/bin:/opt/bin:g" > /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
+      fi
+
+      systemctl enable kubelet && systemctl start kubelet
+      modprobe br_netfilter && sysctl net.bridge.bridge-nf-call-iptables=1
+
+      if [ -d "$BINARIES_DIR" ] && [ "$ATTEMPT_ONLINE_INSTALL" = true ]; then
+        crucial_cmd_attempts=1
+        while true; do
+          if (( "$crucial_cmd_attempts" > "$MAX_SETUP_CRUCIAL_CMD_ATTEMPTS" )); then
+            echo "Warning: kubeadm pull images failed after multiple tries!"
+            break;
+          fi
+          retval=0
+          set +e
+          kubeadm config images pull
+          retval=$?
+          set -e
+          if [ $retval -eq 0 ]; then
+            break;
+          fi
+          crucial_cmd_attempts=$[$crucial_cmd_attempts + 1]
+        done
+      fi
+
+  - path: /opt/bin/deploy-kube-system
+    permissions: 0700
+    owner: root:root
+    content: |
+      #!/bin/bash -e
+
+      if [[ -f "/home/core/success" ]]; then
+      echo "Already provisioned!"
+      exit 0
+      fi
+
+      if [[ $(systemctl is-active setup-kube-system) != "inactive" ]]; then
+        echo "setup-kube-system is running!"
+        exit 1
+      fi
+      modprobe ip_vs
+      modprobe ip_vs_wrr
+      modprobe ip_vs_sh
+      modprobe nf_conntrack_ipv4
+      if [[ "$PATH" != *:/opt/bin && "$PATH" != *:/opt/bin:* ]]; then
+        export PATH=$PATH:/opt/bin
+      fi
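+      # Joins this VM to the existing control plane; the join IP, token and certificate key below are
+      # template placeholders expected to be filled in by the Kubernetes service before the config is applied.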
+      kubeadm join {{ k8s_master.join_ip }}:6443 --token {{ k8s_master.cluster.token }} --control-plane --certificate-key {{ k8s_master.cluster.ha.certificate.key }} --discovery-token-unsafe-skip-ca-verification
+
+      sudo touch /home/core/success
+      echo "true" > /home/core/success
+
+coreos:
+  units:
+    - name: docker.service
+      command: start
+      enable: true
+
+    - name: setup-kube-system.service
+      command: start
+      content: |
+        [Unit]
+        Requires=docker.service
+        After=docker.service
+
+        [Service]
+        Type=simple
+        StartLimitInterval=0
+        ExecStart=/opt/bin/setup-kube-system
+
+    - name: deploy-kube-system.service
+      command: start
+      content: |
+        [Unit]
+        After=setup-kube-system.service
+
+        [Service]
+        Type=simple
+        StartLimitInterval=0
+        Restart=on-failure
+        ExecStartPre=/usr/bin/curl -k https://{{ k8s_master.join_ip }}:6443/version
+        ExecStart=/opt/bin/deploy-kube-system
+
+  update:
+    group: stable
+    reboot-strategy: off
diff --git a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master.yml b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master.yml
new file mode 100644
index 0000000..1482857
--- /dev/null
+++ b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master.yml
@@ -0,0 +1,294 @@
+#cloud-config
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+---
+ssh_authorized_keys:
+  {{ k8s.ssh.pub.key }}
+
+write-files:
+  - path: /etc/conf.d/nfs
+    permissions: '0644'
+    content: |
+      OPTS_RPC_MOUNTD=""
+
+  - path: /etc/kubernetes/pki/cloudstack/ca.crt
+    permissions: '0644'
+    content: |
+      {{ k8s_master.ca.crt }}
+
+  - path: /etc/kubernetes/pki/cloudstack/apiserver.crt
+    permissions: '0644'
+    content: |
+      {{ k8s_master.apiserver.crt }}
+
+  - path: /etc/kubernetes/pki/cloudstack/apiserver.key
+    permissions: '0600'
+    content: |
+      {{ k8s_master.apiserver.key }}
+
+  - path: /opt/bin/setup-kube-system
+    permissions: 0700
+    owner: root:root
+    content: |
+      #!/bin/bash -e
+
+      if [[ -f "/home/core/success" ]]; then
+      echo "Already provisioned!"
+      exit 0
+      fi
+
+      ISO_MOUNT_DIR=/mnt/k8sdisk
+      BINARIES_DIR=${ISO_MOUNT_DIR}/
+      K8S_CONFIG_SCRIPTS_COPY_DIR=/tmp/k8sconfigscripts/
+      ATTEMPT_ONLINE_INSTALL=false
+      setup_complete=false
+
+      OFFLINE_INSTALL_ATTEMPT_SLEEP=15
+      MAX_OFFLINE_INSTALL_ATTEMPTS=100
+      offline_attempts=1
+      MAX_SETUP_CRUCIAL_CMD_ATTEMPTS=3
+      EJECT_ISO_FROM_OS={{ k8s.eject.iso }}
+      crucial_cmd_attempts=1
+      iso_drive_path=""
+      while true; do
+        if (( "$offline_attempts" > "$MAX_OFFLINE_INSTALL_ATTEMPTS" )); then
+          echo "Warning: Offline install timed out!"
+          break
+        fi
+        set +e
+        output=`blkid -o device -t TYPE=iso9660`
+        set -e
+        if [ "$output" != "" ]; then
+          while read -r line; do
+            if [ ! -d "${ISO_MOUNT_DIR}" ]; then
+              mkdir "${ISO_MOUNT_DIR}"
+            fi
+            retval=0
+            set +e
+            mount -o ro "${line}" "${ISO_MOUNT_DIR}"
+            retval=$?
+            set -e
+            if [ $retval -eq 0 ]; then
+              if [ -d "$BINARIES_DIR" ]; then
+                iso_drive_path="${line}"
+                break
+              else
+                umount "${line}" && rmdir "${ISO_MOUNT_DIR}"
+              fi
+            fi
+          done <<< "$output"
+        fi
+        if [ -d "$BINARIES_DIR" ]; then
+          break
+        fi
+        echo "Waiting for Binaries directory $BINARIES_DIR to be available, sleeping for $OFFLINE_INSTALL_ATTEMPT_SLEEP seconds, attempt: $offline_attempts"
+        sleep $OFFLINE_INSTALL_ATTEMPT_SLEEP
+        offline_attempts=$[$offline_attempts + 1]
+      done
+
+      if [[ "$PATH" != *:/opt/bin && "$PATH" != *:/opt/bin:* ]]; then
+        export PATH=$PATH:/opt/bin
+      fi
+
+      if [ -d "$BINARIES_DIR" ]; then
+        ### Binaries available offline ###
+        echo "Installing binaries from ${BINARIES_DIR}"
+        mkdir -p /opt/cni/bin
+        tar -f "${BINARIES_DIR}/cni/cni-plugins-amd64.tgz" -C /opt/cni/bin -xz
+
+        mkdir -p /opt/bin
+        tar -f "${BINARIES_DIR}/cri-tools/crictl-linux-amd64.tar.gz" -C /opt/bin -xz
+
+        mkdir -p /opt/bin
+        cd /opt/bin
+        cp -a ${BINARIES_DIR}/k8s/{kubeadm,kubelet,kubectl} /opt/bin
+        chmod +x {kubeadm,kubelet,kubectl}
+
+        sed "s:/usr/bin:/opt/bin:g" ${BINARIES_DIR}/kubelet.service > /etc/systemd/system/kubelet.service
+        mkdir -p /etc/systemd/system/kubelet.service.d
+        sed "s:/usr/bin:/opt/bin:g" ${BINARIES_DIR}/10-kubeadm.conf > /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
+
+        output=`ls ${BINARIES_DIR}/docker/`
+        if [ "$output" != "" ]; then
+          while read -r line; do
+            crucial_cmd_attempts=1
+            while true; do
+              if (( "$crucial_cmd_attempts" > "$MAX_SETUP_CRUCIAL_CMD_ATTEMPTS" )); then
+                echo "Loading docker image ${BINARIES_DIR}/docker/$line failed!"
+                break;
+              fi
+              retval=0
+              set +e
+              docker load < "${BINARIES_DIR}/docker/$line"
+              retval=$?
+              set -e
+              if [ $retval -eq 0 ]; then
+                break;
+              fi
+              crucial_cmd_attempts=$[$crucial_cmd_attempts + 1]
+            done
+          done <<< "$output"
+          setup_complete=true
+        fi
+        mkdir -p "${K8S_CONFIG_SCRIPTS_COPY_DIR}"
+        cp ${BINARIES_DIR}/*.yaml "${K8S_CONFIG_SCRIPTS_COPY_DIR}"
+        umount "${ISO_MOUNT_DIR}" && rmdir "${ISO_MOUNT_DIR}"
+        if [ "$EJECT_ISO_FROM_OS" = true ] && [ "$iso_drive_path" != "" ]; then
+          eject "${iso_drive_path}"
+        fi
+      fi
+      if [ "$setup_complete" = false ] && [ "$ATTEMPT_ONLINE_INSTALL" = true ]; then
+        ###  Binaries not available offline ###
+        RELEASE="v1.16.3"
+        CNI_VERSION="v0.7.5"
+        CRICTL_VERSION="v1.16.0"
+        echo "Warning: ${BINARIES_DIR} not found. Will get binaries and docker images from Internet."
+        mkdir -p /opt/cni/bin
+        curl -L "https://github.com/containernetworking/plugins/releases/download/${CNI_VERSION}/cni-plugins-amd64-${CNI_VERSION}.tgz" | tar -C /opt/cni/bin -xz
+
+        mkdir -p /opt/bin
+        curl -L "https://github.com/kubernetes-incubator/cri-tools/releases/download/${CRICTL_VERSION}/crictl-${CRICTL_VERSION}-linux-amd64.tar.gz" | tar -C /opt/bin -xz
+
+        mkdir -p /opt/bin
+        cd /opt/bin
+        curl -L --remote-name-all https://storage.googleapis.com/kubernetes-release/release/${RELEASE}/bin/linux/amd64/{kubeadm,kubelet,kubectl}
+        chmod +x {kubeadm,kubelet,kubectl}
+
+        curl -sSL "https://raw.githubusercontent.com/kubernetes/kubernetes/${RELEASE}/build/debs/kubelet.service" | sed "s:/usr/bin:/opt/bin:g" > /etc/systemd/system/kubelet.service
+        mkdir -p /etc/systemd/system/kubelet.service.d
+        curl -sSL "https://raw.githubusercontent.com/kubernetes/kubernetes/${RELEASE}/build/debs/10-kubeadm.conf" | sed "s:/usr/bin:/opt/bin:g" > /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
+      fi
+
+      systemctl enable kubelet && systemctl start kubelet
+      modprobe br_netfilter && sysctl net.bridge.bridge-nf-call-iptables=1
+
+      if [ -d "$BINARIES_DIR" ] && [ "$ATTEMPT_ONLINE_INSTALL" = true ]; then
+        crucial_cmd_attempts=1
+        while true; do
+          if (( "$crucial_cmd_attempts" > "$MAX_SETUP_CRUCIAL_CMD_ATTEMPTS" )); then
+            echo "Warning: kubeadm pull images failed after multiple tries!"
+            break;
+          fi
+          retval=0
+          set +e
+          kubeadm config images pull
+          retval=$?
+          set -e
+          if [ $retval -eq 0 ]; then
+            break;
+          fi
+          crucial_cmd_attempts=$[$crucial_cmd_attempts + 1]
+        done
+      fi
+
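+      # Retries kubeadm init up to MAX_SETUP_CRUCIAL_CMD_ATTEMPTS times; the token and extra init
+      # arguments are template placeholders expected to be substituted when the node user data is generated.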
+      crucial_cmd_attempts=1
+      while true; do
+        if (( "$crucial_cmd_attempts" > "$MAX_SETUP_CRUCIAL_CMD_ATTEMPTS" )); then
+          echo "Error: kubeadm init failed!"
+          exit 1
+        fi
+        retval=0
+        set +e
+        kubeadm init --token {{ k8s_master.cluster.token }} {{ k8s_master.cluster.initargs }}
+        retval=$?
+        set -e
+        if [ $retval -eq 0 ]; then
+          break;
+        fi
+        crucial_cmd_attempts=$[$crucial_cmd_attempts + 1]
+      done
+
+  - path: /opt/bin/deploy-kube-system
+    permissions: 0700
+    owner: root:root
+    content: |
+      #!/bin/bash -e
+
+      if [[ -f "/home/core/success" ]]; then
+      echo "Already provisioned!"
+      exit 0
+      fi
+
+      K8S_CONFIG_SCRIPTS_COPY_DIR=/tmp/k8sconfigscripts/
+
+      if [[ $(systemctl is-active setup-kube-system) != "inactive" ]]; then
+        echo "setup-kube-system is running!"
+        exit 1
+      fi
+      if [[ "$PATH" != *:/opt/bin && "$PATH" != *:/opt/bin:* ]]; then
+        export PATH=$PATH:/opt/bin
+      fi
+      export KUBECONFIG=/etc/kubernetes/admin.conf
+
+      mkdir -p /root/.kube
+      cp -i /etc/kubernetes/admin.conf /root/.kube/config
+      chown $(id -u):$(id -g) /root/.kube/config
+      echo export PATH=\$PATH:/opt/bin >> /root/.bashrc
+
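+      # Applies the pod network and dashboard manifests from the copies staged off the binaries ISO
+      # when present, otherwise fetches them from their upstream URLs.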
+      if [ -d "$K8S_CONFIG_SCRIPTS_COPY_DIR" ]; then
+        ### Network, dashboard configs available offline ###
+        echo "Offline configs are available!"
+        kubectl apply -f ${K8S_CONFIG_SCRIPTS_COPY_DIR}/network.yaml
+        kubectl apply -f ${K8S_CONFIG_SCRIPTS_COPY_DIR}/dashboard.yaml
+        rm -rf "${K8S_CONFIG_SCRIPTS_COPY_DIR}"
+      else
+        kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')"
+        kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta6/aio/deploy/recommended.yaml
+      fi
+
+      kubectl create rolebinding admin-binding --role=admin --user=admin || true
+      kubectl create clusterrolebinding cluster-admin-binding --clusterrole=cluster-admin --user=admin || true
+      kubectl create clusterrolebinding kubernetes-dashboard-ui --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:kubernetes-dashboard || true
+
+      sudo touch /home/core/success
+      echo "true" > /home/core/success
+
+coreos:
+  units:
+    - name: docker.service
+      command: start
+      enable: true
+
+    - name: setup-kube-system.service
+      command: start
+      content: |
+        [Unit]
+        Requires=docker.service
+        After=docker.service
+
+        [Service]
+        Type=simple
+        StartLimitInterval=0
+        ExecStart=/opt/bin/setup-kube-system
+
+    - name: deploy-kube-system.service
+      command: start
+      content: |
+        [Unit]
+        After=setup-kube-system.service
+
+        [Service]
+        Type=simple
+        StartLimitInterval=0
+        Restart=on-failure
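+        # Combined with Restart=on-failure, this keeps the unit retrying until the local API server responds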
+        ExecStartPre=/usr/bin/curl -k https://127.0.0.1:6443/version
+        ExecStart=/opt/bin/deploy-kube-system
+
+  update:
+    group: stable
+    reboot-strategy: off
diff --git a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-node.yml b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-node.yml
new file mode 100644
index 0000000..d2f5454
--- /dev/null
+++ b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-node.yml
@@ -0,0 +1,237 @@
+#cloud-config
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+---
+ssh_authorized_keys:
+  {{ k8s.ssh.pub.key }}
+
+write-files:
+  - path: /opt/bin/setup-kube-system
+    permissions: 0700
+    owner: root:root
+    content: |
+      #!/bin/bash -e
+
+      if [[ -f "/home/core/success" ]]; then
+      echo "Already provisioned!"
+      exit 0
+      fi
+
+      ISO_MOUNT_DIR=/mnt/k8sdisk
+      BINARIES_DIR=${ISO_MOUNT_DIR}/
+      K8S_CONFIG_SCRIPTS_COPY_DIR=/tmp/k8sconfigscripts/
+      ATTEMPT_ONLINE_INSTALL=false
+      setup_complete=false
+
+      OFFLINE_INSTALL_ATTEMPT_SLEEP=30
+      MAX_OFFLINE_INSTALL_ATTEMPTS=40
+      offline_attempts=1
+      MAX_SETUP_CRUCIAL_CMD_ATTEMPTS=3
+      EJECT_ISO_FROM_OS={{ k8s.eject.iso }}
+      crucial_cmd_attempts=1
+      iso_drive_path=""
+      while true; do
+        if (( "$offline_attempts" > "$MAX_OFFLINE_INSTALL_ATTEMPTS" )); then
+          echo "Warning: Offline install timed out!"
+          break
+        fi
+        set +e
+        output=`blkid -o device -t TYPE=iso9660`
+        set -e
+        if [ "$output" != "" ]; then
+          while read -r line; do
+            if [ ! -d "${ISO_MOUNT_DIR}" ]; then
+              mkdir "${ISO_MOUNT_DIR}"
+            fi
+            retval=0
+            set +e
+            mount -o ro "${line}" "${ISO_MOUNT_DIR}"
+            retval=$?
+            set -e
+            if [ $retval -eq 0 ]; then
+              if [ -d "$BINARIES_DIR" ]; then
+                iso_drive_path="${line}"
+                break
+              else
+                umount "${line}" && rmdir "${ISO_MOUNT_DIR}"
+              fi
+            fi
+          done <<< "$output"
+        fi
+        if [ -d "$BINARIES_DIR" ]; then
+          break
+        fi
+        echo "Waiting for Binaries directory $BINARIES_DIR to be available, sleeping for $OFFLINE_INSTALL_ATTEMPT_SLEEP seconds, attempt: $offline_attempts"
+        sleep $OFFLINE_INSTALL_ATTEMPT_SLEEP
+        offline_attempts=$[$offline_attempts + 1]
+      done
+
+      if [[ "$PATH" != *:/opt/bin && "$PATH" != *:/opt/bin:* ]]; then
+        export PATH=$PATH:/opt/bin
+      fi
+
+      if [ -d "$BINARIES_DIR" ]; then
+        ### Binaries available offline ###
+        echo "Installing binaries from ${BINARIES_DIR}"
+        mkdir -p /opt/cni/bin
+        tar -f "${BINARIES_DIR}/cni/cni-plugins-amd64.tgz" -C /opt/cni/bin -xz
+
+        mkdir -p /opt/bin
+        tar -f "${BINARIES_DIR}/cri-tools/crictl-linux-amd64.tar.gz" -C /opt/bin -xz
+
+        mkdir -p /opt/bin
+        cd /opt/bin
+        cp -a ${BINARIES_DIR}/k8s/{kubeadm,kubelet,kubectl} /opt/bin
+        chmod +x {kubeadm,kubelet,kubectl}
+
+        sed "s:/usr/bin:/opt/bin:g" ${BINARIES_DIR}/kubelet.service > /etc/systemd/system/kubelet.service
+        mkdir -p /etc/systemd/system/kubelet.service.d
+        sed "s:/usr/bin:/opt/bin:g" ${BINARIES_DIR}/10-kubeadm.conf > /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
+
+        output=`ls ${BINARIES_DIR}/docker/`
+        if [ "$output" != "" ]; then
+          while read -r line; do
+            crucial_cmd_attempts=1
+            while true; do
+              if (( "$crucial_cmd_attempts" > "$MAX_SETUP_CRUCIAL_CMD_ATTEMPTS" )); then
+                echo "Loading docker image ${BINARIES_DIR}/docker/$line failed!"
+                break;
+              fi
+              retval=0
+              set +e
+              docker load < "${BINARIES_DIR}/docker/$line"
+              retval=$?
+              set -e
+              if [ $retval -eq 0 ]; then
+                break;
+              fi
+              crucial_cmd_attempts=$[$crucial_cmd_attempts + 1]
+            done
+          done <<< "$output"
+          setup_complete=true
+        fi
+        umount "${ISO_MOUNT_DIR}" && rmdir "${ISO_MOUNT_DIR}"
+        if [ "$EJECT_ISO_FROM_OS" = true ] && [ "$iso_drive_path" != "" ]; then
+          eject "${iso_drive_path}"
+        fi
+      fi
+      if [ "$setup_complete" = false ] && [ "$ATTEMPT_ONLINE_INSTALL" = true ]; then
+        ###  Binaries not available offline ###
+        RELEASE="v1.16.3"
+        CNI_VERSION="v0.7.5"
+        CRICTL_VERSION="v1.16.0"
+        echo "Warning: ${BINARIES_DIR} not found. Will get binaries and docker images from Internet."
+        mkdir -p /opt/cni/bin
+        curl -L "https://github.com/containernetworking/plugins/releases/download/${CNI_VERSION}/cni-plugins-amd64-${CNI_VERSION}.tgz" | tar -C /opt/cni/bin -xz
+
+        mkdir -p /opt/bin
+        curl -L "https://github.com/kubernetes-incubator/cri-tools/releases/download/${CRICTL_VERSION}/crictl-${CRICTL_VERSION}-linux-amd64.tar.gz" | tar -C /opt/bin -xz
+
+        mkdir -p /opt/bin
+        cd /opt/bin
+        curl -L --remote-name-all https://storage.googleapis.com/kubernetes-release/release/${RELEASE}/bin/linux/amd64/{kubeadm,kubelet,kubectl}
+        chmod +x {kubeadm,kubelet,kubectl}
+
+        curl -sSL "https://raw.githubusercontent.com/kubernetes/kubernetes/${RELEASE}/build/debs/kubelet.service" | sed "s:/usr/bin:/opt/bin:g" > /etc/systemd/system/kubelet.service
+        mkdir -p /etc/systemd/system/kubelet.service.d
+        curl -sSL "https://raw.githubusercontent.com/kubernetes/kubernetes/${RELEASE}/build/debs/10-kubeadm.conf" | sed "s:/usr/bin:/opt/bin:g" > /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
+      fi
+
+      systemctl enable kubelet && systemctl start kubelet
+      modprobe br_netfilter && sysctl net.bridge.bridge-nf-call-iptables=1
+
+      if [ -d "$BINARIES_DIR" ] && [ "$ATTEMPT_ONLINE_INSTALL" = true ]; then
+        crucial_cmd_attempts=1
+        while true; do
+          if (( "$crucial_cmd_attempts" > "$MAX_SETUP_CRUCIAL_CMD_ATTEMPTS" )); then
+            echo "Warning: kubeadm pull images failed after multiple tries!"
+            break;
+          fi
+          retval=0
+          set +e
+          kubeadm config images pull
+          retval=$?
+          set -e
+          if [ $retval -eq 0 ]; then
+            break;
+          fi
+          crucial_cmd_attempts=$[$crucial_cmd_attempts + 1]
+        done
+      fi
+
+  - path: /opt/bin/deploy-kube-system
+    permissions: 0700
+    owner: root:root
+    content: |
+      #!/bin/bash -e
+
+      if [[ -f "/home/core/success" ]]; then
+      echo "Already provisioned!"
+      exit 0
+      fi
+
+      if [[ $(systemctl is-active setup-kube-system) != "inactive" ]]; then
+        echo "setup-kube-system is running!"
+        exit 1
+      fi
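+      # Loads the IPVS and conntrack kernel modules used by kube-proxy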
+      modprobe ip_vs
+      modprobe ip_vs_wrr
+      modprobe ip_vs_sh
+      modprobe nf_conntrack_ipv4
+      if [[ "$PATH" != *:/opt/bin && "$PATH" != *:/opt/bin:* ]]; then
+        export PATH=$PATH:/opt/bin
+      fi
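+      # Joins this VM to the cluster as a worker node; the join IP and token are template placeholders
+      # expected to be filled in by the Kubernetes service before the config is applied.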
+      kubeadm join {{ k8s_master.join_ip }}:6443 --token {{ k8s_master.cluster.token }} --discovery-token-unsafe-skip-ca-verification
+
+      sudo touch /home/core/success
+      echo "true" > /home/core/success
+
+coreos:
+  units:
+    - name: docker.service
+      command: start
+      enable: true
+
+    - name: setup-kube-system.service
+      command: start
+      content: |
+        [Unit]
+        Requires=docker.service
+        After=docker.service
+
+        [Service]
+        Type=simple
+        StartLimitInterval=0
+        ExecStart=/opt/bin/setup-kube-system
+
+    - name: deploy-kube-system.service
+      command: start
+      content: |
+        [Unit]
+        After=setup-kube-system.service
+
+        [Service]
+        Type=simple
+        StartLimitInterval=0
+        Restart=on-failure
+        ExecStartPre=/usr/bin/curl -k https://{{ k8s_master.join_ip }}:6443/version
+        ExecStart=/opt/bin/deploy-kube-system
+
+  update:
+    group: stable
+    reboot-strategy: off
diff --git a/plugins/integrations/kubernetes-service/src/main/resources/script/upgrade-kubernetes.sh b/plugins/integrations/kubernetes-service/src/main/resources/script/upgrade-kubernetes.sh
new file mode 100644
index 0000000..ea36d7e
--- /dev/null
+++ b/plugins/integrations/kubernetes-service/src/main/resources/script/upgrade-kubernetes.sh
@@ -0,0 +1,133 @@
+#!/bin/bash -e
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# Versions 1.14 and below need extra flags with 'kubeadm upgrade node'
+if [ $# -lt 4 ]; then
+    echo "Invalid input. Valid usage: ./upgrade-kubernetes.sh UPGRADE_VERSION IS_MASTER IS_OLD_VERSION IS_EJECT_ISO"
+    echo "eg: ./upgrade-kubernetes.sh 1.16.3 true false false"
+    exit 1
+fi
+UPGRADE_VERSION="${1}"
+IS_MAIN_MASTER=""
+if [ $# -gt 1 ]; then
+  IS_MAIN_MASTER="${2}"
+fi
+IS_OLD_VERSION=""
+if [ $# -gt 2 ]; then
+  IS_OLD_VERSION="${3}"
+fi
+EJECT_ISO_FROM_OS=false
+if [ $# -gt 3 ]; then
+  EJECT_ISO_FROM_OS="${4}"
+fi
+
+export PATH=$PATH:/opt/bin
+
+ISO_MOUNT_DIR=/mnt/k8sdisk
+BINARIES_DIR=${ISO_MOUNT_DIR}/
+
+OFFLINE_INSTALL_ATTEMPT_SLEEP=5
+MAX_OFFLINE_INSTALL_ATTEMPTS=10
+offline_attempts=1
+iso_drive_path=""
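+# Poll for an attached iso9660 device that contains the binaries directory and mount it read-only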
+while true; do
+  if (( "$offline_attempts" > "$MAX_OFFLINE_INSTALL_ATTEMPTS" )); then
+    echo "Warning: Offline install timed out!"
+    break
+  fi
+  set +e
+  output=`blkid -o device -t TYPE=iso9660`
+  set -e
+  if [ "$output" != "" ]; then
+    while read -r line; do
+      if [ ! -d "${ISO_MOUNT_DIR}" ]; then
+        mkdir "${ISO_MOUNT_DIR}"
+      fi
+      retval=0
+      set +e
+      mount -o ro "${line}" "${ISO_MOUNT_DIR}"
+      retval=$?
+      set -e
+      if [ $retval -eq 0 ]; then
+        if [ -d "$BINARIES_DIR" ]; then
+          iso_drive_path="${line}"
+          break
+        else
+          umount "${line}" && rmdir "${ISO_MOUNT_DIR}"
+        fi
+      fi
+    done <<< "$output"
+  fi
+  if [ -d "$BINARIES_DIR" ]; then
+    break
+  fi
+  echo "Waiting for Binaries directory $BINARIES_DIR to be available, sleeping for $OFFLINE_INSTALL_ATTEMPT_SLEEP seconds, attempt: $offline_attempts"
+  sleep $OFFLINE_INSTALL_ATTEMPT_SLEEP
+  offline_attempts=$[$offline_attempts + 1]
+done
+
+if [ -d "$BINARIES_DIR" ]; then
+  ### Binaries available offline ###
+  echo "Installing binaries from ${BINARIES_DIR}"
+
+  cd /opt/bin
+
+  cp ${BINARIES_DIR}/k8s/kubeadm /opt/bin
+  chmod +x kubeadm
+
... 3652 lines suppressed ...