Posted to issues@cloudstack.apache.org by "ASF GitHub Bot (JIRA)" <ji...@apache.org> on 2018/04/30 09:54:02 UTC

[jira] [Commented] (CLOUDSTACK-9815) Application Container Service

    [ https://issues.apache.org/jira/browse/CLOUDSTACK-9815?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16458434#comment-16458434 ] 

ASF GitHub Bot commented on CLOUDSTACK-9815:
--------------------------------------------

DaanHoogland closed pull request #2059: WIP: CLOUDSTACK-9815 integration of ApplicationClusterService
URL: https://github.com/apache/cloudstack/pull/2059

This is a pull request from a forked repository. As GitHub hides the
original diff once the pull request is closed, it is displayed below for
the sake of provenance:
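
[Editor's note] For orientation before reading the diff: the new API command classes below delegate to an ApplicationClusterService. What follows is a minimal Java sketch of the interface those call sites imply; the method names, exception types and imported types are taken from the diff, while the parameter names, return types and exact signatures are assumptions, not the PR's actual interface.

    package org.apache.cloudstack.applicationcluster;

    import org.apache.cloudstack.api.command.user.applicationcluster.ListApplicationClusterCmd;
    import org.apache.cloudstack.api.response.ApplicationClusterResponse;
    import org.apache.cloudstack.api.response.ListResponse;

    import com.cloud.exception.ConcurrentOperationException;
    import com.cloud.exception.InsufficientCapacityException;
    import com.cloud.exception.ManagementServerException;
    import com.cloud.exception.ResourceAllocationException;
    import com.cloud.exception.ResourceUnavailableException;
    import com.cloud.user.Account;

    public interface ApplicationClusterService {

        // Lookup used by the start/stop commands to validate the request.
        ApplicationCluster findById(Long id);

        // Called from CreateApplicationClusterCmd.create(); parameter order follows that call site.
        ApplicationCluster createContainerCluster(String name, String description, Long zoneId,
                Long serviceOfferingId, Account owner, Long networkId, String sshKeyPair,
                Long clusterSize, String dockerRegistryUserName, String dockerRegistryPassword,
                String dockerRegistryUrl, String dockerRegistryEmail)
                throws InsufficientCapacityException, ConcurrentOperationException,
                       ManagementServerException, ResourceAllocationException;

        // Called from CreateApplicationClusterCmd.execute() (onCreate = true) and
        // StartApplicationClusterCmd.execute() (onCreate = false).
        boolean startContainerCluster(long clusterId, boolean onCreate)
                throws InsufficientCapacityException, ResourceUnavailableException,
                       ManagementServerException, ResourceAllocationException;

        // Called from StopApplicationClusterCmd.execute().
        boolean stopContainerCluster(long clusterId) throws ManagementServerException;

        // Called from DeleteApplicationClusterCmd.execute(); return type assumed.
        boolean deleteContainerCluster(Long clusterId);

        // Builds the restricted-view API response returned by the create/start commands.
        ApplicationClusterResponse createContainerClusterResponse(long clusterId);

        // Backs ListApplicationClusterCmd.execute().
        ListResponse<ApplicationClusterResponse> listApplicationClusters(ListApplicationClusterCmd cmd);
    }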

diff --git a/deps/kubectl b/deps/kubectl
new file mode 100755
index 00000000000..a8f2a1e7027
Binary files /dev/null and b/deps/kubectl differ
diff --git a/packaging/centos7/cloud.spec b/packaging/centos7/cloud.spec
index dc36c90bdc9..31dc16e3cf2 100644
--- a/packaging/centos7/cloud.spec
+++ b/packaging/centos7/cloud.spec
@@ -264,6 +264,10 @@ install -D client/target/utilities/bin/cloud-update-xenserver-licenses ${RPM_BUI
 cp -r client/target/utilities/scripts/db/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/setup
 cp -r client/target/cloud-client-ui-%{_maventag}/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/webapps/client
 
+install -D utils/conf/k8s-master.yml ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/management/k8s-master.yml
+install -D utils/conf/k8s-node.yml ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/management/k8s-node.yml
+cp deps/kubectl ${RPM_BUILD_ROOT}%{_bindir}/
+
 # Don't package the scripts in the management webapp
 rm -rf ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/webapps/client/WEB-INF/classes/scripts
 rm -rf ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/webapps/client/WEB-INF/classes/vms
@@ -534,6 +538,8 @@ pip install --upgrade /usr/share/cloudstack-marvin/Marvin-*.tar.gz
 %{_defaultdocdir}/%{name}-management-%{version}/NOTICE
 %attr(0644,cloud,cloud) %{_localstatedir}/log/%{name}/management/catalina.out
 %attr(0644,root,root) %{_sysconfdir}/logrotate.d/%{name}-catalina
+%{_sysconfdir}/%{name}/management/*.yml
+%attr(0755,root,root) %{_bindir}/kubectl
 
 %files agent
 %attr(0755,root,root) %{_bindir}/%{name}-setup-agent
diff --git a/plugins/application-clusters/pom.xml b/plugins/application-clusters/pom.xml
new file mode 100644
index 00000000000..4dfaf90c136
--- /dev/null
+++ b/plugins/application-clusters/pom.xml
@@ -0,0 +1,222 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements. See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership. The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License. You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied. See the License for the
+  specific language governing permissions and limitations
+  under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" 
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" 
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
+                             http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+  <modelVersion>4.0.0</modelVersion>
+
+  <groupId>org.apache.cloudstack</groupId>
+  <artifactId>cloud-plugin-shapeblue-acs</artifactId>
+  <name>Apache CloudStack Plugin - Application Cluster Service</name>
+  <description>Manages sets of VMs to be used in, for instance, Kubernetes application clusters, using the CloudStack control plane</description>
+  <inceptionYear>2016</inceptionYear>
+
+  <parent>
+    <groupId>org.apache.cloudstack</groupId>
+    <artifactId>cloudstack-plugins</artifactId>
+    <version>4.10.0.0-SNAPSHOT</version>
+    <relativePath>../pom.xml</relativePath>
+  </parent>
+
+  <properties>
+    <cs.bcprov16.version>1.49</cs.bcprov16.version>
+  </properties>
+
+  <build>
+    <defaultGoal>install</defaultGoal>
+    <sourceDirectory>src</sourceDirectory>
+    <testSourceDirectory>test</testSourceDirectory>
+    <resources>
+      <resource>
+        <directory>${basedir}/resources</directory>
+      </resource>
+      <resource>
+        <directory>${basedir}/schema/db</directory>
+      </resource>
+    </resources>
+    <testResources>
+      <testResource>
+        <directory>test/resources</directory>
+      </testResource>
+    </testResources>
+    <outputDirectory>${basedir}/target/classes</outputDirectory>
+    <testOutputDirectory>${basedir}/target/test-classes</testOutputDirectory>
+  </build>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.cloudstack</groupId>
+      <artifactId>cloud-utils</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.cloudstack</groupId>
+      <artifactId>cloud-api</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.cloudstack</groupId>
+      <artifactId>cloud-core</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.cloudstack</groupId>
+      <artifactId>cloud-framework-config</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.cloudstack</groupId>
+      <artifactId>cloud-framework-db</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.cloudstack</groupId>
+      <artifactId>cloud-framework-security</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.cloudstack</groupId>
+      <artifactId>cloud-engine-schema</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.cloudstack</groupId>
+      <artifactId>cloud-engine-api</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.cloudstack</groupId>
+      <artifactId>cloud-engine-components-api</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.cloudstack</groupId>
+      <artifactId>cloud-server</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.cloudstack</groupId>
+      <artifactId>cloud-framework-managed-context</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.eclipse.persistence</groupId>
+      <artifactId>javax.persistence</artifactId>
+      <version>${cs.jpa.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>javax.inject</groupId>
+      <artifactId>javax.inject</artifactId>
+      <version>1</version>
+    </dependency>
+    <dependency>
+      <groupId>javax.ejb</groupId>
+      <artifactId>ejb-api</artifactId>
+      <version>${cs.ejb.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>com.google.code.gson</groupId>
+      <artifactId>gson</artifactId>
+      <version>${cs.gson.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+      <version>${cs.guava.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>log4j</groupId>
+      <artifactId>log4j</artifactId>
+      <version>${cs.log4j.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.springframework</groupId>
+      <artifactId>spring-context</artifactId>
+      <version>${org.springframework.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.springframework</groupId>
+      <artifactId>spring-aop</artifactId>
+      <version>${org.springframework.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.springframework</groupId>
+      <artifactId>spring-beans</artifactId>
+      <version>${org.springframework.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.springframework</groupId>
+      <artifactId>spring-test</artifactId>
+      <version>${org.springframework.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>commons-codec</groupId>
+      <artifactId>commons-codec</artifactId>
+      <version>${cs.codec.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <version>${cs.junit.version}</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.hamcrest</groupId>
+      <artifactId>hamcrest-library</artifactId>
+      <version>${cs.hamcrest.version}</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.mockito</groupId>
+      <artifactId>mockito-all</artifactId>
+      <version>${cs.mockito.version}</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.powermock</groupId>
+      <artifactId>powermock-module-junit4</artifactId>
+      <version>${cs.powermock.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.powermock</groupId>
+      <artifactId>powermock-api-mockito</artifactId>
+      <version>${cs.powermock.version}</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.flywaydb</groupId>
+      <artifactId>flyway-core</artifactId>
+      <version>4.0.3</version>
+    </dependency>
+    <dependency>
+      <groupId>org.bouncycastle</groupId>
+      <artifactId>bcprov-jdk15on</artifactId>
+      <version>${cs.bcprov.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>joda-time</groupId>
+      <artifactId>joda-time</artifactId>
+      <version>${cs.joda-time.version}</version>
+    </dependency>
+  </dependencies>
+
+</project>
diff --git a/plugins/application-clusters/resources/META-INF/cloudstack/acs/module.properties b/plugins/application-clusters/resources/META-INF/cloudstack/acs/module.properties
new file mode 100644
index 00000000000..a1c246b99f2
--- /dev/null
+++ b/plugins/application-clusters/resources/META-INF/cloudstack/acs/module.properties
@@ -0,0 +1,15 @@
+# Copyright 2016 ShapeBlue Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+name=application-clusters
+parent=compute
diff --git a/plugins/application-clusters/resources/META-INF/cloudstack/acs/spring-application-clusters-context.xml b/plugins/application-clusters/resources/META-INF/cloudstack/acs/spring-application-clusters-context.xml
new file mode 100644
index 00000000000..14dfbc31c4b
--- /dev/null
+++ b/plugins/application-clusters/resources/META-INF/cloudstack/acs/spring-application-clusters-context.xml
@@ -0,0 +1,32 @@
+<!--
+    Copyright 2016 ShapeBlue Ltd
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+ -->
+<beans xmlns="http://www.springframework.org/schema/beans"
+       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+       xmlns:context="http://www.springframework.org/schema/context"
+       xmlns:aop="http://www.springframework.org/schema/aop"
+       xsi:schemaLocation="http://www.springframework.org/schema/beans
+                      http://www.springframework.org/schema/beans/spring-beans-3.0.xsd
+                      http://www.springframework.org/schema/aop http://www.springframework.org/schema/aop/spring-aop-3.0.xsd
+                      http://www.springframework.org/schema/context
+                      http://www.springframework.org/schema/context/spring-context-3.0.xsd"
+                      >
+
+    <bean id="ApplicationClusterDaoImpl"        class="org.apache.cloudstack.applicationcluster.dao.ApplicationClusterDaoImpl" />
+    <bean id="ApplicationClusterDetailsDaoImpl" class="org.apache.cloudstack.applicationcluster.dao.ApplicationClusterDetailsDaoImpl" />
+    <bean id="ApplicationClusterVmMapDaoImpl"   class="org.apache.cloudstack.applicationcluster.dao.ApplicationClusterVmMapDaoImpl" />
+    <bean id="ApplicationClusterManagerImpl"    class="org.apache.cloudstack.applicationcluster.ApplicationClusterManagerImpl" />
+
+</beans>
diff --git a/plugins/application-clusters/src/org/apache/cloudstack/api/ApplicationClusterApiConstants.java b/plugins/application-clusters/src/org/apache/cloudstack/api/ApplicationClusterApiConstants.java
new file mode 100755
index 00000000000..ddcba0b8246
--- /dev/null
+++ b/plugins/application-clusters/src/org/apache/cloudstack/api/ApplicationClusterApiConstants.java
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2016 ShapeBlue Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cloudstack.api;
+
+public class ApplicationClusterApiConstants {
+    public static final String CONSOLE_END_POINT = "consoleendpoint";
+    public static final String DOCKER_REGISTRY_USER_NAME = "dockerregistryusername";
+    public static final String DOCKER_REGISTRY_PASSWORD = "dockerregistrypassword";
+    public static final String DOCKER_REGISTRY_URL = "dockerregistryurl";
+    public static final String DOCKER_REGISTRY_EMAIL = "dockerregistryemail";
+}
diff --git a/plugins/application-clusters/src/org/apache/cloudstack/api/command/user/applicationcluster/CreateApplicationClusterCmd.java b/plugins/application-clusters/src/org/apache/cloudstack/api/command/user/applicationcluster/CreateApplicationClusterCmd.java
new file mode 100644
index 00000000000..09e0803b8c2
--- /dev/null
+++ b/plugins/application-clusters/src/org/apache/cloudstack/api/command/user/applicationcluster/CreateApplicationClusterCmd.java
@@ -0,0 +1,278 @@
+/*
+ * Copyright 2016 ShapeBlue Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cloudstack.api.command.user.applicationcluster;
+
+import javax.inject.Inject;
+
+import org.apache.cloudstack.applicationcluster.ApplicationCluster;
+import org.apache.cloudstack.applicationcluster.ApplicationClusterEventTypes;
+import org.apache.cloudstack.applicationcluster.ApplicationClusterService;
+import com.cloud.exception.InsufficientCapacityException;
+import com.cloud.exception.ResourceAllocationException;
+import com.cloud.exception.ManagementServerException;
+import com.cloud.exception.ConcurrentOperationException;
+import com.cloud.exception.ResourceUnavailableException;
+import com.cloud.user.Account;
+import org.apache.cloudstack.acl.RoleType;
+import org.apache.cloudstack.acl.SecurityChecker.AccessType;
+import org.apache.cloudstack.api.ACL;
+import org.apache.cloudstack.api.APICommand;
+import org.apache.cloudstack.api.ApiCommandJobType;
+import org.apache.cloudstack.api.ApiConstants;
+import org.apache.cloudstack.api.ApiErrorCode;
+import org.apache.cloudstack.api.ApplicationClusterApiConstants;
+import org.apache.cloudstack.api.BaseAsyncCreateCmd;
+import org.apache.cloudstack.api.Parameter;
+import org.apache.cloudstack.api.ResponseObject.ResponseView;
+import org.apache.cloudstack.api.ServerApiException;
+import org.apache.cloudstack.api.response.DomainResponse;
+import org.apache.cloudstack.api.response.NetworkResponse;
+import org.apache.cloudstack.api.response.ProjectResponse;
+import org.apache.cloudstack.api.response.ServiceOfferingResponse;
+import org.apache.cloudstack.api.response.ApplicationClusterResponse;
+import org.apache.cloudstack.api.response.ZoneResponse;
+import org.apache.cloudstack.context.CallContext;
+import org.apache.log4j.Logger;
+
+
+@APICommand(name = CreateApplicationClusterCmd.APINAME,
+        description = "Creates a cluster of VM's for launching containers.",
+        responseObject = ApplicationClusterResponse.class,
+        responseView = ResponseView.Restricted,
+        entityType = {ApplicationCluster.class},
+        requestHasSensitiveInfo = false,
+        responseHasSensitiveInfo = true,
+        authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User})
+public class CreateApplicationClusterCmd extends BaseAsyncCreateCmd {
+
+    public static final Logger s_logger = Logger.getLogger(CreateApplicationClusterCmd.class.getName());
+
+    public static final String APINAME = "createApplicationCluster";
+
+    /////////////////////////////////////////////////////
+    //////////////// API parameters /////////////////////
+    /////////////////////////////////////////////////////
+
+    @Parameter(name = ApiConstants.NAME, type = CommandType.STRING, required = true,  description = "name for the container cluster")
+    private String name;
+
+    @Parameter(name = ApiConstants.DESCRIPTION, type = CommandType.STRING, description = "description for the container cluster")
+    private String description;
+
+    @ACL(accessType = AccessType.UseEntry)
+    @Parameter(name = ApiConstants.ZONE_ID, type = CommandType.UUID, entityType = ZoneResponse.class, required = true,
+            description = "availability zone in which container cluster to be launched")
+    private Long zoneId;
+
+    @ACL(accessType = AccessType.UseEntry)
+    @Parameter(name = ApiConstants.SERVICE_OFFERING_ID, type = CommandType.UUID, entityType = ServiceOfferingResponse.class,
+            required = true, description = "the ID of the service offering for the virtual machines in the cluster.")
+    private Long serviceOfferingId;
+
+    @ACL(accessType = AccessType.UseEntry)
+    @Parameter(name = ApiConstants.ACCOUNT, type = CommandType.STRING, description = "an optional account for the" +
+            " virtual machine. Must be used with domainId.")
+    private String accountName;
+
+    @ACL(accessType = AccessType.UseEntry)
+    @Parameter(name = ApiConstants.DOMAIN_ID, type = CommandType.UUID, entityType = DomainResponse.class,
+            description = "an optional domainId for the virtual machine. If the account parameter is used, domainId must also be used.")
+    private Long domainId;
+
+    @ACL(accessType = AccessType.UseEntry)
+    @Parameter(name = ApiConstants.PROJECT_ID, type = CommandType.UUID, entityType = ProjectResponse.class,
+            description = "Deploy cluster for the project")
+    private Long projectId;
+
+    @ACL(accessType = AccessType.UseEntry)
+    @Parameter(name = ApiConstants.NETWORK_ID, type = CommandType.UUID, entityType = NetworkResponse.class,
+            description = "Network in which container cluster is to be launched")
+    private Long networkId;
+
+    @ACL(accessType = AccessType.UseEntry)
+    @Parameter(name = ApiConstants.SSH_KEYPAIR, type = CommandType.STRING,
+            description = "name of the ssh key pair used to login to the virtual machines")
+    private String sshKeyPairName;
+
+    @Parameter(name=ApiConstants.SIZE, type = CommandType.LONG,
+            required = true, description = "number of container cluster nodes")
+    private Long clusterSize;
+
+    @Parameter(name = ApplicationClusterApiConstants.DOCKER_REGISTRY_USER_NAME, type = CommandType.STRING,
+            description = "user name for the docker image private registry")
+    private String dockerRegistryUserName;
+
+    @Parameter(name = ApplicationClusterApiConstants.DOCKER_REGISTRY_PASSWORD, type = CommandType.STRING,
+            description = "password for the docker image private registry")
+    private String dockerRegistryPassword;
+
+    @Parameter(name = ApplicationClusterApiConstants.DOCKER_REGISTRY_URL, type = CommandType.STRING,
+            description = "URL for the docker image private registry")
+    private String dockerRegistryUrl;
+
+    @Parameter(name = ApplicationClusterApiConstants.DOCKER_REGISTRY_EMAIL, type = CommandType.STRING,
+            description = "email of the docker image private registry user")
+    private String dockerRegistryEmail;
+
+    /////////////////////////////////////////////////////
+    /////////////////// Accessors ///////////////////////
+    /////////////////////////////////////////////////////
+
+    public String getAccountName() {
+        if (accountName == null) {
+            return CallContext.current().getCallingAccount().getAccountName();
+        }
+        return accountName;
+    }
+
+    public String getDisplayName() {
+        return description;
+    }
+
+    public Long getDomainId() {
+        if (domainId == null) {
+            return CallContext.current().getCallingAccount().getDomainId();
+        }
+        return domainId;
+    }
+
+    public Long getServiceOfferingId() {
+        return serviceOfferingId;
+    }
+
+    public Long getZoneId() {
+        return zoneId;
+    }
+
+    public Long getNetworkId() { return networkId;}
+
+    public String getName() {
+        return name;
+    }
+
+    public String getSSHKeyPairName() {
+        return sshKeyPairName;
+    }
+
+    @Inject
+    public ApplicationClusterService _applicationClusterService;
+
+    /////////////////////////////////////////////////////
+    /////////////// API Implementation///////////////////
+    /////////////////////////////////////////////////////
+
+    @Override
+    public String getCommandName() {
+        return APINAME.toLowerCase() + "response";
+    }
+
+    public static String getResultObjectName() {
+        return "applicationcluster";
+    }
+
+    @Override
+    public long getEntityOwnerId() {
+        Long accountId = _accountService.finalyzeAccountId(accountName, domainId, projectId, true);
+        if (accountId == null) {
+            return CallContext.current().getCallingAccount().getId();
+        }
+
+        return accountId;
+    }
+
+    @Override
+    public String getEventType() {
+        return ApplicationClusterEventTypes.EVENT_CONTAINER_CLUSTER_CREATE;
+    }
+
+    @Override
+    public String getCreateEventType() {
+        return ApplicationClusterEventTypes.EVENT_CONTAINER_CLUSTER_CREATE;
+    }
+
+    @Override
+    public String getCreateEventDescription() {
+        return "creating container cluster";
+    }
+
+    @Override
+    public String getEventDescription() {
+        return "creating container cluster. Cluster Id: " + getEntityId();
+    }
+
+    @Override
+    public ApiCommandJobType getInstanceType() {
+        return ApiCommandJobType.VirtualMachine;
+    }
+
+    @Override
+    public void execute() {
+
+        ApplicationCluster applicationCluster;
+
+        try {
+            _applicationClusterService.startContainerCluster(getEntityId(), true);
+            ApplicationClusterResponse response = _applicationClusterService.createContainerClusterResponse(getEntityId());
+            response.setResponseName(getCommandName());
+            setResponseObject(response);
+        } catch (InsufficientCapacityException ex) {
+            s_logger.warn("Failed to deploy container cluster:" + getEntityUuid() + " due to " + ex.getMessage());
+            throw new ServerApiException(ApiErrorCode.INSUFFICIENT_CAPACITY_ERROR,
+                    "Failed to deploy container cluster:" + getEntityUuid(), ex);
+        } catch (ResourceUnavailableException ex) {
+            s_logger.warn("Failed to deploy container cluster:" + getEntityUuid() + " due to " + ex.getMessage());
+            throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR,
+                    "Failed to deploy container cluster:" + getEntityUuid(), ex);
+        } catch (ResourceAllocationException ex) {
+            s_logger.warn("Failed to deploy container cluster:" + getEntityUuid() + " due to " + ex.getMessage());
+            throw new ServerApiException(ApiErrorCode.RESOURCE_ALLOCATION_ERROR,
+                    "Failed to deploy container cluster:" + getEntityUuid(), ex);
+        } catch (ManagementServerException ex) {
+            s_logger.warn("Failed to deploy container cluster:" + getEntityUuid() + " due to " + ex.getMessage());
+            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR,
+                    "Failed to deploy container cluster:" + getEntityUuid(), ex);
+        }
+    }
+
+    @Override
+    public void create() throws ResourceAllocationException {
+
+        try {
+
+            Account owner = _accountService.getActiveAccountById(getEntityOwnerId());
+
+            ApplicationCluster cluster = _applicationClusterService.createContainerCluster(name,
+                    description, zoneId, serviceOfferingId, owner, networkId, sshKeyPairName, clusterSize,
+                    dockerRegistryUserName, dockerRegistryPassword, dockerRegistryUrl, dockerRegistryEmail);
+
+            if (cluster != null) {
+                setEntityId(cluster.getId());
+                setEntityUuid(cluster.getUuid());
+            } else {
+                throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to create container cluster");
+            }
+        }  catch (ConcurrentOperationException ex) {
+            s_logger.error("Exception: ", ex);
+            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage());
+        } catch (InsufficientCapacityException ex) {
+            s_logger.error("Exception: ", ex);
+            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage());
+        } catch (ManagementServerException me) {
+            s_logger.error("Exception: ", me);
+            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, me.getMessage());
+        }
+    }
+}
diff --git a/plugins/application-clusters/src/org/apache/cloudstack/api/command/user/applicationcluster/DeleteApplicationClusterCmd.java b/plugins/application-clusters/src/org/apache/cloudstack/api/command/user/applicationcluster/DeleteApplicationClusterCmd.java
new file mode 100644
index 00000000000..0b8d108e835
--- /dev/null
+++ b/plugins/application-clusters/src/org/apache/cloudstack/api/command/user/applicationcluster/DeleteApplicationClusterCmd.java
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2016 ShapeBlue Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cloudstack.api.command.user.applicationcluster;
+
+import org.apache.cloudstack.applicationcluster.ApplicationClusterEventTypes;
+import org.apache.cloudstack.applicationcluster.ApplicationCluster;
+import org.apache.cloudstack.applicationcluster.ApplicationClusterService;
+import com.cloud.exception.ConcurrentOperationException;
+import com.cloud.exception.InsufficientCapacityException;
+import com.cloud.exception.NetworkRuleConflictException;
+import com.cloud.exception.ResourceAllocationException;
+import com.cloud.exception.ResourceUnavailableException;
+import org.apache.cloudstack.acl.RoleType;
+import org.apache.cloudstack.api.APICommand;
+import org.apache.cloudstack.api.ApiConstants;
+import org.apache.cloudstack.api.ApiErrorCode;
+import org.apache.cloudstack.api.BaseAsyncCmd;
+import org.apache.cloudstack.api.Parameter;
+import org.apache.cloudstack.api.ServerApiException;
+import org.apache.cloudstack.api.response.ApplicationClusterResponse;
+import org.apache.cloudstack.api.response.SuccessResponse;
+import org.apache.cloudstack.context.CallContext;
+import org.apache.log4j.Logger;
+
+import javax.inject.Inject;
+
+@APICommand(name = DeleteApplicationClusterCmd.APINAME,
+        description = "deletes a container cluster",
+        responseObject = SuccessResponse.class,
+        entityType = {ApplicationCluster.class},
+        authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User})
+public class DeleteApplicationClusterCmd extends BaseAsyncCmd {
+
+    public static final Logger s_logger = Logger.getLogger(DeleteApplicationClusterCmd.class.getName());
+
+    public static final String APINAME = "deleteApplicationCluster";
+
+    /////////////////////////////////////////////////////
+    //////////////// API parameters /////////////////////
+    /////////////////////////////////////////////////////
+
+    @Parameter(name = ApiConstants.ID,
+            type = CommandType.UUID,
+            entityType = ApplicationClusterResponse.class,
+            required = true,
+            description = "the ID of the container cluster")
+    private Long id;
+
+    /////////////////////////////////////////////////////
+    /////////////////// Accessors ///////////////////////
+    /////////////////////////////////////////////////////
+
+    public Long getId() {
+        return id;
+    }
+
+    @Inject
+    public ApplicationClusterService _applicationClusterService;
+
+    /////////////////////////////////////////////////////
+    /////////////// API Implementation///////////////////
+    /////////////////////////////////////////////////////
+
+
+    @Override
+    public void execute() throws ResourceUnavailableException, InsufficientCapacityException,
+            ServerApiException, ConcurrentOperationException, ResourceAllocationException,
+            NetworkRuleConflictException {
+        try {
+            _applicationClusterService.deleteContainerCluster(id);
+            SuccessResponse response = new SuccessResponse(getCommandName());
+            setResponseObject(response);
+        } catch (Exception e) {
+            s_logger.warn("Failed to delete vm container cluster due to " + e);
+            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to delete vm container cluster", e);
+        }
+    }
+
+    @Override
+    public String getCommandName() {
+        return APINAME.toLowerCase() + "response";
+    }
+
+    @Override
+    public long getEntityOwnerId() {
+        return CallContext.current().getCallingAccount().getId();
+    }
+
+
+    @Override
+    public String getEventType() {
+        return ApplicationClusterEventTypes.EVENT_CONTAINER_CLUSTER_DELETE;
+    }
+
+    @Override
+    public String getEventDescription() {
+        return "Deleting container cluster. Cluster Id: " + getId();
+    }
+
+}
diff --git a/plugins/application-clusters/src/org/apache/cloudstack/api/command/user/applicationcluster/ListApplicationClusterCACertCmd.java b/plugins/application-clusters/src/org/apache/cloudstack/api/command/user/applicationcluster/ListApplicationClusterCACertCmd.java
new file mode 100644
index 00000000000..02750cce730
--- /dev/null
+++ b/plugins/application-clusters/src/org/apache/cloudstack/api/command/user/applicationcluster/ListApplicationClusterCACertCmd.java
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2016 ShapeBlue Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cloudstack.api.command.user.applicationcluster;
+
+import com.cloud.exception.ConcurrentOperationException;
+import com.cloud.exception.InsufficientCapacityException;
+import com.cloud.exception.NetworkRuleConflictException;
+import com.cloud.exception.ResourceAllocationException;
+import com.cloud.exception.ResourceUnavailableException;
+import org.apache.cloudstack.applicationcluster.ApplicationClusterManager;
+import org.apache.cloudstack.acl.RoleType;
+import org.apache.cloudstack.api.APICommand;
+import org.apache.cloudstack.api.BaseCmd;
+import org.apache.cloudstack.api.ServerApiException;
+import org.apache.cloudstack.api.response.RootCACertResponse;
+import org.apache.cloudstack.context.CallContext;
+import org.apache.cloudstack.framework.security.keystore.KeystoreDao;
+import org.apache.cloudstack.framework.security.keystore.KeystoreVO;
+
+import javax.inject.Inject;
+
+@APICommand(name = ListApplicationClusterCACertCmd.APINAME,
+        description = "Lists the root CA certificate for using container clusters",
+        responseObject = RootCACertResponse.class,
+        requestHasSensitiveInfo = false,
+        responseHasSensitiveInfo = false,
+        authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User})
+public class ListApplicationClusterCACertCmd extends BaseCmd {
+
+    public static final String APINAME = "listApplicationClusterCACert";
+
+    @Inject
+    private KeystoreDao keystoreDao;
+
+    @Override
+    public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException {
+        final KeystoreVO rootCA = keystoreDao.findByName(ApplicationClusterManager.CCS_ROOTCA_KEYPAIR);
+        final RootCACertResponse response = new RootCACertResponse();
+        response.setRootCACert(rootCA.getCertificate());
+        response.setResponseName(getCommandName());
+        setResponseObject(response);
+    }
+
+    @Override
+    public String getCommandName() {
+        return APINAME.toLowerCase() + "response";
+    }
+
+    @Override
+    public long getEntityOwnerId() {
+        return CallContext.current().getCallingAccount().getId();
+    }
+}
diff --git a/plugins/application-clusters/src/org/apache/cloudstack/api/command/user/applicationcluster/ListApplicationClusterCmd.java b/plugins/application-clusters/src/org/apache/cloudstack/api/command/user/applicationcluster/ListApplicationClusterCmd.java
new file mode 100644
index 00000000000..11c6d6bdb4e
--- /dev/null
+++ b/plugins/application-clusters/src/org/apache/cloudstack/api/command/user/applicationcluster/ListApplicationClusterCmd.java
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2016 ShapeBlue Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cloudstack.api.command.user.applicationcluster;
+
+import javax.inject.Inject;
+
+import org.apache.cloudstack.acl.RoleType;
+import org.apache.cloudstack.api.response.ApplicationClusterResponse;
+import org.apache.log4j.Logger;
+
+import org.apache.cloudstack.api.APICommand;
+import org.apache.cloudstack.api.ApiConstants;
+import org.apache.cloudstack.api.BaseListCmd;
+import org.apache.cloudstack.api.Parameter;
+import org.apache.cloudstack.api.ResponseObject.ResponseView;
+import org.apache.cloudstack.api.response.ListResponse;
+import org.apache.cloudstack.applicationcluster.ApplicationClusterService;
+
+@APICommand(name = ListApplicationClusterCmd.APINAME,
+        description = "Lists container clusters",
+        responseObject = ApplicationClusterResponse.class,
+        responseView = ResponseView.Restricted,
+        requestHasSensitiveInfo = false,
+        responseHasSensitiveInfo = true,
+        authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User})
+public class ListApplicationClusterCmd extends BaseListCmd {
+
+    public static final Logger s_logger = Logger.getLogger(ListApplicationClusterCmd.class.getName());
+
+    public static final String APINAME = "listApplicationCluster";
+
+    /////////////////////////////////////////////////////
+    //////////////// API parameters /////////////////////
+    /////////////////////////////////////////////////////
+    @Parameter(name = ApiConstants.ID, type = CommandType.UUID,
+            entityType = ApplicationClusterResponse.class,
+            description = "the ID of the container cluster")
+    private Long id;
+
+    @Parameter(name = ApiConstants.STATE, type = CommandType.STRING, description = "state of the container cluster")
+    private String state;
+
+    @Parameter(name = ApiConstants.NAME, type = CommandType.STRING, description = "name of the container cluster" +
+            " (a substring match is made against the parameter value, data for all matching container clusters will be returned)")
+    private String name;
+
+    /////////////////////////////////////////////////////
+    /////////////////// Accessors ///////////////////////
+    /////////////////////////////////////////////////////
+
+    public Long getId() {
+        return id;
+    }
+
+    public String getState() {
+        return state;
+    }
+
+    public String getName() {
+        return name;
+    }
+
+    @Inject
+    public ApplicationClusterService _applicationClusterService;
+
+    /////////////////////////////////////////////////////
+    /////////////// API Implementation///////////////////
+    /////////////////////////////////////////////////////
+
+    @Override
+    public String getCommandName() {
+        return APINAME.toLowerCase() + "response";
+    }
+
+    @Override
+    public void execute() {
+
+        ListResponse<ApplicationClusterResponse> response = _applicationClusterService.listApplicationClusters(this);
+        response.setResponseName(getCommandName());
+        setResponseObject(response);
+    }
+}
diff --git a/plugins/application-clusters/src/org/apache/cloudstack/api/command/user/applicationcluster/StartApplicationClusterCmd.java b/plugins/application-clusters/src/org/apache/cloudstack/api/command/user/applicationcluster/StartApplicationClusterCmd.java
new file mode 100644
index 00000000000..58320a30c0f
--- /dev/null
+++ b/plugins/application-clusters/src/org/apache/cloudstack/api/command/user/applicationcluster/StartApplicationClusterCmd.java
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2016 ShapeBlue Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cloudstack.api.command.user.applicationcluster;
+
+import org.apache.cloudstack.applicationcluster.ApplicationClusterEventTypes;
+import org.apache.cloudstack.applicationcluster.ApplicationCluster;
+import org.apache.cloudstack.applicationcluster.ApplicationClusterService;
+import com.cloud.exception.ConcurrentOperationException;
+import com.cloud.exception.InsufficientCapacityException;
+import com.cloud.exception.ManagementServerException;
+import com.cloud.exception.NetworkRuleConflictException;
+import com.cloud.exception.ResourceAllocationException;
+import com.cloud.exception.ResourceUnavailableException;
+import org.apache.cloudstack.acl.RoleType;
+import org.apache.cloudstack.api.APICommand;
+import org.apache.cloudstack.api.ApiConstants;
+import org.apache.cloudstack.api.ApiErrorCode;
+import org.apache.cloudstack.api.BaseAsyncCmd;
+import org.apache.cloudstack.api.Parameter;
+import org.apache.cloudstack.api.ResponseObject;
+import org.apache.cloudstack.api.ServerApiException;
+import org.apache.cloudstack.api.response.ApplicationClusterResponse;
+import org.apache.cloudstack.context.CallContext;
+import org.apache.log4j.Logger;
+
+import javax.inject.Inject;
+
+@APICommand(name = StartApplicationClusterCmd.APINAME, description = "Starts a stopped container cluster",
+        responseObject = ApplicationClusterResponse.class,
+        responseView = ResponseObject.ResponseView.Restricted,
+        entityType = {ApplicationCluster.class},
+        requestHasSensitiveInfo = false,
+        responseHasSensitiveInfo = true,
+        authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User})
+public class StartApplicationClusterCmd extends BaseAsyncCmd {
+
+    public static final Logger s_logger = Logger.getLogger(StartApplicationClusterCmd.class.getName());
+
+    public static final String APINAME = "startApplicationCluster";
+
+    @Inject
+    public ApplicationClusterService applicationClusterService;
+
+    /////////////////////////////////////////////////////
+    //////////////// API parameters /////////////////////
+    /////////////////////////////////////////////////////
+    @Parameter(name = ApiConstants.ID, type = CommandType.UUID,
+            entityType = ApplicationClusterResponse.class,
+            description = "the ID of the container cluster")
+    private Long id;
+
+    /////////////////////////////////////////////////////
+    /////////////////// Accessors ///////////////////////
+    /////////////////////////////////////////////////////
+
+    public Long getId() {
+        return id;
+    }
+
+    @Override
+    public String getEventType() {
+        return ApplicationClusterEventTypes.EVENT_CONTAINER_CLUSTER_START;
+    }
+
+    @Override
+    public String getEventDescription() {
+        return "Starting container cluster id: " + getId();
+    }
+
+    @Override
+    public String getCommandName() {
+        return APINAME.toLowerCase() + "response";
+    }
+
+    @Override
+    public long getEntityOwnerId() {
+        return CallContext.current().getCallingAccount().getId();
+    }
+
+    /////////////////////////////////////////////////////
+    /////////////// API Implementation///////////////////
+    /////////////////////////////////////////////////////
+
+    public ApplicationCluster validateRequest() {
+        if (getId() == null || getId() < 1L) {
+            throw new ServerApiException(ApiErrorCode.PARAM_ERROR, "Invalid container cluster ID provided");
+        }
+        final ApplicationCluster applicationCluster = applicationClusterService.findById(getId());
+        if (applicationCluster == null) {
+            throw new ServerApiException(ApiErrorCode.PARAM_ERROR, "Given container cluster was not found");
+        }
+        return applicationCluster;
+    }
+
+    @Override
+    public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException {
+        final ApplicationCluster applicationCluster = validateRequest();
+        try {
+            applicationClusterService.startContainerCluster(getId().longValue(), false);
+            final ApplicationClusterResponse response = applicationClusterService.createContainerClusterResponse(getId());
+            response.setResponseName(getCommandName());
+            setResponseObject(response);
+        } catch (InsufficientCapacityException | ResourceUnavailableException | ManagementServerException  ex) {
+            s_logger.warn("Failed to start container cluster:" + applicationCluster.getUuid() + " due to " + ex.getMessage());
+            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR,
+                    "Failed to start container cluster:" + applicationCluster.getUuid(), ex);
+        }
+    }
+
+}
diff --git a/plugins/application-clusters/src/org/apache/cloudstack/api/command/user/applicationcluster/StopApplicationClusterCmd.java b/plugins/application-clusters/src/org/apache/cloudstack/api/command/user/applicationcluster/StopApplicationClusterCmd.java
new file mode 100644
index 00000000000..db86c96df45
--- /dev/null
+++ b/plugins/application-clusters/src/org/apache/cloudstack/api/command/user/applicationcluster/StopApplicationClusterCmd.java
@@ -0,0 +1,124 @@
+/*
+ * Copyright 2016 ShapeBlue Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cloudstack.api.command.user.applicationcluster;
+
+import org.apache.cloudstack.applicationcluster.ApplicationClusterEventTypes;
+import org.apache.cloudstack.applicationcluster.ApplicationCluster;
+import org.apache.cloudstack.applicationcluster.ApplicationClusterService;
+import com.cloud.exception.ConcurrentOperationException;
+import com.cloud.exception.InsufficientCapacityException;
+import com.cloud.exception.ManagementServerException;
+import com.cloud.exception.NetworkRuleConflictException;
+import com.cloud.exception.ResourceAllocationException;
+import com.cloud.exception.ResourceUnavailableException;
+import org.apache.cloudstack.acl.RoleType;
+import org.apache.cloudstack.api.APICommand;
+import org.apache.cloudstack.api.ApiConstants;
+import org.apache.cloudstack.api.ApiErrorCode;
+import org.apache.cloudstack.api.BaseAsyncCmd;
+import org.apache.cloudstack.api.Parameter;
+import org.apache.cloudstack.api.ResponseObject;
+import org.apache.cloudstack.api.ServerApiException;
+import org.apache.cloudstack.api.response.ApplicationClusterResponse;
+import org.apache.cloudstack.api.response.SuccessResponse;
+import org.apache.cloudstack.context.CallContext;
+import org.apache.log4j.Logger;
+
+import javax.inject.Inject;
+
+@APICommand(name = StopApplicationClusterCmd.APINAME, description = "Stops a running container cluster",
+        responseObject = SuccessResponse.class,
+        responseView = ResponseObject.ResponseView.Restricted,
+        entityType = {ApplicationCluster.class},
+        requestHasSensitiveInfo = false,
+        responseHasSensitiveInfo = true,
+        authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User})
+public class StopApplicationClusterCmd extends BaseAsyncCmd {
+
+    public static final Logger s_logger = Logger.getLogger(StopApplicationClusterCmd.class.getName());
+
+    public static final String APINAME = "stopApplicationCluster";
+
+    @Inject
+    public ApplicationClusterService applicationClusterService;
+
+    /////////////////////////////////////////////////////
+    //////////////// API parameters /////////////////////
+    /////////////////////////////////////////////////////
+    @Parameter(name = ApiConstants.ID, type = CommandType.UUID,
+            entityType = ApplicationClusterResponse.class,
+            description = "the ID of the container cluster")
+    private Long id;
+
+    /////////////////////////////////////////////////////
+    /////////////////// Accessors ///////////////////////
+    /////////////////////////////////////////////////////
+
+    public Long getId() {
+        return id;
+    }
+
+    @Override
+    public String getEventType() {
+        return ApplicationClusterEventTypes.EVENT_CONTAINER_CLUSTER_STOP;
+    }
+
+    @Override
+    public String getEventDescription() {
+        return "Stopping container cluster id: " + getId();
+    }
+
+    @Override
+    public String getCommandName() {
+        return APINAME.toLowerCase() + "response";
+    }
+
+    @Override
+    public long getEntityOwnerId() {
+        return CallContext.current().getCallingAccount().getId();
+    }
+
+    /////////////////////////////////////////////////////
+    /////////////// API Implementation///////////////////
+    /////////////////////////////////////////////////////
+
+    public ApplicationCluster validateRequest() {
+        if (getId() == null || getId() < 1L) {
+            throw new ServerApiException(ApiErrorCode.PARAM_ERROR, "Invalid container cluster ID provided");
+        }
+        final ApplicationCluster applicationCluster = applicationClusterService.findById(getId());
+        if (applicationCluster == null) {
+            throw new ServerApiException(ApiErrorCode.PARAM_ERROR, "Given container cluster was not found");
+        }
+        return applicationCluster;
+    }
+
+    @Override
+    public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException {
+        final ApplicationCluster applicationCluster = validateRequest();
+        try {
+            final boolean result = applicationClusterService.stopContainerCluster(getId());
+            final SuccessResponse response = new SuccessResponse(getCommandName());
+            response.setSuccess(result);
+            setResponseObject(response);
+        } catch (ManagementServerException ex) {
+            s_logger.warn("Failed to stop container cluster:" + applicationCluster.getUuid() + " due to " + ex.getMessage());
+            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR,
+                    "Failed to stop container cluster:" + applicationCluster.getUuid(), ex);
+        }
+    }
+
+}
diff --git a/plugins/application-clusters/src/org/apache/cloudstack/api/response/ApplicationClusterResponse.java b/plugins/application-clusters/src/org/apache/cloudstack/api/response/ApplicationClusterResponse.java
new file mode 100644
index 00000000000..44b767b9499
--- /dev/null
+++ b/plugins/application-clusters/src/org/apache/cloudstack/api/response/ApplicationClusterResponse.java
@@ -0,0 +1,286 @@
+/*
+ * Copyright 2016 ShapeBlue Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cloudstack.api.response;
+
+import org.apache.cloudstack.applicationcluster.ApplicationCluster;
+import com.cloud.serializer.Param;
+import com.google.gson.annotations.SerializedName;
+import org.apache.cloudstack.api.ApiConstants;
+import org.apache.cloudstack.api.ApplicationClusterApiConstants;
+import org.apache.cloudstack.api.BaseResponse;
+import org.apache.cloudstack.api.EntityReference;
+
+import java.util.List;
+
+@SuppressWarnings("unused")
+@EntityReference(value = {ApplicationCluster.class})
+public class ApplicationClusterResponse extends BaseResponse implements ControlledEntityResponse {
+
+    public String getName() {
+        return name;
+    }
+
+    public void setName(String name) {
+        this.name = name;
+    }
+
+    public String getDescription() {
+        return description;
+    }
+
+    public void setDescription(String description) {
+        this.description = description;
+    }
+
+    public String getZoneId() {
+        return zoneId;
+    }
+
+    public void setZoneId(String zoneId) {
+        this.zoneId = zoneId;
+    }
+
+    public String getZoneName() {
+        return zoneName;
+    }
+
+    public void setZoneName(String zoneName) {
+        this.zoneName = zoneName;
+    }
+
+    public String getServiceOfferingId() {
+        return serviceOfferingId;
+    }
+
+    public void setServiceOfferingId(String serviceOfferingId) {
+        this.serviceOfferingId = serviceOfferingId;
+    }
+
+    public String getTemplateId() {
+        return templateId;
+    }
+
+    public void setTemplateId(String templateId) {
+        this.templateId = templateId;
+    }
+
+    public String getNetworkId() {
+        return networkId;
+    }
+
+    public void setNetworkId(String networkId) {
+        this.networkId = networkId;
+    }
+
+    public String getKeypair() {
+        return keypair;
+    }
+
+    public void setKeypair(String keypair) {
+        this.keypair = keypair;
+    }
+
+    public String getClusterSize() {
+        return clusterSize;
+    }
+
+    public void setClusterSize(String clusterSize) {
+        this.clusterSize = clusterSize;
+    }
+
+    public String getCores() {
+        return cores;
+    }
+
+    public void setCores(String cores) {
+        this.cores = cores;
+    }
+
+    public String getMemory() {
+        return memory;
+    }
+
+    public void setMemory(String memory) {
+        this.memory = memory;
+    }
+
+    public String getState() { return  state;}
+
+    public void setState(String state) {this.state = state;}
+
+    public String getEndpoint() { return  endpoint;}
+
+    public void setEndpoint(String endpoint) {this.endpoint = endpoint;}
+
+    public String getConsoleEndpoint() { return  consoleendpoint;}
+
+    public void setConsoleEndpoint(String consoleendpoint) {this.consoleendpoint = consoleendpoint;}
+
+    public String getId() {
+        return this.id;
+    }
+
+    public void setId(String id) {
+        this.id = id;
+    }
+
+    public String getServiceOfferingName() {
+        return serviceOfferingName;
+    }
+
+    public void setServiceOfferingName(String serviceOfferingName) {
+        this.serviceOfferingName = serviceOfferingName;
+    }
+
+    public void setVirtualMachineIds(List<String> virtualMachineIds) { this.virtualMachineIds = virtualMachineIds; }
+
+    public void setUsername(String userName) { this.username = userName; }
+
+    public String getPassword() {
+        return password;
+    }
+
+    public void setPassword(String password) { this.password = password; }
+
+    public String getUsername() {
+        return username;
+    }
+
+    public List<String> getVirtualMachineIds() {
+        return virtualMachineIds;
+    }
+
+    public String getConsoleendpoint() {
+        return consoleendpoint;
+    }
+
+    @SerializedName(ApiConstants.ID)
+    @Param(description = "the id of the container cluster")
+    private String id;
+
+    @SerializedName(ApiConstants.NAME)
+    @Param(description = "Name of the container cluster")
+    private String name;
+
+    @SerializedName(ApiConstants.DESCRIPTION)
+    @Param(description = "Description of the container cluster")
+    private String description;
+
+    @SerializedName(ApiConstants.ZONE_ID)
+    @Param(description = "zone id")
+    private String zoneId;
+
+    @SerializedName(ApiConstants.ZONE_NAME)
+    @Param(description = "zone name")
+    private String zoneName;
+
+    @SerializedName(ApiConstants.SERVICE_OFFERING_ID)
+    @Param(description = "Service Offering id")
+    private String serviceOfferingId;
+
+    @SerializedName("serviceofferingname")
+    @Param(description = "the name of the service offering of the virtual machine")
+    private String serviceOfferingName;
+
+    @SerializedName(ApiConstants.TEMPLATE_ID)
+    @Param(description = "template id")
+    private String templateId;
+
+    @SerializedName(ApiConstants.NETWORK_ID)
+    @Param(description = "network id details")
+    private String networkId;
+
+    @SerializedName(ApiConstants.ASSOCIATED_NETWORK_NAME)
+    @Param(description = "the name of the Network associated with the IP address")
+    private String associatedNetworkName;
+
+    public String getAssociatedNetworkName() {
+        return associatedNetworkName;
+    }
+
+    public void setAssociatedNetworkName(String associatedNetworkName) {
+        this.associatedNetworkName = associatedNetworkName;
+    }
+
+    @SerializedName(ApiConstants.SSH_KEYPAIR)
+    @Param(description = "keypair details")
+    private String keypair;
+
+    @SerializedName(ApiConstants.SIZE)
+    @Param(description = "cluster size")
+    private String clusterSize;
+
+    @SerializedName(ApiConstants.STATE)
+    @Param(description = "state of the cluster")
+    private String state;
+
+    @SerializedName(ApiConstants.CPU_NUMBER)
+    @Param(description = "cluster cpu cores")
+    private String cores;
+
+    @SerializedName(ApiConstants.MEMORY)
+    @Param(description = "cluster size")
+    private String memory;
+
+    @SerializedName(ApiConstants.END_POINT)
+    @Param(description = "URL end point for the cluster")
+    private String endpoint;
+
+    @SerializedName(ApplicationClusterApiConstants.CONSOLE_END_POINT)
+    @Param(description = "URL end point for the cluster UI")
+    private String consoleendpoint;
+
+    @SerializedName(ApiConstants.VIRTUAL_MACHINE_IDS)
+    @Param(description = "the list of virtualmachine ids associated with this container cluster")
+    private List<String> virtualMachineIds;
+
+    @SerializedName(ApiConstants.USERNAME)
+    @Param(description = "Username with which container cluster is setup")
+    private String username;
+
+    @SerializedName(ApiConstants.PASSWORD)
+    @Param(description = "Password with which container cluster is setup")
+    private String password;
+
+    public ApplicationClusterResponse() {
+
+    }
+
+    @Override
+    public void setAccountName(String accountName) {
+
+    }
+
+    @Override
+    public void setProjectId(String projectId) {
+
+    }
+
+    @Override
+    public void setProjectName(String projectName) {
+
+    }
+
+    @Override
+    public void setDomainId(String domainId) {
+
+    }
+
+    @Override
+    public void setDomainName(String domainName) {
+
+    }
+}
diff --git a/plugins/application-clusters/src/org/apache/cloudstack/api/response/RootCACertResponse.java b/plugins/application-clusters/src/org/apache/cloudstack/api/response/RootCACertResponse.java
new file mode 100644
index 00000000000..37bf91eeead
--- /dev/null
+++ b/plugins/application-clusters/src/org/apache/cloudstack/api/response/RootCACertResponse.java
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2016 ShapeBlue Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cloudstack.api.response;
+
+import com.cloud.serializer.Param;
+import com.google.gson.annotations.SerializedName;
+import org.apache.cloudstack.api.ApiConstants;
+import org.apache.cloudstack.api.BaseResponse;
+
+public class RootCACertResponse extends BaseResponse {
+    @SerializedName(ApiConstants.CERTIFICATE)
+    @Param(description = "the root CA certificate in PEM format")
+    private String rootCACert;
+
+    public RootCACertResponse() {
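+        // setObjectName presumably sets the key ("rootcacert") under which this response object is
+        // nested in the serialized API output, following the usual BaseResponse convention.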
+        setObjectName("rootcacert");
+    }
+
+    public String getRootCACert() {
+        return rootCACert;
+    }
+
+    public void setRootCACert(String rootCACert) {
+        this.rootCACert = rootCACert;
+    }
+}
diff --git a/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/ApplicationCluster.java b/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/ApplicationCluster.java
new file mode 100644
index 00000000000..e14efcdc9f2
--- /dev/null
+++ b/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/ApplicationCluster.java
@@ -0,0 +1,118 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.applicationcluster;
+
+import org.apache.cloudstack.acl.ControlledEntity;
+import org.apache.cloudstack.api.Displayable;
+import org.apache.cloudstack.api.Identity;
+import org.apache.cloudstack.api.InternalIdentity;
+import com.cloud.utils.fsm.StateMachine2;
+
+/**
+ * ApplicationCluster describes the properties of a container cluster.
+ *
+ */
+public interface ApplicationCluster extends ControlledEntity, com.cloud.utils.fsm.StateObject<ApplicationCluster.State>, Identity, InternalIdentity, Displayable {
+
+    enum Event {
+        StartRequested,
+        StopRequested,
+        DestroyRequested,
+        RecoveryRequested,
+        ScaleUpRequested,
+        ScaleDownRequested,
+        OperationSucceeded,
+        OperationFailed,
+        CreateFailed,
+        FaultsDetected;
+    }
+
+    enum State {
+        Created("Initial State of container cluster. At this state its just a logical/DB entry with no resources consumed"),
+        Starting("Resources needed for container cluster are being provisioned"),
+        Running("Necessary resources are provisioned and container cluster is in operational ready state to launch containers"),
+        Stopping("Ephermal resources for the container cluster are being destroyed"),
+        Stopped("All ephermal resources for the container cluster are destroyed, Container cluster may still have ephermal resource like persistent volumens provisioned"),
+        Scaling("Transient state in which resoures are either getting scaled up/down"),
+        Alert("State to represent container clusters which are not in expected desired state (operationally in active control place, stopped cluster VM's etc)."),
+        Recovering("State in which container cluster is recovering from alert state"),
+        Destroyed("End state of container cluster in which all resources are destroyed, cluster will not be useable further"),
+        Destroying("State in which resources for the container cluster is getting cleaned up or yet to be cleaned up by garbage collector"),
+        Error("State of the failed to create container clusters");
+
+        protected static final StateMachine2<State, ApplicationCluster.Event, ApplicationCluster> s_fsm = new StateMachine2<State, ApplicationCluster.Event, ApplicationCluster>();
+
+        public static StateMachine2<State, ApplicationCluster.Event, ApplicationCluster> getStateMachine() { return s_fsm; }
+
+        static {
+            s_fsm.addTransition(State.Created, Event.StartRequested, State.Starting);
+
+            s_fsm.addTransition(State.Starting, Event.OperationSucceeded, State.Running);
+            s_fsm.addTransition(State.Starting, Event.OperationFailed, State.Alert);
+            s_fsm.addTransition(State.Starting, Event.CreateFailed, State.Error);
+
+            s_fsm.addTransition(State.Running, Event.StopRequested, State.Stopping);
+            s_fsm.addTransition(State.Stopping, Event.OperationSucceeded, State.Stopped);
+            s_fsm.addTransition(State.Stopping, Event.OperationFailed, State.Alert);
+
+            s_fsm.addTransition(State.Stopped, Event.StartRequested, State.Starting);
+
+            s_fsm.addTransition(State.Running, Event.FaultsDetected, State.Alert);
+
+            s_fsm.addTransition(State.Running, Event.ScaleUpRequested, State.Scaling);
+            s_fsm.addTransition(State.Running, Event.ScaleDownRequested, State.Scaling);
+            s_fsm.addTransition(State.Scaling, Event.OperationSucceeded, State.Running);
+            s_fsm.addTransition(State.Scaling, Event.OperationFailed, State.Alert);
+
+            s_fsm.addTransition(State.Alert, Event.RecoveryRequested, State.Recovering);
+            s_fsm.addTransition(State.Recovering, Event.OperationSucceeded, State.Running);
+            s_fsm.addTransition(State.Recovering, Event.OperationFailed, State.Alert);
+
+            s_fsm.addTransition(State.Running, Event.DestroyRequested, State.Destroying);
+            s_fsm.addTransition(State.Stopped, Event.DestroyRequested, State.Destroying);
+            s_fsm.addTransition(State.Alert, Event.DestroyRequested, State.Destroying);
+            s_fsm.addTransition(State.Error, Event.DestroyRequested, State.Destroying);
+
+            s_fsm.addTransition(State.Destroying, Event.DestroyRequested, State.Destroying);
+            s_fsm.addTransition(State.Destroying, Event.OperationSucceeded, State.Destroyed);
+            s_fsm.addTransition(State.Destroying, Event.OperationFailed, State.Destroying);
+
+        }
+        String _description;
+
+        private State(String description) {
+             _description = description;
+        }
+    }
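+
+    // A minimal usage sketch (an assumption about how callers drive this FSM, not something wired up here):
+    // transitions are resolved through the shared StateMachine2 instance registered above, e.g.
+    //   ApplicationCluster.State next = ApplicationCluster.State.getStateMachine()
+    //           .getNextState(ApplicationCluster.State.Created, ApplicationCluster.Event.StartRequested); // -> Starting
+    // assuming StateMachine2#getNextState looks up the transitions added in the static block.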
+
+    long getId();
+    String getName();
+    String getDescription();
+    long getZoneId();
+    long getServiceOfferingId();
+    long getTemplateId();
+    long getNetworkId();
+    long getDomainId();
+    long getAccountId();
+    long getNodeCount();
+    String getKeyPair();
+    long getCores();
+    long getMemory();
+    String getEndpoint();
+    String getConsoleEndpoint();
+    State getState();
+}
diff --git a/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/ApplicationClusterConfig.java b/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/ApplicationClusterConfig.java
new file mode 100644
index 00000000000..1260c83ad39
--- /dev/null
+++ b/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/ApplicationClusterConfig.java
@@ -0,0 +1,77 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.applicationcluster;
+
+import com.cloud.server.ManagementServer;
+
+public enum ApplicationClusterConfig {
+
+    ApplicationClusterTemplateName("Advanced", ManagementServer.class, String.class, "cloud.applicationcluster.template.name", null, "name of the template used for creating the machines in the cluster", null, null),
+    ApplicationClusterMasterCloudConfig("Advanced", ManagementServer.class, String.class, "cloud.applicationcluster.master.cloudconfig", null, "file location path of the cloud config used for creating a cluster master node", null, null),
+    ApplicationClusterNodeCloudConfig("Advanced", ManagementServer.class, String.class, "cloud.applicationcluster.node.cloudconfig", null, "file location path of the cloud config used for creating a cluster node", null, null),
+    ApplicationClusterNetworkOffering("Advanced", ManagementServer.class, String.class, "cloud.applicationcluster.network.offering", null, "Name of the network offering that will be used to create an isolated network in which the cluster VMs will be launched.", null, null);
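+
+    // These keys are resolved from the global configuration at runtime; for example, the manager
+    // implementation later looks up the template with
+    //   _globalConfigDao.getValue(ApplicationClusterConfig.ApplicationClusterTemplateName.key())
+    // (see ApplicationClusterManagerImpl in this patch).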
+
+
+    private final String _category;
+    private final Class<?> _componentClass;
+    private final Class<?> _type;
+    private final String _name;
+    private final String _defaultValue;
+    private final String _description;
+    private final String _range;
+    private final String _scope;
+
+    private ApplicationClusterConfig(String category, Class<?> componentClass, Class<?> type, String name, String defaultValue, String description, String range, String scope) {
+        _category = category;
+        _componentClass = componentClass;
+        _type = type;
+        _name = name;
+        _defaultValue = defaultValue;
+        _description = description;
+        _range = range;
+        _scope = scope;
+    }
+
+    public String getCategory() {
+        return _category;
+    }
+
+    public String key() {
+        return _name;
+    }
+
+    public String getDescription() {
+        return _description;
+    }
+
+    public String getDefaultValue() {
+        return _defaultValue;
+    }
+
+    public Class<?> getType() {
+        return _type;
+    }
+
+    public Class<?> getComponentClass() {
+        return _componentClass;
+    }
+
+    public String getScope() {
+        return _scope;
+    }
+
+}
diff --git a/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/ApplicationClusterDetails.java b/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/ApplicationClusterDetails.java
new file mode 100644
index 00000000000..81b87829c60
--- /dev/null
+++ b/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/ApplicationClusterDetails.java
@@ -0,0 +1,28 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.applicationcluster;
+
+/**
+ * Container cluster details
+ *
+ */
+public interface ApplicationClusterDetails {
+    long getId();
+    long getClusterId();
+    String getUserName();
+    String getPassword();
+}
diff --git a/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/ApplicationClusterDetailsVO.java b/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/ApplicationClusterDetailsVO.java
new file mode 100644
index 00000000000..71cf8ce34d2
--- /dev/null
+++ b/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/ApplicationClusterDetailsVO.java
@@ -0,0 +1,138 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.applicationcluster;
+
+
+import javax.persistence.Column;
+
+import javax.persistence.Entity;
+import javax.persistence.Id;
+import javax.persistence.Table;
+import javax.persistence.GeneratedValue;
+import javax.persistence.GenerationType;
+
+@Entity
+@Table(name = "sb_ccs_container_cluster_details")
+public class ApplicationClusterDetailsVO implements ApplicationClusterDetails {
+
+    public long getId() {
+        return id;
+    }
+
+    public long getClusterId() {
+        return clusterId;
+    }
+
+    public String getUserName() {
+        return username;
+    }
+
+    public String getPassword() {
+        return password;
+    }
+
+    public void setClusterId(long clusterId) {
+        this.clusterId = clusterId;
+    }
+
+    public String getRegistryUsername() {
+        return registryUsername;
+    }
+
+    public void setRegistryUsername(String registryUsername) {
+        this.registryUsername = registryUsername;
+    }
+
+    public String getRegistryPassword() {
+        return registryPassword;
+    }
+
+    public void setRegistryPassword(String registryPassword) {
+        this.registryPassword = registryPassword;
+    }
+
+    public String getRegistryUrl() {
+        return registryUrl;
+    }
+
+    public void setRegistryUrl(String registryUrl) {
+        this.registryUrl = registryUrl;
+    }
+
+    public String getRegistryEmail() {
+        return registryEmail;
+    }
+
+    public void setRegistryEmail(String registryEmail) {
+        this.registryEmail = registryEmail;
+    }
+
+    public boolean getNetworkCleanup() {
+        return networkCleanup;
+    }
+
+    public void setNetworkCleanup(boolean networkCleanup) {
+        this.networkCleanup = networkCleanup;
+    }
+
+    @Id
+    @GeneratedValue(strategy = GenerationType.IDENTITY)
+    @Column(name = "id")
+    long id;
+
+    @Column(name = "cluster_id")
+    long clusterId;
+
+    public void setUsername(String username) {
+        this.username = username;
+    }
+
+    public void setPassword(String password) {
+        this.password = password;
+    }
+
+    @Column(name = "username")
+    String username;
+
+    @Column(name = "password")
+    String password;
+
+    @Column(name = "registry_username")
+    String registryUsername;
+
+    @Column(name = "registry_password")
+    String registryPassword;
+
+    @Column(name = "registry_url")
+    String registryUrl;
+
+    @Column(name = "registry_email")
+    String registryEmail;
+
+    @Column(name = "network_cleanup")
+    boolean networkCleanup;
+
+    public ApplicationClusterDetailsVO() {
+
+    }
+
+    public ApplicationClusterDetailsVO(long clusterId, String userName, String password) {
+        this.clusterId = clusterId;
+        this.username = userName;
+        this.password = password;
+    }
+}
diff --git a/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/ApplicationClusterEventTypes.java b/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/ApplicationClusterEventTypes.java
new file mode 100755
index 00000000000..473ddb5eee4
--- /dev/null
+++ b/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/ApplicationClusterEventTypes.java
@@ -0,0 +1,24 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.applicationcluster;
+
+public class ApplicationClusterEventTypes {
+    public static final String EVENT_CONTAINER_CLUSTER_CREATE = "CONTAINER.CLUSTER.CREATE";
+    public static final String EVENT_CONTAINER_CLUSTER_DELETE = "CONTAINER.CLUSTER.DELETE";
+    public static final String EVENT_CONTAINER_CLUSTER_START = "CONTAINER.CLUSTER.START";
+    public static final String EVENT_CONTAINER_CLUSTER_STOP = "CONTAINER.CLUSTER.STOP";
+}
diff --git a/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/ApplicationClusterManager.java b/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/ApplicationClusterManager.java
new file mode 100644
index 00000000000..6e249421f1a
--- /dev/null
+++ b/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/ApplicationClusterManager.java
@@ -0,0 +1,24 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.applicationcluster;
+
+public interface ApplicationClusterManager extends ApplicationClusterService {
+    String CCS_ROOTCA_KEYPAIR = "CCS_ROOTCA_KEYPAIR";
+    String CCS_ROOTCA_CN = "CN=cloudstack";
+    String CCS_CLUSTER_CN = "CN=applicationcluster.cloudstack";
+    String CCS_RSA_PRIVATE_KEY = "RSA PRIVATE KEY";
+}
\ No newline at end of file
diff --git a/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/ApplicationClusterManagerImpl.java b/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/ApplicationClusterManagerImpl.java
new file mode 100644
index 00000000000..45c445fdb37
--- /dev/null
+++ b/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/ApplicationClusterManagerImpl.java
@@ -0,0 +1,2044 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.applicationcluster;
+
+import com.cloud.api.ApiDBUtils;
+import com.cloud.capacity.CapacityManager;
+import com.cloud.dc.ClusterDetailsDao;
+import com.cloud.dc.ClusterDetailsVO;
+import com.cloud.dc.ClusterVO;
+import com.cloud.dc.DataCenter;
+import com.cloud.dc.DataCenterVO;
+import com.cloud.dc.dao.ClusterDao;
+import com.cloud.dc.dao.DataCenterDao;
+import com.cloud.deploy.DeployDestination;
+import com.cloud.exception.ConcurrentOperationException;
+import com.cloud.exception.InsufficientCapacityException;
+import com.cloud.exception.InsufficientServerCapacityException;
+import com.cloud.exception.InvalidParameterValueException;
+import com.cloud.exception.ManagementServerException;
+import com.cloud.exception.NetworkRuleConflictException;
+import com.cloud.exception.PermissionDeniedException;
+import com.cloud.exception.ResourceAllocationException;
+import com.cloud.exception.ResourceUnavailableException;
+import com.cloud.host.Host.Type;
+import com.cloud.host.HostVO;
+import com.cloud.network.IpAddress;
+import com.cloud.network.Network;
+import com.cloud.network.Network.Service;
+import com.cloud.network.NetworkModel;
+import com.cloud.network.NetworkService;
+import com.cloud.network.PhysicalNetwork;
+import com.cloud.network.dao.FirewallRulesDao;
+import com.cloud.network.dao.IPAddressDao;
+import com.cloud.network.dao.IPAddressVO;
+import com.cloud.network.dao.NetworkDao;
+import com.cloud.network.dao.NetworkVO;
+import com.cloud.network.dao.PhysicalNetworkDao;
+import com.cloud.network.firewall.FirewallService;
+import com.cloud.network.rules.FirewallRule;
+import com.cloud.network.rules.FirewallRuleVO;
+import com.cloud.network.rules.PortForwardingRuleVO;
+import com.cloud.network.rules.RulesService;
+import com.cloud.network.rules.dao.PortForwardingRulesDao;
+import org.apache.cloudstack.network.tls.CertService;
+import com.cloud.offering.NetworkOffering;
+import com.cloud.offering.ServiceOffering;
+import com.cloud.offerings.NetworkOfferingVO;
+import com.cloud.offerings.dao.NetworkOfferingDao;
+import com.cloud.offerings.dao.NetworkOfferingServiceMapDao;
+import com.cloud.org.Grouping;
+import com.cloud.resource.ResourceManager;
+import com.cloud.service.ServiceOfferingVO;
+import com.cloud.service.dao.ServiceOfferingDao;
+import com.cloud.storage.VMTemplateVO;
+import com.cloud.storage.VMTemplateZoneVO;
+import com.cloud.storage.dao.VMTemplateDao;
+import com.cloud.storage.dao.VMTemplateZoneDao;
+import com.cloud.template.VirtualMachineTemplate;
+import com.cloud.user.Account;
+import com.cloud.user.AccountManager;
+import com.cloud.user.SSHKeyPairVO;
+import com.cloud.user.User;
+import com.cloud.user.dao.AccountDao;
+import com.cloud.user.dao.SSHKeyPairDao;
+import com.cloud.uservm.UserVm;
+import com.cloud.utils.Pair;
+import com.cloud.utils.component.ComponentContext;
+import com.cloud.utils.component.ManagerBase;
+import com.cloud.utils.concurrency.NamedThreadFactory;
+import com.cloud.utils.db.DbProperties;
+import com.cloud.utils.db.Filter;
+import com.cloud.utils.db.GlobalLock;
+import com.cloud.utils.db.SearchCriteria;
+import com.cloud.utils.db.Transaction;
+import com.cloud.utils.db.TransactionCallback;
+import com.cloud.utils.db.TransactionCallbackWithException;
+import com.cloud.utils.db.TransactionStatus;
+import com.cloud.utils.fsm.NoTransitionException;
+import com.cloud.utils.fsm.StateMachine2;
+import com.cloud.utils.net.Ip;
+import com.cloud.vm.Nic;
+import com.cloud.vm.ReservationContext;
+import com.cloud.vm.ReservationContextImpl;
+import com.cloud.vm.UserVmService;
+import com.cloud.vm.UserVmVO;
+import com.cloud.vm.VMInstanceVO;
+import com.cloud.vm.VirtualMachine;
+import com.cloud.vm.dao.UserVmDao;
+import com.cloud.vm.dao.VMInstanceDao;
+import org.apache.cloudstack.acl.ControlledEntity;
+import org.apache.cloudstack.acl.SecurityChecker;
+import org.apache.cloudstack.api.ApiErrorCode;
+import org.apache.cloudstack.api.BaseCmd;
+import org.apache.cloudstack.api.ServerApiException;
+import org.apache.cloudstack.api.command.user.applicationcluster.CreateApplicationClusterCmd;
+import org.apache.cloudstack.api.command.user.applicationcluster.DeleteApplicationClusterCmd;
+import org.apache.cloudstack.api.command.user.applicationcluster.ListApplicationClusterCACertCmd;
+import org.apache.cloudstack.api.command.user.applicationcluster.ListApplicationClusterCmd;
+import org.apache.cloudstack.api.command.user.applicationcluster.StartApplicationClusterCmd;
+import org.apache.cloudstack.api.command.user.applicationcluster.StopApplicationClusterCmd;
+import org.apache.cloudstack.api.command.user.firewall.CreateFirewallRuleCmd;
+import org.apache.cloudstack.api.command.user.vm.StartVMCmd;
+import org.apache.cloudstack.api.response.ApplicationClusterResponse;
+import org.apache.cloudstack.api.response.ListResponse;
+import org.apache.cloudstack.applicationcluster.dao.ApplicationClusterDao;
+import org.apache.cloudstack.applicationcluster.dao.ApplicationClusterDetailsDao;
+import org.apache.cloudstack.applicationcluster.dao.ApplicationClusterVmMapDao;
+import org.apache.cloudstack.context.CallContext;
+import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService;
+import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
+import org.apache.cloudstack.framework.security.keystore.KeystoreDao;
+import org.apache.cloudstack.framework.security.keystore.KeystoreVO;
+import org.apache.cloudstack.managed.context.ManagedContextRunnable;
+import org.apache.commons.codec.binary.Base64;
+import org.apache.log4j.Logger;
+import org.bouncycastle.asn1.ASN1Encodable;
+import org.bouncycastle.asn1.DERSequence;
+import org.bouncycastle.asn1.x509.GeneralName;
+import org.bouncycastle.asn1.x509.SubjectKeyIdentifier;
+import org.bouncycastle.asn1.x509.X509Extensions;
+import org.bouncycastle.jce.provider.BouncyCastleProvider;
+import org.bouncycastle.util.io.pem.PemObject;
+import org.bouncycastle.util.io.pem.PemReader;
+import org.bouncycastle.util.io.pem.PemWriter;
+import org.bouncycastle.x509.X509V1CertificateGenerator;
+import org.bouncycastle.x509.X509V3CertificateGenerator;
+import org.bouncycastle.x509.extension.AuthorityKeyIdentifierStructure;
+import org.flywaydb.core.Flyway;
+import org.flywaydb.core.api.FlywayException;
+import org.joda.time.DateTime;
+import org.joda.time.DateTimeZone;
+
+import javax.ejb.Local;
+import javax.inject.Inject;
+import javax.naming.ConfigurationException;
+import javax.security.auth.x500.X500Principal;
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.PrintWriter;
+import java.io.StringReader;
+import java.io.StringWriter;
+import java.lang.reflect.Field;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.math.BigInteger;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.MalformedURLException;
+import java.net.Socket;
+import java.net.URL;
+import java.net.UnknownHostException;
+import java.nio.charset.Charset;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.security.InvalidKeyException;
+import java.security.KeyFactory;
+import java.security.KeyPair;
+import java.security.KeyPairGenerator;
+import java.security.NoSuchAlgorithmException;
+import java.security.NoSuchProviderException;
+import java.security.PrivateKey;
+import java.security.SecureRandom;
+import java.security.Security;
+import java.security.SignatureException;
+import java.security.cert.CertificateEncodingException;
+import java.security.cert.CertificateParsingException;
+import java.security.cert.X509Certificate;
+import java.security.spec.InvalidKeySpecException;
+import java.security.spec.PKCS8EncodedKeySpec;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+@Local(value = {ApplicationClusterManager.class})
+public class ApplicationClusterManagerImpl extends ManagerBase implements ApplicationClusterManager, ApplicationClusterService {
+
+    private static final Logger s_logger = Logger.getLogger(ApplicationClusterManagerImpl.class);
+
+    protected StateMachine2<ApplicationCluster.State, ApplicationCluster.Event, ApplicationCluster> _stateMachine = ApplicationCluster.State.getStateMachine();
+
+    ScheduledExecutorService _gcExecutor;
+    ScheduledExecutorService _stateScanner;
+
+    @Inject
+    protected KeystoreDao keystoreDao;
+    @Inject
+    protected ApplicationClusterDao _applicationClusterDao;
+    @Inject
+    protected ApplicationClusterVmMapDao _clusterVmMapDao;
+    @Inject
+    protected ApplicationClusterDetailsDao _applicationClusterDetailsDao;
+    @Inject
+    protected SSHKeyPairDao _sshKeyPairDao;
+    @Inject
+    protected UserVmService _userVmService;
+    @Inject
+    protected DataCenterDao _dcDao;
+    @Inject
+    protected ServiceOfferingDao _offeringDao;
+    @Inject
+    protected VMTemplateDao _templateDao;
+    @Inject
+    protected AccountDao _accountDao;
+    @Inject
+    protected UserVmDao _vmDao;
+    @Inject
+    protected ConfigurationDao _globalConfigDao;
+    @Inject
+    protected NetworkService _networkService;
+    @Inject
+    protected NetworkOfferingDao _networkOfferingDao;
+    @Inject
+    protected NetworkModel _networkModel;
+    @Inject
+    protected PhysicalNetworkDao _physicalNetworkDao;
+    @Inject
+    protected NetworkOrchestrationService _networkMgr;
+    @Inject
+    protected NetworkDao _networkDao;
+    @Inject
+    protected IPAddressDao _publicIpAddressDao;
+    @Inject
+    protected PortForwardingRulesDao _portForwardingDao;
+    @Inject
+    private FirewallService _firewallService;
+    @Inject
+    protected RulesService _rulesService;
+    @Inject
+    private NetworkOfferingServiceMapDao _ntwkOfferingServiceMapDao;
+    @Inject
+    protected AccountManager _accountMgr;
+    @Inject
+    protected ApplicationClusterVmMapDao _applicationClusterVmMapDao;
+    @Inject
+    protected ServiceOfferingDao _srvOfferingDao;
+    @Inject
+    protected UserVmDao _userVmDao;
+    @Inject
+    private VMInstanceDao _vmInstanceDao;
+    @Inject
+    private VMTemplateZoneDao _templateZoneDao;
+    @Inject
+    protected CapacityManager _capacityMgr;
+    @Inject
+    protected ResourceManager _resourceMgr;
+    @Inject
+    protected ClusterDetailsDao _clusterDetailsDao;
+    @Inject
+    protected ClusterDao _clusterDao;
+    @Inject
+    FirewallRulesDao _firewallDao;
+    @Inject
+    protected CertService certService;
+
+    @Override
+    public ApplicationCluster findById(final Long id) {
+        return _applicationClusterDao.findById(id);
+    }
+
+    @Override
+    public ApplicationCluster createContainerCluster(final String name,
+                                                 final String displayName,
+                                                 final Long zoneId,
+                                                 final Long serviceOfferingId,
+                                                 final Account owner,
+                                                 final Long networkId,
+                                                 final String sshKeyPair,
+                                                 final Long clusterSize,
+                                                 final String dockerRegistryUserName,
+                                                 final String dockerRegistryPassword,
+                                                 final String dockerRegistryUrl,
+                                                 final String dockerRegistryEmail)
+            throws InsufficientCapacityException, ResourceAllocationException, ManagementServerException {
+
+        if (name == null || name.isEmpty()) {
+            throw new InvalidParameterValueException("Invalid name for the container cluster name:" + name);
+        }
+
+        if (clusterSize < 1 || clusterSize > 100) {
+            throw new InvalidParameterValueException("invalid cluster size " + clusterSize);
+        }
+
+        DataCenter zone =  _dcDao.findById(zoneId);
+        if (zone == null) {
+            throw new InvalidParameterValueException("Unable to find zone by id:" + zoneId);
+        }
+
+        if (Grouping.AllocationState.Disabled == zone.getAllocationState()) {
+            throw new PermissionDeniedException("Cannot perform this operation, Zone:" + zone.getId() + " is currently disabled.");
+        }
+
+        ServiceOffering serviceOffering = _srvOfferingDao.findById(serviceOfferingId);
+        if (serviceOffering == null) {
+            throw new InvalidParameterValueException("No service offering with id:" + serviceOfferingId);
+        }
+
+        if(sshKeyPair != null && !sshKeyPair.isEmpty()) {
+            SSHKeyPairVO sshkp = _sshKeyPairDao.findByName(owner.getAccountId(), owner.getDomainId(), sshKeyPair);
+            if (sshkp == null) {
+                throw new InvalidParameterValueException("Given SSH key pair with name:" + sshKeyPair + " was not found for the account " + owner.getAccountName());
+            }
+        }
+
+        if (!isApplicationClusterServiceConfigured(zone)) {
+            throw new ManagementServerException("Container service has not been configured properly to provision container clusters.");
+        }
+
+        VMTemplateVO template = _templateDao.findByTemplateName(_globalConfigDao.getValue(ApplicationClusterConfig.ApplicationClusterTemplateName.key()));
+        List<VMTemplateZoneVO> listZoneTemplate = _templateZoneDao.listByZoneTemplate(zone.getId(), template.getId());
+        if (listZoneTemplate == null || listZoneTemplate.isEmpty()) {
+            s_logger.warn("The template:" + template.getId() + " is not available for use in zone:" + zoneId + " to provision container cluster name:" + name);
+            throw new ManagementServerException("Container service has not been configured properly to provision container clusters.");
+        }
+
+        if (!validateServiceOffering(_srvOfferingDao.findById(serviceOfferingId))) {
+            throw new InvalidParameterValueException("This service offering is not suitable for an application cluster, service offering id is " + networkId);
+        }
+
+        validateDockerRegistryParams(dockerRegistryUserName, dockerRegistryPassword, dockerRegistryUrl, dockerRegistryEmail);
+
+        plan(clusterSize, zoneId, _srvOfferingDao.findById(serviceOfferingId));
+
+        Network network = null;
+        if (networkId != null) {
+            if (_applicationClusterDao.listByNetworkId(networkId).isEmpty()) {
+                network = _networkService.getNetwork(networkId);
+                if (network == null) {
+                    throw new InvalidParameterValueException("Unable to find network by ID " + networkId);
+                }
+                if (!validateNetwork(network)){
+                    throw new InvalidParameterValueException("This network is not suitable for an application cluster, network id is " + networkId);
+                }
+                _networkModel.checkNetworkPermissions(owner, network);
+            }
+            else {
+                throw new InvalidParameterValueException("This network is already under use by another application cluster, network id is " + networkId);
+            }
+        } else { // the user has not specified a network in which the cluster VMs are to be provisioned, so create a network for the container cluster
+            NetworkOfferingVO networkOffering = _networkOfferingDao.findByUniqueName(
+                    _globalConfigDao.getValue(ApplicationClusterConfig.ApplicationClusterNetworkOffering.key()));
+
+            long physicalNetworkId = _networkModel.findPhysicalNetworkId(zone.getId(), networkOffering.getTags(), networkOffering.getTrafficType());
+            PhysicalNetwork physicalNetwork = _physicalNetworkDao.findById(physicalNetworkId);
+
+            if (s_logger.isDebugEnabled()) {
+                s_logger.debug("Creating network for account " + owner + " from the network offering id=" +
+                        networkOffering.getId() + " as a part of cluster: " + name + " deployment process");
+            }
+
+            try {
+                network = _networkMgr.createGuestNetwork(networkOffering.getId(), name + "-network", owner.getAccountName() + "-network",
+                        null, null, null, null, owner, null, physicalNetwork, zone.getId(), ControlledEntity.ACLType.Account, null, null, null, null, true, null);
+            } catch(Exception e) {
+                s_logger.warn("Unable to create a network for the container cluster due to " + e);
+                throw new ManagementServerException("Unable to create a network for the container cluster.");
+            }
+        }
+
+        final Network defaultNetwork = network;
+        final VMTemplateVO finalTemplate = template;
+        final long cores = serviceOffering.getCpu() * clusterSize;
+        final long memory = serviceOffering.getRamSize() * clusterSize;
+
+        final ApplicationClusterVO cluster = Transaction.execute(new TransactionCallback<ApplicationClusterVO>() {
+            @Override
+            public ApplicationClusterVO doInTransaction(TransactionStatus status) {
+                ApplicationClusterVO newCluster = new ApplicationClusterVO(name, displayName, zoneId,
+                        serviceOfferingId, finalTemplate.getId(), defaultNetwork.getId(), owner.getDomainId(),
+                        owner.getAccountId(), clusterSize, ApplicationCluster.State.Created, sshKeyPair, cores, memory, "", "");
+                _applicationClusterDao.persist(newCluster);
+                return newCluster;
+            }
+        });
+
+        Transaction.execute(new TransactionCallback<ApplicationClusterDetailsVO>() {
+            @Override
+            public ApplicationClusterDetailsVO doInTransaction(TransactionStatus status) {
+                ApplicationClusterDetailsVO clusterDetails = new ApplicationClusterDetailsVO();
+                clusterDetails.setClusterId(cluster.getId());
+                clusterDetails.setRegistryUsername(dockerRegistryUserName);
+                clusterDetails.setRegistryPassword(dockerRegistryPassword);
+                clusterDetails.setRegistryUrl(dockerRegistryUrl);
+                clusterDetails.setRegistryEmail(dockerRegistryEmail);
+                clusterDetails.setUsername("admin");
+                SecureRandom random = new SecureRandom();
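+                // 130 random bits rendered in radix 32 yield a generated password of roughly 26 characters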
+                String randomPassword = new BigInteger(130, random).toString(32);
+                clusterDetails.setPassword(randomPassword);
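+                // networkCleanup is true only when no network id was passed in, i.e. when the network was
+                // auto-created above and can presumably be torn down together with the cluster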
+                clusterDetails.setNetworkCleanup(networkId == null);
+                _applicationClusterDetailsDao.persist(clusterDetails);
+                return clusterDetails;
+            }
+        });
+
+        if (s_logger.isDebugEnabled()) {
+            s_logger.debug("A container cluster name:" + name + " id:" + cluster.getId() + " has been created.");
+        }
+
+        return cluster;
+    }
+
+
+    // The start operation can be performed at two different life stages of a container cluster. The first is on a freshly
+    // created cluster, in which case no resources have been provisioned for the container cluster, so during start all the
+    // resources are provisioned from scratch. The second kind of start happens on a Stopped container cluster, in which all
+    // resources are already provisioned (volumes, NICs, networks, etc.); it is just that the VMs are not in a running state,
+    // so only the VMs need to be started (which may implicitly start the network as well).
+    @Override
+    public boolean startContainerCluster(long containerClusterId, boolean onCreate) throws ManagementServerException,
+            ResourceAllocationException, ResourceUnavailableException, InsufficientCapacityException {
+
+        if (onCreate) {
+            // Start for container cluster in 'Created' state
+            return startContainerClusterOnCreate(containerClusterId);
+        } else {
+            // Start for container cluster in 'Stopped' state. Resources are already provisioned, just need to be started
+            return startStoppedContainerCluster(containerClusterId);
+        }
+    }
+
+    // perform a cold start (which will provision resources as well)
+    private boolean startContainerClusterOnCreate(final long containerClusterId) throws ManagementServerException {
+
+        // Starting a container cluster follows the workflow below:
+        //   - start the network
+        //   - provision the master VM
+        //   - provision the node VMs (as many as the cluster size)
+        //   - update the bookkeeping data for the VMs provisioned for the cluster
+        //   - set up networking (add firewall and port-forwarding rules)
+        //   - wait until the Kubernetes API server on the master VM comes up
+        //   - wait until add-on services (dashboard, etc.) come up
+        //   - update the API and dashboard URL endpoints in the container cluster details
+
+        ApplicationClusterVO containerCluster = _applicationClusterDao.findById(containerClusterId);
+
+        if (s_logger.isDebugEnabled()) {
+            s_logger.debug("Starting container cluster: " + containerCluster.getName());
+        }
+
+        stateTransitTo(containerClusterId, ApplicationCluster.Event.StartRequested);
+
+        Account account = _accountDao.findById(containerCluster.getAccountId());
+
+        DeployDestination dest = null;
+        try {
+            dest = plan(containerClusterId, containerCluster.getZoneId());
+        }
+        catch (InsufficientCapacityException e){
+            stateTransitTo(containerClusterId, ApplicationCluster.Event.CreateFailed);
+            s_logger.warn("Provisioning the cluster failed due to insufficient capacity in the container cluster: " + containerCluster.getName() + " due to " + e);
+            throw new ManagementServerException("Provisioning the cluster failed due to insufficient capacity in the container cluster: " + containerCluster.getName(), e);
+        }
+        final ReservationContext context = new ReservationContextImpl(null, null, null, account);
+
+        try {
+            _networkMgr.startNetwork(containerCluster.getNetworkId(), dest, context);
+            if (s_logger.isDebugEnabled()) {
+                s_logger.debug("Network:" + containerCluster.getNetworkId() + " is started for the  container cluster: " + containerCluster.getName());
+            }
+        } catch (RuntimeException e) {
+            stateTransitTo(containerClusterId, ApplicationCluster.Event.CreateFailed);
+            s_logger.warn("Starting the network failed as part of starting container cluster " + containerCluster.getName() + " due to " + e);
+            throw new ManagementServerException("Failed to start the network while creating container cluster name:" + containerCluster.getName(), e);
+        } catch(Exception e) {
+            stateTransitTo(containerClusterId, ApplicationCluster.Event.CreateFailed);
+            s_logger.warn("Starting the network failed as part of starting container cluster " + containerCluster.getName() + " due to " + e);
+            throw new ManagementServerException("Failed to start the network while creating container cluster name:" + containerCluster.getName(), e);
+        }
+
+        IPAddressVO publicIp = null;
+        List<IPAddressVO> ips = _publicIpAddressDao.listByAssociatedNetwork(containerCluster.getNetworkId(), true);
+        if (ips == null || ips.isEmpty()) {
+            s_logger.warn("Network:" + containerCluster.getNetworkId() + " for the container cluster name:" + containerCluster.getName() + " does not have " +
+                    "public IP's assocated with it. So aborting container cluster strat.");
+            throw new ManagementServerException("Failed to start the network while creating container cluster name:" + containerCluster.getName());
+        }
+        publicIp = ips.get(0);
+
+        UserVm masterNode = null;
+        try {
+            masterNode = createK8SMaster(containerCluster, publicIp);
+
+            final long clusterId = containerCluster.getId();
+            final long masterVmId = masterNode.getId();
+            Transaction.execute(new TransactionCallback<ApplicationClusterVmMapVO>() {
+                @Override
+                public ApplicationClusterVmMapVO doInTransaction(TransactionStatus status) {
+                    ApplicationClusterVmMapVO newClusterVmMap = new ApplicationClusterVmMapVO(clusterId, masterVmId);
+                    _clusterVmMapDao.persist(newClusterVmMap);
+                    return newClusterVmMap;
+                }
+            });
+
+            startClusterVM(masterNode, containerCluster);
+            masterNode = _vmDao.findById(masterNode.getId());
+            if (s_logger.isDebugEnabled()) {
+                s_logger.debug("Provisioned the master VM's in to the container cluster name:" + containerCluster.getName());
+            }
+        } catch (RuntimeException e) {
+            stateTransitTo(containerClusterId, ApplicationCluster.Event.CreateFailed);
+            s_logger.warn("Provisioning the master VM' failed in the container cluster: " + containerCluster.getName() + " due to " + e);
+            throw new ManagementServerException("Provisioning the master VM' failed in the container cluster: " + containerCluster.getName(), e);
+        } catch (Exception e) {
+            stateTransitTo(containerClusterId, ApplicationCluster.Event.CreateFailed);
+            s_logger.warn("Provisioning the master VM' failed in the container cluster: " + containerCluster.getName() + " due to " + e);
+            throw new ManagementServerException("Provisioning the master VM' failed in the container cluster: " + containerCluster.getName(), e);
+        }
+
+        String masterIP = masterNode.getPrivateIpAddress();
+
+        long anyNodeVmId = 0;
+        UserVm k8anyNodeVM = null;
+        for (int i=1; i <= containerCluster.getNodeCount(); i++) {
+            UserVm vm = null;
+            try {
+                vm = createK8SNode(containerCluster, masterIP, i);
+                final long nodeVmId = vm.getId();
+                ApplicationClusterVmMapVO clusterNodeVmMap = Transaction.execute(new TransactionCallback<ApplicationClusterVmMapVO>() {
+                    @Override
+                    public ApplicationClusterVmMapVO doInTransaction(TransactionStatus status) {
+                        ApplicationClusterVmMapVO newClusterVmMap = new ApplicationClusterVmMapVO(containerClusterId, nodeVmId);
+                        _clusterVmMapDao.persist(newClusterVmMap);
+                        return newClusterVmMap;
+                    }
+                });
+                startClusterVM(vm, containerCluster);
+
+                vm = _vmDao.findById(vm.getId());
+                if (s_logger.isDebugEnabled()) {
+                    s_logger.debug("Provisioned a node VM in to the container cluster: " + containerCluster.getName());
+                }
+            } catch (RuntimeException e) {
+                stateTransitTo(containerClusterId, ApplicationCluster.Event.CreateFailed);
+                s_logger.warn("Provisioning the node VM failed in the container cluster " + containerCluster.getName() + " due to " + e);
+                throw new ManagementServerException("Provisioning the node VM failed in the container cluster " + containerCluster.getName(), e);
+            } catch (Exception e) {
+                stateTransitTo(containerClusterId, ApplicationCluster.Event.CreateFailed);
+                s_logger.warn("Provisioning the node VM failed in the container cluster " + containerCluster.getName() + " due to " + e);
+                throw new ManagementServerException("Provisioning the node VM failed in the container cluster " + containerCluster.getName(), e);
+            }
+
+            if (anyNodeVmId == 0) {
+                anyNodeVmId = vm.getId();
+                k8anyNodeVM = vm;
+            }
+        }
+
+        if (s_logger.isDebugEnabled()) {
+            s_logger.debug("Container cluster : " + containerCluster.getName() + " VM's are successfully provisioned.");
+        }
+
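+        // Per the workflow above, this call presumably adds the firewall and port-forwarding rules that
+        // expose the master VM's API server on the acquired public IP (port 443 is probed just below).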
+        setupContainerClusterNetworkRules(publicIp, account, containerClusterId, masterNode.getId());
+
+        int retryCounter = 0;
+        int maxRetries = 10;
+        boolean k8sApiServerSetup = false;
+
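+        // Poll the Kubernetes API endpoint on port 443: each attempt uses a 10 second connect timeout and
+        // failed attempts are spaced 50 seconds apart, so this loop waits up to roughly ten minutes in total.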
+        while (retryCounter < maxRetries) {
+            try (Socket socket = new Socket()) {
+                socket.connect(new InetSocketAddress(publicIp.getAddress().addr(), 443), 10000);
+                k8sApiServerSetup = true;
+                containerCluster = _applicationClusterDao.findById(containerClusterId);
+                containerCluster.setEndpoint("https://" + publicIp.getAddress() + "/");
+                _applicationClusterDao.update(containerCluster.getId(), containerCluster);
+                break;
+            } catch (IOException e) {
+                if (s_logger.isDebugEnabled()) {
+                    s_logger.debug("Waiting for container cluster: " + containerCluster.getName() + " API endpoint to be available. retry: " + retryCounter + "/" + maxRetries);
+                }
+                try { Thread.sleep(50000); } catch (InterruptedException ex) {}
+                retryCounter++;
+            }
+        }
+
+        if (k8sApiServerSetup) {
+
+            retryCounter = 0;
+            maxRetries = 10;
+            // The dashboard service is a Docker image downloaded at run time,
+            // so wait for some time and check whether the dashboard service is up and running.
+            while (retryCounter < maxRetries) {
+
+                if (s_logger.isDebugEnabled()) {
+                    s_logger.debug("Waiting for dashboard service for the container cluster: " + containerCluster.getName()
+                            + " to come up. Attempt: " + retryCounter + " of max retries " + maxRetries);
+                }
+
+                if (isAddOnServiceRunning(containerCluster.getId(), "kubernetes-dashboard")) {
+
+                    stateTransitTo(containerClusterId, ApplicationCluster.Event.OperationSucceeded);
+
+                    containerCluster = _applicationClusterDao.findById(containerClusterId);
+                    containerCluster.setConsoleEndpoint("https://" + publicIp.getAddress() + "/api/v1/proxy/namespaces/kube-system/services/kubernetes-dashboard");
+                    _applicationClusterDao.update(containerCluster.getId(), containerCluster);
+
+                    if (s_logger.isDebugEnabled()) {
+                        s_logger.debug("Container cluster name:" + containerCluster.getName() + " is successfully started");
+                    }
+
+                    return true;
+                }
+
+                try { Thread.sleep(30000);} catch (InterruptedException ex) {}
+                retryCounter++;
+            }
+            s_logger.warn("Failed to setup container cluster " + containerCluster.getName() + " in usable state as" +
+                    " unable to bring dashboard add on service up");
+        } else {
+            s_logger.warn("Failed to setup container cluster " + containerCluster.getName() + " in usable state as" +
+                    " unable to bring the API server up");
+        }
+
+        stateTransitTo(containerClusterId, ApplicationCluster.Event.CreateFailed);
+
+        throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR,
+                "Failed to deploy container cluster: " + containerCluster.getId() + " as unable to setup up in usable state");
+    }
+
+    private boolean startStoppedContainerCluster(long containerClusterId) throws ManagementServerException,
+            ResourceAllocationException, ResourceUnavailableException, InsufficientCapacityException {
+
+        final ApplicationClusterVO containerCluster = _applicationClusterDao.findById(containerClusterId);
+        if (containerCluster == null) {
+            throw new ManagementServerException("Failed to find container cluster id: " + containerClusterId);
+        }
+
+        if (containerCluster.getRemoved() != null) {
+            throw new ManagementServerException("Container cluster id:" + containerClusterId + " is already deleted.");
+        }
+
+        if (containerCluster.getState().equals(ApplicationCluster.State.Running) ){
+            if (s_logger.isDebugEnabled()) {
+                s_logger.debug("Container cluster id: " + containerClusterId + " is already Running.");
+            }
+            return true;
+        }
+
+        if (containerCluster.getState().equals(ApplicationCluster.State.Starting) ){
+            if (s_logger.isDebugEnabled()) {
+                s_logger.debug("Container cluster id: " + containerClusterId + " is getting started.");
+            }
+            return true;
+        }
+
+        if (s_logger.isDebugEnabled()) {
+            s_logger.debug("Starting container cluster: " + containerCluster.getName());
+        }
+
+        stateTransitTo(containerClusterId, ApplicationCluster.Event.StartRequested);
+
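+        // Start every VM mapped to the cluster; individual start failures are logged and the loop continues,
+        // as the second pass below verifies that all VMs actually reached the Running state.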
+        for (final ApplicationClusterVmMapVO vmMapVO : _clusterVmMapDao.listByClusterId(containerClusterId)) {
+            final UserVmVO vm = _userVmDao.findById(vmMapVO.getVmId());
+            try {
+                if (vm == null) {
+                    stateTransitTo(containerClusterId, ApplicationCluster.Event.OperationFailed);
+                    throw new ManagementServerException("Failed to start all VMs in container cluster id: " + containerClusterId);
+                }
+                startClusterVM(vm, containerCluster);
+            } catch (ServerApiException ex) {
+                s_logger.warn("Failed to start VM in container cluster id:" + containerClusterId + " due to " + ex);
+                // don't bail out here; proceed further to start the rest of the VMs
+            }
+        }
+
+        for (final ApplicationClusterVmMapVO vmMapVO : _clusterVmMapDao.listByClusterId(containerClusterId)) {
+            final UserVmVO vm = _userVmDao.findById(vmMapVO.getVmId());
+            if (vm == null || !vm.getState().equals(VirtualMachine.State.Running)) {
+                stateTransitTo(containerClusterId, ApplicationCluster.Event.OperationFailed);
+                throw new ManagementServerException("Failed to start all VMs in container cluster id: " + containerClusterId);
+            }
+        }
+
+        InetAddress address=null;
+        try {
+            address = InetAddress.getByName(new URL(containerCluster.getEndpoint()).getHost());
+        } catch (MalformedURLException | UnknownHostException ex) {
+            // API end point is generated by CCS, so this situation should not arise.
+            s_logger.warn("Container cluster id:" + containerClusterId + " has invalid api endpoint. Can not " +
+                    "verify if cluster is in ready state.");
+            throw new ManagementServerException("Can not verify if container cluster id:" + containerClusterId + " is in usable state.");
+        }
+
+        // wait for a fixed time for the K8S API server to be available
+        int retryCounter = 0;
+        int maxRetries = 10;
+        boolean k8sApiServerSetup = false;
+        while (retryCounter < maxRetries) {
+            try (Socket socket = new Socket()) {
+                socket.connect(new InetSocketAddress(address.getHostAddress(), 443), 10000);
+                k8sApiServerSetup = true;
+                break;
+            } catch (IOException e) {
+                if (s_logger.isDebugEnabled()) {
+                    s_logger.debug("Waiting for container cluster: " + containerCluster.getName() + " API endpoint to be available. retry: " + retryCounter + "/" + maxRetries);
+                }
+                try { Thread.sleep(50000); } catch (InterruptedException ex) {}
+                retryCounter++;
+            }
+        }
+
+        if (!k8sApiServerSetup) {
+            stateTransitTo(containerClusterId, ApplicationCluster.Event.OperationFailed);
+            throw new ManagementServerException("Failed to setup container cluster id: " + containerClusterId + " is usable state.");
+        }
+
+        stateTransitTo(containerClusterId, ApplicationCluster.Event.OperationSucceeded);
+        if (s_logger.isDebugEnabled()) {
+            s_logger.debug(" Container cluster name:" + containerCluster.getName() + " is successfully started.");
+        }
+        return true;
+    }
+
+    // Open up firewall port 443, the secure port on which the Kubernetes API server is running. Also create a port
+    // forwarding rule to forward public IP traffic on port 443 to the master VM's private IP.
+    private void setupContainerClusterNetworkRules(IPAddressVO publicIp, Account account, long containerClusterId,
+                                                   long masterVmId) throws  ManagementServerException {
+
+        ApplicationClusterVO containerCluster = _applicationClusterDao.findById(containerClusterId);
+
+        List<String> sourceCidrList = new ArrayList<String>();
+        sourceCidrList.add("0.0.0.0/0");
+
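+        // The CreateFirewallRuleCmd fields are populated via reflection before the command is handed to the
+        // firewall service, opening port 443 to 0.0.0.0/0 on the cluster's public IP.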
+        try {
+            CreateFirewallRuleCmd rule = new CreateFirewallRuleCmd();
+            rule = ComponentContext.inject(rule);
+
+            Field addressField = rule.getClass().getDeclaredField("ipAddressId");
+            addressField.setAccessible(true);
+            addressField.set(rule, publicIp.getId());
+
+            Field protocolField = rule.getClass().getDeclaredField("protocol");
+            protocolField.setAccessible(true);
+            protocolField.set(rule, "TCP");
+
+            Field startPortField = rule.getClass().getDeclaredField("publicStartPort");
+            startPortField.setAccessible(true);
+            startPortField.set(rule, new Integer(443));
+
+            Field endPortField = rule.getClass().getDeclaredField("publicEndPort");
+            endPortField.setAccessible(true);
+            endPortField.set(rule, new Integer(443));
+
+            Field cidrField = rule.getClass().getDeclaredField("cidrlist");
+            cidrField.setAccessible(true);
+            cidrField.set(rule, sourceCidrList);
+
+            _firewallService.createIngressFirewallRule(rule);
+            _firewallService.applyIngressFwRules(publicIp.getId(), account);
+
+            if (s_logger.isDebugEnabled()) {
+                s_logger.debug("Provisioned firewall rule to open up port 443 on " + publicIp.getAddress() +
+                        " for cluster " + containerCluster.getName());
+            }
+        } catch (RuntimeException rte) {
+            s_logger.warn("Failed to provision firewall rules for the container cluster: " + containerCluster.getName()
+                    + " due to exception: " + getStackTrace(rte));
+            stateTransitTo(containerClusterId, ApplicationCluster.Event.CreateFailed);
+            throw new ManagementServerException("Failed to provision firewall rules for the container " +
+                    "cluster: " + containerCluster.getName(), rte);
+        } catch (Exception e) {
+            s_logger.warn("Failed to provision firewall rules for the container cluster: " + containerCluster.getName()
+                    + " due to exception: " + getStackTrace(e));
+            stateTransitTo(containerClusterId, ApplicationCluster.Event.CreateFailed);
+            throw new ManagementServerException("Failed to provision firewall rules for the container " +
+                    "cluster: " + containerCluster.getName());
+        }
+
+        Nic masterVmNic = _networkModel.getNicInNetwork(masterVmId, containerCluster.getNetworkId());
+        // handle Nic interface method change between releases 4.5 and 4.6 and above through reflection
+        Method m = null;
+        try {
+            m = Nic.class.getMethod("getIp4Address");
+        } catch (NoSuchMethodException e1) {
+            try {
+                m = Nic.class.getMethod("getIPv4Address");
+            } catch (NoSuchMethodException e2) {
+                stateTransitTo(containerClusterId, ApplicationCluster.Event.CreateFailed);
+                throw new ManagementServerException("Failed to activate port forwarding rules for the cluster: " + containerCluster.getName());
+            }
+        }
+        Ip masterIp = null;
+        try {
+            masterIp = new Ip(m.invoke(masterVmNic).toString());
+        } catch (InvocationTargetException | IllegalAccessException ie) {
+            stateTransitTo(containerClusterId, ApplicationCluster.Event.CreateFailed);
+            throw new ManagementServerException("Failed to activate port forwarding rules for the cluster: " + containerCluster.getName());
+        }
+        final Ip masterIpFinal = masterIp;
+        final long publicIpId = publicIp.getId();
+        final long networkId = containerCluster.getNetworkId();
+        final long accountId = account.getId();
+        final long domainId = account.getDomainId();
+        final long masterVmIdFinal = masterVmId;
+
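+        // Persist a port forwarding rule (public port 443 -> port 443 on the master VM's private IP) inside a
+        // transaction, then apply the port forwarding rules on the public IP.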
+        try {
+            PortForwardingRuleVO pfRule = Transaction.execute(new TransactionCallbackWithException<PortForwardingRuleVO, NetworkRuleConflictException>() {
+                @Override
+                public PortForwardingRuleVO doInTransaction(TransactionStatus status) throws NetworkRuleConflictException {
+                    PortForwardingRuleVO newRule =
+                            new PortForwardingRuleVO(null, publicIpId,
+                                    443, 443,
+                                    masterIpFinal,
+                                    443, 443,
+                                    "tcp", networkId, accountId, domainId, masterVmIdFinal);
+                    newRule.setDisplay(true);
+                    newRule.setState(FirewallRule.State.Add);
+                    newRule = _portForwardingDao.persist(newRule);
+                    return newRule;
+                }
+            });
+            _rulesService.applyPortForwardingRules(publicIp.getId(), account);
+
+            if (s_logger.isDebugEnabled()) {
+                s_logger.debug("Provisioning port forwarding rule from port 443 on " + publicIp.getAddress() +
+                        " to the master VM IP :" + masterIpFinal + " in container cluster " + containerCluster.getName());
+            }
+        } catch (RuntimeException rte) {
+            s_logger.warn("Failed to activate port forwarding rules for the container cluster " + containerCluster.getName() + " due to "  + rte);
+            stateTransitTo(containerClusterId, ApplicationCluster.Event.CreateFailed);
+            throw new ManagementServerException("Failed to activate port forwarding rules for the cluster: " + containerCluster.getName(), rte);
+        } catch (Exception e) {
+            s_logger.warn("Failed to activate port forwarding rules for the container cluster " + containerCluster.getName() + " due to "  + e);
+            stateTransitTo(containerClusterId, ApplicationCluster.Event.CreateFailed);
+            throw new ManagementServerException("Failed to activate port forwarding rules for the cluster: " + containerCluster.getName(), e);
+        }
+    }
+
+    public boolean validateNetwork(Network network) {
+        NetworkOffering nwkoff = _networkOfferingDao.findById(network.getNetworkOfferingId());
+        if (nwkoff.isSystemOnly()){
+            throw new InvalidParameterValueException("This network is for system use only, network id " + network.getId());
+        }
+        if (! _networkModel.areServicesSupportedInNetwork(network.getId(), Service.UserData)){
+            throw new InvalidParameterValueException("This network does not support userdata that is required for k8s, network id " + network.getId());
+        }
+        if (! _networkModel.areServicesSupportedInNetwork(network.getId(), Service.Firewall)){
+            throw new InvalidParameterValueException("This network does not support firewall that is required for k8s, network id " + network.getId());
+        }
+        if (! _networkModel.areServicesSupportedInNetwork(network.getId(), Service.PortForwarding)){
+            throw new InvalidParameterValueException("This network does not support port forwarding that is required for k8s, network id " + network.getId());
+        }
+        if (! _networkModel.areServicesSupportedInNetwork(network.getId(), Service.Dhcp)){
+            throw new InvalidParameterValueException("This network does not support dhcp that is required for k8s, network id " + network.getId());
+        }
+
+        List<? extends IpAddress> addrs = _networkModel.listPublicIpsAssignedToGuestNtwk(network.getId(), true);
+        IPAddressVO sourceNatIp = null;
+        if (addrs.isEmpty()) {
+            throw new InvalidParameterValueException("The network id:" + network.getId() + " does not have source NAT ip assoicated with it. " +
+                    "To provision a Container Cluster, a isolated network with source NAT is required." );
+        } else {
+            for (IpAddress addr : addrs) {
+                if (addr.isSourceNat()) {
+                    sourceNatIp = _publicIpAddressDao.findById(addr.getId());
+                }
+            }
+            if (sourceNatIp == null) {
+                throw new InvalidParameterValueException("The network id:" + network.getId() + " does not have source NAT ip assoicated with it. " +
+                        "To provision a Container Cluster, a isolated network with source NAT is required." );
+            }
+        }
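+        // Reject the network if any existing firewall or port forwarding rule on the source NAT IP already
+        // covers port 443, which the cluster needs for the Kubernetes API server.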
+        List<FirewallRuleVO> rules= _firewallDao.listByIpAndPurposeAndNotRevoked(sourceNatIp.getId(), FirewallRule.Purpose.Firewall);
+        for (FirewallRuleVO rule : rules) {
+            Integer startPort = rule.getSourcePortStart();
+            Integer endPort = rule.getSourcePortEnd();
+            s_logger.debug("Network rule : " + startPort + " " + endPort);
+            if (startPort <= 443 && 443 <= endPort) {
+                throw new InvalidParameterValueException("The network id:" + network.getId() + " has conflicting firewall rules to provision" +
+                        " container cluster." );
+            }
+        }
+
+        rules= _firewallDao.listByIpAndPurposeAndNotRevoked(sourceNatIp.getId(), FirewallRule.Purpose.PortForwarding);
+        for (FirewallRuleVO rule : rules) {
+            Integer startPort = rule.getSourcePortStart();
+            Integer endPort = rule.getSourcePortEnd();
+            s_logger.debug("Network rule : " + startPort + " " + endPort);
+            if (startPort <= 443 && 443 <= endPort) {
+                throw new InvalidParameterValueException("The network id:" + network.getId() + " has conflicting port forwarding rules to provision" +
+                        " container cluster." );
+            }
+        }
+        return true;
+    }
+
+    public boolean validateServiceOffering(ServiceOffering offering) {
+        final int cpu_requested = offering.getCpu() * offering.getSpeed();
+        final int ram_requested = offering.getRamSize();
+        if (offering.isDynamic()) {
+            throw new InvalidParameterValueException("This service offering is not suitable for a k8s cluster as it is a dynamic offering, service offering id is " + offering.getId());
+        }
+        if (ram_requested < 64) {
+            throw new InvalidParameterValueException("This service offering is not suitable for a k8s cluster as it has less than 64MB of RAM, service offering id is " + offering.getId());
+        }
+        if (cpu_requested < 200) {
+            throw new InvalidParameterValueException("This service offering is not suitable for a k8s cluster as it has less than 200MHz of CPU, service offering id is " + offering.getId());
+        }
+        return true;
+    }
+
+    private void validateDockerRegistryParams(final String dockerRegistryUserName,
+                                                 final String dockerRegistryPassword,
+                                                 final String dockerRegistryUrl,
+                                                 final String dockerRegistryEmail) {
+        // if no params related to docker registry specified then nothing to validate so return true
+        if ((dockerRegistryUserName == null || dockerRegistryUserName.isEmpty()) &&
+                (dockerRegistryPassword == null || dockerRegistryPassword.isEmpty())  &&
+                (dockerRegistryUrl == null || dockerRegistryUrl.isEmpty()) &&
+                (dockerRegistryEmail == null || dockerRegistryEmail.isEmpty())) {
+            return;
+        }
+
+        // all params related to docker registry must be specified or nothing
+        if (!((dockerRegistryUserName != null && !dockerRegistryUserName.isEmpty()) &&
+                (dockerRegistryPassword != null && !dockerRegistryPassword.isEmpty()) &&
+                (dockerRegistryUrl != null && !dockerRegistryUrl.isEmpty()) &&
+                (dockerRegistryEmail != null && !dockerRegistryEmail.isEmpty()))) {
+            throw new InvalidParameterValueException("All the docker private registry parameters (username, password, url, email) required are specified");
+        }
+
+        try {
+            URL url = new URL(dockerRegistryUrl);
+        } catch (MalformedURLException e) {
+            throw new InvalidParameterValueException("Invalid docker registry url specified");
+        }
+
+        Pattern VALID_EMAIL_ADDRESS_REGEX = Pattern.compile("^[A-Z0-9._%+-]+@[A-Z0-9.-]+\\.[A-Z]{2,6}$", Pattern.CASE_INSENSITIVE);
+        Matcher matcher = VALID_EMAIL_ADDRESS_REGEX.matcher(dockerRegistryEmail);
+        if (!matcher.find()) {
+            throw new InvalidParameterValueException("Invalid docker registry email specified");
+        }
+    }
+
+    public DeployDestination plan(final long clusterSize, final long dcId, final ServiceOffering offering) throws InsufficientServerCapacityException {
+        final int cpu_requested = offering.getCpu() * offering.getSpeed();
+        final long ram_requested = offering.getRamSize() * 1024L * 1024L;
+        List<HostVO> hosts = _resourceMgr.listAllHostsInOneZoneByType(Type.Routing, dcId);
+        final Map<String, Pair<HostVO, Integer>> hosts_with_reserved_capacity = new ConcurrentHashMap<String, Pair<HostVO, Integer>>();
+        for (HostVO h : hosts) {
+           hosts_with_reserved_capacity.put(h.getUuid(), new Pair<HostVO, Integer>(h, 0));
+        }
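+        // Greedily reserve capacity: for each VM to be deployed, pick the first host that still has enough
+        // free CPU and RAM for all VMs already reserved on it, honoring the cluster's overcommit ratios.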
+        boolean suitable_host_found=false;
+        for (int i=1; i <= clusterSize+1; i++) {
+            suitable_host_found=false;
+            for (Map.Entry<String, Pair<HostVO, Integer>> hostEntry : hosts_with_reserved_capacity.entrySet()) {
+                Pair<HostVO, Integer> hp = hostEntry.getValue();
+                HostVO h = hp.first();
+                int reserved = hp.second();
+                reserved++;
+                ClusterVO cluster = _clusterDao.findById(h.getClusterId());
+                ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster.getId(), "cpuOvercommitRatio");
+                ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster.getId(), "memoryOvercommitRatio");
+                Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue());
+                Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue());
+                if (s_logger.isDebugEnabled()){
+                    s_logger.debug("Checking host " + h.getId() + " for capacity already reserved " + reserved);
+                }
+                if (_capacityMgr.checkIfHostHasCapacity(h.getId(), cpu_requested * reserved, ram_requested * reserved, false, cpuOvercommitRatio, memoryOvercommitRatio, true)) {
+                    if (s_logger.isDebugEnabled()){
+                        s_logger.debug("Found host " + h.getId() + " has enough capacity cpu = " + cpu_requested * reserved + " ram =" + ram_requested * reserved);
+                    }
+                    hostEntry.setValue(new Pair<HostVO, Integer>(h, reserved));
+                    suitable_host_found = true;
+                    break;
+                }
+            }
+            if (suitable_host_found){
+                continue;
+            }
+            else {
+                 if (s_logger.isDebugEnabled()){
+                     s_logger.debug("Suitable hosts not found in datacenter " + dcId + " for node " + i);
+                 }
+                break;
+            }
+        }
+        if (suitable_host_found){
+            if (s_logger.isDebugEnabled()){
+                s_logger.debug("Suitable hosts found in datacenter " + dcId + " creating deployment destination");
+            }
+            return new DeployDestination(_dcDao.findById(dcId), null, null, null);
+        }
+        String msg = String.format("Cannot find enough capacity for application_cluster(requested cpu=%1$s memory=%2$s)",
+                cpu_requested*clusterSize, ram_requested*clusterSize);
+        s_logger.warn(msg);
+        throw new InsufficientServerCapacityException(msg, DataCenter.class, dcId);
+    }
+
+    public DeployDestination plan(final long containerClusterId, final long dcId) throws InsufficientServerCapacityException {
+        ApplicationClusterVO containerCluster = _applicationClusterDao.findById(containerClusterId);
+        ServiceOffering offering = _srvOfferingDao.findById(containerCluster.getServiceOfferingId());
+
+        if (s_logger.isDebugEnabled()){
+            s_logger.debug("Checking deployment destination for containerClusterId= " + containerClusterId + " in dcId=" + dcId);
+        }
+
+        return plan(containerCluster.getNodeCount() + 1, dcId, offering);
+    }
+
+    @Override
+    public boolean stopContainerCluster(long containerClusterId) throws ManagementServerException {
+
+        final ApplicationClusterVO containerCluster = _applicationClusterDao.findById(containerClusterId);
+        if (containerCluster == null) {
+            throw new ManagementServerException("Failed to find container cluster id: " + containerClusterId);
+        }
+
+        if (containerCluster.getRemoved() != null) {
+            throw new ManagementServerException("Container cluster id:" + containerClusterId + " is already deleted.");
+        }
+
+        if (containerCluster.getState().equals(ApplicationCluster.State.Stopped) ){
+            if (s_logger.isDebugEnabled()) {
+                s_logger.debug("Container cluster id: " + containerClusterId + " is already stopped.");
+            }
+            return true;
+        }
+
+        if (containerCluster.getState().equals(ApplicationCluster.State.Stopping) ){
+            if (s_logger.isDebugEnabled()) {
+                s_logger.debug("Container cluster id: " + containerClusterId + " is getting stopped.");
+            }
+            return true;
+        }
+
+        if (s_logger.isDebugEnabled()) {
+            s_logger.debug("Stopping container cluster: " + containerCluster.getName());
+        }
+
+        stateTransitTo(containerClusterId, ApplicationCluster.Event.StopRequested);
+
+        for (final ApplicationClusterVmMapVO vmMapVO : _clusterVmMapDao.listByClusterId(containerClusterId)) {
+            final UserVmVO vm = _userVmDao.findById(vmMapVO.getVmId());
+            try {
+                if (vm == null) {
+                    stateTransitTo(containerClusterId, ApplicationCluster.Event.OperationFailed);
+                    throw new ManagementServerException("Failed to start all VMs in container cluster id: " + containerClusterId);
+                }
+                stopClusterVM(vmMapVO);
+            } catch (ServerApiException ex) {
+                s_logger.warn("Failed to stop VM in container cluster id:" + containerClusterId + " due to " + ex);
+                // don't bail out here; proceed further to stop the rest of the VMs
+            }
+        }
+
+        for (final ApplicationClusterVmMapVO vmMapVO : _clusterVmMapDao.listByClusterId(containerClusterId)) {
+            final UserVmVO vm = _userVmDao.findById(vmMapVO.getVmId());
+            if (vm == null || !vm.getState().equals(VirtualMachine.State.Stopped)) {
+                stateTransitTo(containerClusterId, ApplicationCluster.Event.OperationFailed);
+                throw new ManagementServerException("Failed to stop all VMs in container cluster id: " + containerClusterId);
+            }
+        }
+
+        stateTransitTo(containerClusterId, ApplicationCluster.Event.OperationSucceeded);
+        return true;
+    }
+
+    private boolean isAddOnServiceRunning(Long clusterId, String svcName) {
+
+        ApplicationClusterVO containerCluster = _applicationClusterDao.findById(clusterId);
+
+        // FIXME: whole logic needs a revamp. The assumption that the management server has public network access is not practical.
+        IPAddressVO publicIp = null;
+        List<IPAddressVO> ips = _publicIpAddressDao.listByAssociatedNetwork(containerCluster.getNetworkId(), true);
+        publicIp = ips.get(0);
+
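+        // Shell out to kubectl against the cluster's public endpoint and scan the kube-system pods for one
+        // whose name contains svcName and whose status is Running.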
+        Runtime r = Runtime.getRuntime();
+        int nodePort = 0;
+        try {
+            ApplicationClusterDetailsVO clusterDetails = _applicationClusterDetailsDao.findByClusterId(containerCluster.getId());
+            String execStr = "kubectl -s https://" + publicIp.getAddress().addr() + "/ --username=admin "
+                    + " --password=" + clusterDetails.getPassword()
+                    + " get pods --insecure-skip-tls-verify=true --namespace=kube-system";
+            Process p = r.exec(execStr);
+            p.waitFor();
+            BufferedReader b = new BufferedReader(new InputStreamReader(p.getInputStream(), "UTF8"));
+            String line = "";
+            while ((line = b.readLine()) != null) {
+                if (s_logger.isDebugEnabled()) {
+                    s_logger.debug("KUBECTL : " + line);
+                }
+                if (line.contains(svcName) && line.contains("Running")) {
+                    if (s_logger.isDebugEnabled()) {
+                        s_logger.debug("Service :" + svcName + " for the container cluster "
+                                + containerCluster.getName() + " is running");
+                    }
+                    b.close();
+                    return true;
+                }
+            }
+            b.close();
+        } catch (IOException excep) {
+            s_logger.warn("KUBECTL: " + excep);
+        } catch (InterruptedException e) {
+            s_logger.warn("KUBECTL: " + e);
+        }
+        return false;
+    }
+
+    @Override
+    public boolean deleteContainerCluster(Long containerClusterId) throws ManagementServerException {
+
+        ApplicationClusterVO cluster = _applicationClusterDao.findById(containerClusterId);
+        if (cluster == null) {
+            throw new InvalidParameterValueException("Invalid cluster id specified");
+        }
+
+        CallContext ctx = CallContext.current();
+        Account caller = ctx.getCallingAccount();
+
+        _accountMgr.checkAccess(caller, SecurityChecker.AccessType.OperateEntry, false, cluster);
+
+        return cleanupContainerClusterResources(containerClusterId);
+    }
+
+    private boolean cleanupContainerClusterResources(Long containerClusterId) throws ManagementServerException {
+
+        ApplicationClusterVO cluster = _applicationClusterDao.findById(containerClusterId);
+
+        if (!(cluster.getState().equals(ApplicationCluster.State.Running)
+                || cluster.getState().equals(ApplicationCluster.State.Stopped)
+                || cluster.getState().equals(ApplicationCluster.State.Alert)
+                || cluster.getState().equals(ApplicationCluster.State.Error)
+                || cluster.getState().equals(ApplicationCluster.State.Destroying))) {
+            if (s_logger.isDebugEnabled()) {
+                s_logger.debug("Cannot perform delete operation on cluster:" + cluster.getName() + " in state " + cluster.getState() );
+            }
+            throw new PermissionDeniedException("Cannot perform delete operation on cluster: " + cluster.getName() + " in state" + cluster.getState() );
+        }
+
+        stateTransitTo(containerClusterId, ApplicationCluster.Event.DestroyRequested);
+
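+        // Destroy every VM mapped to the cluster; failures are remembered so that the network is only
+        // destroyed once all VMs are gone.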
+        boolean failedVmDestroy = false;
+        List<ApplicationClusterVmMapVO> clusterVMs = _applicationClusterVmMapDao.listByClusterId(cluster.getId());
+        if ( (clusterVMs != null) && !clusterVMs.isEmpty()) {
+            for (ApplicationClusterVmMapVO clusterVM: clusterVMs) {
+                long vmID = clusterVM.getVmId();
+
+                // delete only if VM exists and is not removed
+                UserVmVO userVM = _vmDao.findById(vmID);
+                if (userVM== null || userVM.isRemoved()) {
+                    continue;
+                }
+
+                try {
+                    _userVmService.destroyVm(vmID,true);
+                    _applicationClusterVmMapDao.expunge(clusterVM.getId());
+                    if (s_logger.isDebugEnabled()) {
+                        s_logger.debug("Destroyed VM: " + userVM.getInstanceName() + " as part of cluster: " + cluster.getName() + " destroy.");
+                    }
+                } catch (Exception e ) {
+                    failedVmDestroy = true;
+                    s_logger.warn("Failed to destroy VM :" + userVM.getInstanceName() + " part of the cluster: " + cluster.getName() +
+                            " due to " + e);
+                    s_logger.warn("Moving on with destroying remaining resources provisioned for the cluster: " + cluster.getName());
+                }
+            }
+        }
+        ApplicationClusterDetailsVO clusterDetails = _applicationClusterDetailsDao.findByClusterId(containerClusterId);
+        boolean cleanupNetwork = clusterDetails.getNetworkCleanup();
+
+        // if there are VMs that were not expunged, we cannot delete the network
+        if(!failedVmDestroy) {
+            if (cleanupNetwork) {
+                NetworkVO network = null;
+                try {
+                    network = _networkDao.findById(cluster.getNetworkId());
+                    if (network != null && network.getRemoved() == null) {
+                        Account owner = _accountMgr.getAccount(network.getAccountId());
+                        User callerUser = _accountMgr.getActiveUser(CallContext.current().getCallingUserId());
+                        ReservationContext context = new ReservationContextImpl(null, null, callerUser, owner);
+                        boolean networkDestroyed = _networkMgr.destroyNetwork(cluster.getNetworkId(), context, true);
+                        if (!networkDestroyed) {
+                            if (s_logger.isDebugEnabled()) {
+                                s_logger.debug("Failed to destroy network: " + cluster.getNetworkId() +
+                                        " as part of cluster: " + cluster.getName()+ " destroy");
+                            }
+                            processFailedNetworkDelete(containerClusterId);
+                            throw new ManagementServerException("Failed to delete the network as part of container cluster name:" + cluster.getName() + " clean up");
+                        }
+                        if(s_logger.isDebugEnabled()) {
+                            s_logger.debug("Destroyed network: " +  network.getName() + " as part of cluster: " + cluster.getName() + " destroy");
+                        }
+                    }
+                } catch (Exception e) {
+                    if (s_logger.isDebugEnabled()) {
+                        s_logger.debug("Failed to destroy network: " + cluster.getNetworkId() +
+                                " as part of cluster: " + cluster.getName() + "  destroy due to " + e);
+                    }
+                    processFailedNetworkDelete(containerClusterId);
+                    throw new ManagementServerException("Failed to delete the network as part of container cluster name:" + cluster.getName() + " clean up");
+                }
+            }
+        } else {
+            if (s_logger.isDebugEnabled()) {
+                s_logger.debug("There are VM's that are not expunged in container cluster " + cluster.getName());
+            }
+            processFailedNetworkDelete(containerClusterId);
+            throw new ManagementServerException("Failed to destroy one or more VM's as part of container cluster name:" + cluster.getName() + " clean up");
+        }
+
+        stateTransitTo(containerClusterId, ApplicationCluster.Event.OperationSucceeded);
+
+        cluster = _applicationClusterDao.findById(containerClusterId);
+        cluster.setCheckForGc(false);
+        _applicationClusterDao.update(cluster.getId(), cluster);
+
+        _applicationClusterDao.remove(cluster.getId());
+
+        if (s_logger.isDebugEnabled()) {
+            s_logger.debug("Container cluster name:" + cluster.getName() + " is successfully deleted");
+        }
+
+        return true;
+    }
+
+    void processFailedNetworkDelete(long containerClusterId) {
+        stateTransitTo(containerClusterId, ApplicationCluster.Event.OperationFailed);
+        ApplicationClusterVO cluster = _applicationClusterDao.findById(containerClusterId);
+        cluster.setCheckForGc(true);
+        _applicationClusterDao.update(cluster.getId(), cluster);
+    }
+
+    UserVm createK8SMaster(final ApplicationClusterVO containerCluster, final IPAddressVO publicIP) throws ManagementServerException,
+            ResourceAllocationException, ResourceUnavailableException, InsufficientCapacityException {
+
+        UserVm masterVm = null;
+
+        DataCenter zone = _dcDao.findById(containerCluster.getZoneId());
+        ServiceOffering serviceOffering = _offeringDao.findById(containerCluster.getServiceOfferingId());
+        VirtualMachineTemplate template = _templateDao.findById(containerCluster.getTemplateId());
+
+        List<Long> networkIds = new ArrayList<Long>();
+        networkIds.add(containerCluster.getNetworkId());
+
+        Account owner = _accountDao.findById(containerCluster.getAccountId());
+
+        Network.IpAddresses addrs = new Network.IpAddresses(null, null);
+
+        Map<String, String> customparameterMap = new HashMap<String, String>();
+
+        String hostName = containerCluster.getName() + "-k8s-master";
+
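+        // Render the master cloud-config template: substitute the API server TLS certificate/key signed by
+        // the CCS root CA, the root CA certificate, and the cluster's admin username and password.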
+        String k8sMasterConfig = null;
+        try {
+            String masterCloudConfig = _globalConfigDao.getValue(ApplicationClusterConfig.ApplicationClusterMasterCloudConfig.key());
+            k8sMasterConfig = readFile(masterCloudConfig);
+
+            final String user = "{{ k8s_master.user }}";
+            final String password = "{{ k8s_master.password }}";
+            final String apiServerCert = "{{ k8s_master.apiserver.crt }}";
+            final String apiServerKey = "{{ k8s_master.apiserver.key }}";
+            final String caCert = "{{ k8s_master.ca.crt }}";
+
+            final KeystoreVO rootCA = keystoreDao.findByName(CCS_ROOTCA_KEYPAIR);
+            final PrivateKey rootCAPrivateKey = pemToRSAPrivateKey(rootCA.getKey());
+            final X509Certificate rootCACert = pemToX509Cert(rootCA.getCertificate());
+            final KeyPair keyPair = generateRandomKeyPair();
+            final String tlsClientCert = x509CertificateToPem(generateClientCertificate(rootCAPrivateKey, rootCACert, keyPair, publicIP.getAddress().addr(), true));
+            final String tlsPrivateKey = rsaPrivateKeyToPem(keyPair.getPrivate());
+
+            k8sMasterConfig = k8sMasterConfig.replace(apiServerCert, tlsClientCert.replace("\n", "\n      "));
+            k8sMasterConfig = k8sMasterConfig.replace(apiServerKey, tlsPrivateKey.replace("\n", "\n      "));
+            k8sMasterConfig = k8sMasterConfig.replace(caCert, rootCA.getCertificate().replace("\n", "\n      "));
+
+            ApplicationClusterDetailsVO clusterDetails = _applicationClusterDetailsDao.findByClusterId(containerCluster.getId());
+            k8sMasterConfig = k8sMasterConfig.replace(password, clusterDetails.getPassword());
+            k8sMasterConfig = k8sMasterConfig.replace(user, clusterDetails.getUserName());
+        } catch (RuntimeException e ) {
+            s_logger.error("Failed to read kubernetes master configuration file due to " + e);
+            throw new ManagementServerException("Failed to read kubernetes master configuration file", e);
+        } catch (Exception e) {
+            s_logger.error("Failed to read kubernetes master configuration file due to " + e);
+            throw new ManagementServerException("Failed to read kubernetes master configuration file", e);
+        }
+
+        String base64UserData = Base64.encodeBase64String(k8sMasterConfig.getBytes(Charset.forName("UTF-8")));
+
+        masterVm = _userVmService.createAdvancedVirtualMachine(zone, serviceOffering, template, networkIds, owner,
+                hostName, containerCluster.getDescription(), null, null, null,
+                null, BaseCmd.HTTPMethod.POST, base64UserData, containerCluster.getKeyPair(),
+                null, addrs, null, null, null, customparameterMap, null);
+
+        if (s_logger.isDebugEnabled()) {
+            s_logger.debug("Created master VM: " + hostName + " in the container cluster: " + containerCluster.getName());
+        }
+
+        return masterVm;
+    }
+
+
+    UserVm createK8SNode(ApplicationClusterVO containerCluster, String masterIp, int nodeInstance) throws ManagementServerException,
+            ResourceAllocationException, ResourceUnavailableException, InsufficientCapacityException {
+
+        UserVm nodeVm = null;
+
+        DataCenter zone = _dcDao.findById(containerCluster.getZoneId());
+        ServiceOffering serviceOffering = _offeringDao.findById(containerCluster.getServiceOfferingId());
+        VirtualMachineTemplate template = _templateDao.findById(containerCluster.getTemplateId());
+
+        List<Long> networkIds = new ArrayList<Long>();
+        networkIds.add(containerCluster.getNetworkId());
+
+        Account owner = _accountDao.findById(containerCluster.getAccountId());
+
+        Network.IpAddresses addrs = new Network.IpAddresses(null, null);
+
+        Map<String, String> customparameterMap = new HashMap<String, String>();
+
+        String hostName = containerCluster.getName() + "-k8s-node-" + String.valueOf(nodeInstance);
+
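+        // Render the node cloud-config template: substitute the master VM's IP, a client TLS certificate/key
+        // signed by the CCS root CA, the root CA certificate and, when a private docker registry is
+        // configured, a /.docker/config.json write-files entry.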
+        String k8sNodeConfig = null;
+        try {
+            String nodeCloudConfig = _globalConfigDao.getValue(ApplicationClusterConfig.ApplicationClusterNodeCloudConfig.key());
+            k8sNodeConfig = readFile(nodeCloudConfig);
+            String masterIPString = "{{ k8s_master.default_ip }}";
+            final String clientCert = "{{ k8s_node.client.crt }}";
+            final String clientKey = "{{ k8s_node.client.key }}";
+            final String caCert = "{{ k8s_node.ca.crt }}";
+
+            final KeystoreVO rootCA = keystoreDao.findByName(CCS_ROOTCA_KEYPAIR);
+            final PrivateKey rootCAPrivateKey = pemToRSAPrivateKey(rootCA.getKey());
+            final X509Certificate rootCACert = pemToX509Cert(rootCA.getCertificate());
+            final KeyPair keyPair = generateRandomKeyPair();
+            final String tlsClientCert = x509CertificateToPem(generateClientCertificate(rootCAPrivateKey, rootCACert, keyPair, "", false));
+            final String tlsPrivateKey = rsaPrivateKeyToPem(keyPair.getPrivate());
+
+            k8sNodeConfig = k8sNodeConfig.replace(masterIPString, masterIp);
+            k8sNodeConfig = k8sNodeConfig.replace(clientCert, tlsClientCert.replace("\n", "\n      "));
+            k8sNodeConfig = k8sNodeConfig.replace(clientKey, tlsPrivateKey.replace("\n", "\n      "));
+            k8sNodeConfig = k8sNodeConfig.replace(caCert, rootCA.getCertificate().replace("\n", "\n      "));
+
+            ApplicationClusterDetailsVO clusterDetails = _applicationClusterDetailsDao.findByClusterId(containerCluster.getId());
+
+            /* generate the /.docker/config.json file on the nodes only if the container cluster is created to
+             * use a docker private registry */
+            String dockerUserName = clusterDetails.getRegistryUsername();
+            String dockerPassword = clusterDetails.getRegistryPassword();
+            if (dockerUserName != null && !dockerUserName.isEmpty() && dockerPassword != null && !dockerPassword.isEmpty()) {
+                // write the /.docker/config.json file through the code instead of k8s-node.yml, as a section of
+                // the cloud-config template cannot be made optional or conditionally applied
+                String dockerConfigString = "write-files:\n" +
+                        "  - path: /.docker/config.json\n" +
+                        "    owner: core:core\n" +
+                        "    permissions: '0644'\n" +
+                        "    content: |\n" +
+                        "      {\n" +
+                        "        \"auths\": {\n" +
+                        "          {{docker.url}}: {\n" +
+                        "            \"auth\": {{docker.secret}},\n" +
+                        "            \"email\": {{docker.email}}\n" +
+                        "          }\n" +
+                        "         }\n" +
+                        "      }";
+                k8sNodeConfig = k8sNodeConfig.replace("write-files:", dockerConfigString);
+                String dockerUrl = "{{docker.url}}";
+                String dockerAuth = "{{docker.secret}}";
+                String dockerEmail = "{{docker.email}}";
+                String usernamePassword = dockerUserName + ":" + dockerPassword;
+                String base64Auth = Base64.encodeBase64String(usernamePassword.getBytes(Charset.forName("UTF-8")));
+                k8sNodeConfig = k8sNodeConfig.replace(dockerUrl, "\"" + clusterDetails.getRegistryUrl() + "\"");
+                k8sNodeConfig = k8sNodeConfig.replace(dockerAuth, "\"" + base64Auth + "\"");
+                k8sNodeConfig = k8sNodeConfig.replace(dockerEmail, "\"" + clusterDetails.getRegistryEmail() + "\"");
+            }
+        } catch (RuntimeException e ) {
+            s_logger.warn("Failed to read node configuration file due to " + e );
+            throw new ManagementServerException("Failed to read cluster node configuration file.", e);
+        } catch (Exception e) {
+            s_logger.warn("Failed to read node configuration file due to " + e );
+            throw new ManagementServerException("Failed to read cluster node configuration file.", e);
+        }
+
+        String base64UserData = Base64.encodeBase64String(k8sNodeConfig.getBytes(Charset.forName("UTF-8")));
+
+        nodeVm = _userVmService.createAdvancedVirtualMachine(zone, serviceOffering, template, networkIds, owner,
+                hostName, containerCluster.getDescription(), null, null, null,
+                null, BaseCmd.HTTPMethod.POST, base64UserData, containerCluster.getKeyPair(),
+                null, addrs, null, null, null, customparameterMap, null);
+
+        if (s_logger.isDebugEnabled()) {
+            s_logger.debug("Created cluster node VM: " + hostName + " in the container cluster: " + containerCluster.getName());
+        }
+
+        return nodeVm;
+    }
+
+    private void startClusterVM(final UserVm vm, final ApplicationClusterVO containerCluster) throws ServerApiException {
+
+        try {
+            StartVMCmd startVm = new StartVMCmd();
+            startVm = ComponentContext.inject(startVm);
+            Field f = startVm.getClass().getDeclaredField("id");
+            f.setAccessible(true);
+            f.set(startVm, vm.getId());
+            _userVmService.startVirtualMachine(startVm);
+            if (s_logger.isDebugEnabled()) {
+                s_logger.debug("Started VM in the container cluster: " + containerCluster.getName());
+            }
+        } catch (ConcurrentOperationException ex) {
+            s_logger.warn("Failed to start VM in the container cluster name:" + containerCluster.getName() + " due to Exception: " , ex);
+            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to start VM in the container cluster name:" + containerCluster.getName(), ex);
+        } catch (ResourceUnavailableException ex) {
+            s_logger.warn("Failed to start VM in the container cluster name:" + containerCluster.getName() + " due to Exception: " , ex);
+            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to start VM in the container cluster name:" + containerCluster.getName(), ex);
+        } catch (InsufficientCapacityException ex) {
+            s_logger.warn("Failed to start VM in the container cluster name:" + containerCluster.getName() + " due to Exception: " , ex);
+            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to start VM in the container cluster name:" + containerCluster.getName(), ex);
+        } catch (RuntimeException ex) {
+            s_logger.warn("Failed to start VM in the container cluster name:" + containerCluster.getName() + " due to Exception: " , ex);
+            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to start VM in the container cluster name:" + containerCluster.getName(), ex);
+        } catch (Exception ex) {
+            s_logger.warn("Failed to start VM in the container cluster name:" + containerCluster.getName() + " due to Exception: " , ex);
+            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to start VM in the container cluster name:" + containerCluster.getName(), ex);
+        }
+
+        UserVm startVm = _vmDao.findById(vm.getId());
+        if (!startVm.getState().equals(VirtualMachine.State.Running)) {
+            s_logger.warn("Failed to start VM instance.");
+            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to start VM instance in container cluster " + containerCluster.getName());
+        }
+    }
+
+    private void stopClusterVM(final ApplicationClusterVmMapVO vmMapVO) throws ServerApiException {
+        try {
+            _userVmService.stopVirtualMachine(vmMapVO.getVmId(), false);
+        } catch (ConcurrentOperationException ex) {
+            s_logger.warn("Failed to stop container cluster VM due to Exception: ", ex);
+            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage());
+        }
+    }
+
+    @Override
+    public ListResponse<ApplicationClusterResponse>  listApplicationClusters(ListApplicationClusterCmd cmd) {
+
+        CallContext ctx = CallContext.current();
+        Account caller = ctx.getCallingAccount();
+
+        ListResponse<ApplicationClusterResponse> response = new ListResponse<ApplicationClusterResponse>();
+
+        List<ApplicationClusterResponse> responsesList = new ArrayList<ApplicationClusterResponse>();
+        SearchCriteria<ApplicationClusterVO> sc = _applicationClusterDao.createSearchCriteria();
+
+        String state = cmd.getState();
+        if (state != null && !state.isEmpty()) {
+            if ( !ApplicationCluster.State.Running.toString().equals(state) &&
+                    !ApplicationCluster.State.Stopped.toString().equals(state) &&
+                    !ApplicationCluster.State.Destroyed.toString().equals(state)) {
+                throw new InvalidParameterValueException("Invalid vlaue for cluster state is specified");
+            }
+        }
+
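+        // A specific cluster id takes precedence; otherwise build a search filtered by state, name and the
+        // caller's account or domain.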
+        if (cmd.getId() != null) {
+            ApplicationClusterVO cluster = _applicationClusterDao.findById(cmd.getId());
+            if (cluster == null) {
+                throw new InvalidParameterValueException("Invalid cluster id specified");
+            }
+            _accountMgr.checkAccess(caller, SecurityChecker.AccessType.ListEntry, false, cluster);
+            responsesList.add(createContainerClusterResponse(cmd.getId()));
+        } else {
+            Filter searchFilter = new Filter(ApplicationClusterVO.class, "id", true, cmd.getStartIndex(), cmd.getPageSizeVal());
+
+            if (state != null && !state.isEmpty()) {
+                sc.addAnd("state", SearchCriteria.Op.EQ, state);
+            }
+
+            if (_accountMgr.isNormalUser(caller.getId())) {
+                sc.addAnd("accountId", SearchCriteria.Op.EQ, caller.getAccountId());
+            } else if (_accountMgr.isDomainAdmin(caller.getId())) {
+                sc.addAnd("domainId", SearchCriteria.Op.EQ, caller.getDomainId());
+            }
+
+            String name = cmd.getName();
+            if (name != null && !name.isEmpty()) {
+                sc.addAnd("name", SearchCriteria.Op.LIKE, name);
+            }
+
+            List<ApplicationClusterVO> containerClusters = _applicationClusterDao.search(sc, searchFilter);
+            for (ApplicationClusterVO cluster : containerClusters) {
+                ApplicationClusterResponse clusterResponse = createContainerClusterResponse(cluster.getId());
+                responsesList.add(clusterResponse);
+            }
+        }
+        response.setResponses(responsesList);
+        return response;
+    }
+
+    public ApplicationClusterResponse createContainerClusterResponse(long containerClusterId) {
+
+        ApplicationClusterVO containerCluster = _applicationClusterDao.findById(containerClusterId);
+        ApplicationClusterResponse response = new ApplicationClusterResponse();
+
+        response.setId(containerCluster.getUuid());
+
+        response.setName(containerCluster.getName());
+
+        response.setDescription(containerCluster.getDescription());
+
+        DataCenterVO zone = ApiDBUtils.findZoneById(containerCluster.getZoneId());
+        response.setZoneId(zone.getUuid());
+        response.setZoneName(zone.getName());
+
+        response.setClusterSize(String.valueOf(containerCluster.getNodeCount()));
+
+        VMTemplateVO template = ApiDBUtils.findTemplateById(containerCluster.getTemplateId());
+        response.setTemplateId(template.getUuid());
+
+        ServiceOfferingVO offering = _srvOfferingDao.findById(containerCluster.getServiceOfferingId());
+        response.setServiceOfferingId(offering.getUuid());
+
+        response.setServiceOfferingName(offering.getName());
+
+        response.setKeypair(containerCluster.getKeyPair());
+
+        response.setState(containerCluster.getState().toString());
+
+        response.setCores(String.valueOf(containerCluster.getCores()));
+
+        response.setMemory(String.valueOf(containerCluster.getMemory()));
+
+        response.setObjectName("applicationcluster");
+
+        NetworkVO ntwk = _networkDao.findByIdIncludingRemoved(containerCluster.getNetworkId());
+
+        response.setEndpoint(containerCluster.getEndpoint());
+
+        response.setNetworkId(ntwk.getUuid());
+
+        response.setAssociatedNetworkName(ntwk.getName());
+
+        response.setConsoleEndpoint(containerCluster.getConsoleEndpoint());
+
+        List<String> vmIds = new ArrayList<String>();
+        List<ApplicationClusterVmMapVO> vmList = _applicationClusterVmMapDao.listByClusterId(containerCluster.getId());
+        if (vmList != null && !vmList.isEmpty()) {
+            for (ApplicationClusterVmMapVO vmMapVO: vmList) {
+                UserVmVO userVM = _userVmDao.findById(vmMapVO.getVmId());
+                if (userVM != null) {
+                    vmIds.add(userVM.getUuid());
+                }
+            }
+        }
+
+        response.setVirtualMachineIds(vmIds);
+
+        ApplicationClusterDetailsVO clusterDetails = _applicationClusterDetailsDao.findByClusterId(containerCluster.getId());
+        if (clusterDetails != null) {
+            response.setUsername(clusterDetails.getUserName());
+            response.setPassword(clusterDetails.getPassword());
+        }
+
+        return response;
+    }
+
+    static String readFile(String path) throws IOException {
+        byte[] encoded = Files.readAllBytes(Paths.get(path));
+        return new String(encoded, StandardCharsets.UTF_8);
+    }
+
+    protected boolean stateTransitTo(long containerClusterId, ApplicationCluster.Event e) {
+        ApplicationClusterVO containerCluster = _applicationClusterDao.findById(containerClusterId);
+        try {
+            return _stateMachine.transitTo(containerCluster, e, null, _applicationClusterDao);
+        } catch (NoTransitionException nte) {
+            s_logger.warn("Failed to transistion state of the container cluster: " + containerCluster.getName()
+                    + " in state " + containerCluster.getState().toString() + " on event " + e.toString());
+            return false;
+        }
+    }
+
+    private static String getStackTrace(final Throwable throwable) {
+        final StringWriter sw = new StringWriter();
+        final PrintWriter pw = new PrintWriter(sw, true);
+        throwable.printStackTrace(pw);
+        return sw.getBuffer().toString();
+    }
+
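+    // Checks that the global settings required by the container cluster service (template, master and node
+    // cloud-config files, and network offering) are present and usable in the given zone.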
+    private boolean isApplicationClusterServiceConfigured(DataCenter zone) {
+
+        String templateName = _globalConfigDao.getValue(ApplicationClusterConfig.ApplicationClusterTemplateName.key());
+        if (templateName == null || templateName.isEmpty()) {
+            s_logger.warn("Global setting " + ApplicationClusterConfig.ApplicationClusterTemplateName.key() + " is empty." +
+                    "Template name need to be specified, for container service to function.");
+            return false;
+        }
+
+        final VMTemplateVO template = _templateDao.findByTemplateName(templateName);
+        if (template == null) {
+           s_logger.warn("Unable to find the template:" + templateName  + " to be used for provisioning cluster");
+            return false;
+        }
+
+        String masterCloudConfig = _globalConfigDao.getValue(ApplicationClusterConfig.ApplicationClusterMasterCloudConfig.key());
+        if (masterCloudConfig == null || masterCloudConfig.isEmpty()) {
+            s_logger.warn("global setting " + ApplicationClusterConfig.ApplicationClusterMasterCloudConfig.key() + " is empty." +
+                    "Admin has not specified the cloud config template to be used for provisioning master VM");
+            return false;
+        }
+
+        String nodeCloudConfig = _globalConfigDao.getValue(ApplicationClusterConfig.ApplicationClusterNodeCloudConfig.key());
+        if (nodeCloudConfig == null || nodeCloudConfig.isEmpty()) {
+            s_logger.warn("global setting " + ApplicationClusterConfig.ApplicationClusterNodeCloudConfig.key() + " is empty." +
+                    "Admin has not specified the cloud config template to be used for provisioning node VM's");
+            return false;
+        }
+
+
+        String networkOfferingName = _globalConfigDao.getValue(ApplicationClusterConfig.ApplicationClusterNetworkOffering.key());
+        if (networkOfferingName == null || networkOfferingName.isEmpty()) {
+            s_logger.warn("global setting " + ApplicationClusterConfig.ApplicationClusterNetworkOffering.key()  + " is empty. " +
+                    "Admin has not yet specified the network offering to be used for provisioning isolated network for the cluster.");
+            return false;
+        }
+
+        NetworkOfferingVO networkOffering = _networkOfferingDao.findByUniqueName(networkOfferingName);
+        if (networkOffering == null) {
+            s_logger.warn("Network offering with name :" + networkOfferingName + " specified by admin is not found.");
+            return false;
+        }
+
+        if (networkOffering.getState() == NetworkOffering.State.Disabled) {
+            s_logger.warn("Network offering :" + networkOfferingName + "is not enabled.");
+            return false;
+        }
+
+        List<String> services = _ntwkOfferingServiceMapDao.listServicesForNetworkOffering(networkOffering.getId());
+        if (services == null || services.isEmpty() || !services.contains("SourceNat")) {
+            s_logger.warn("Network offering :" + networkOfferingName + " does not have necessary services to provision container cluster");
+            return false;
+        }
+
+        if (!networkOffering.getEgressDefaultPolicy()) {
+            s_logger.warn("Network offering: " + networkOfferingName + " has the egress default policy turned off; it must be on to provision a container cluster.");
+            return false;
+        }
+
+        long physicalNetworkId = _networkModel.findPhysicalNetworkId(zone.getId(), networkOffering.getTags(), networkOffering.getTrafficType());
+        PhysicalNetwork physicalNetwork = _physicalNetworkDao.findById(physicalNetworkId);
+        if (physicalNetwork == null) {
+            s_logger.warn("Unable to find physical network with id: " + physicalNetworkId + " and tag: " + networkOffering.getTags());
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public List<Class<?>> getCommands() {
+        List<Class<?>> cmdList = new ArrayList<Class<?>>();
+        cmdList.add(CreateApplicationClusterCmd.class);
+        cmdList.add(StartApplicationClusterCmd.class);
+        cmdList.add(StopApplicationClusterCmd.class);
+        cmdList.add(DeleteApplicationClusterCmd.class);
+        cmdList.add(ListApplicationClusterCmd.class);
+        cmdList.add(ListApplicationClusterCACertCmd.class);
+        return cmdList;
+    }
+
+    // The garbage collector periodically runs through the container clusters marked for GC. For each container cluster
+    // marked for GC, an attempt is made to destroy the cluster.
+    public class ApplicationClusterGarbageCollector extends ManagedContextRunnable {
+        @Override
+        protected void runInContext() {
+            GlobalLock gcLock = GlobalLock.getInternLock("ApplicationCluster.GC.Lock");
+            try {
+                if (gcLock.lock(3)) {
+                    try {
+                        reallyRun();
+                    } finally {
+                        gcLock.unlock();
+                    }
+                }
+            } finally {
+                gcLock.releaseRef();
+            }
+        }
+
+        public void reallyRun() {
+            try {
+                List<ApplicationClusterVO> clusters = _applicationClusterDao.findClustersToGarbageCollect();
+                for (ApplicationCluster cluster :clusters ) {
+                    if (s_logger.isDebugEnabled()) {
+                        s_logger.debug("Running application cluster garbage collector on container cluster name:" + cluster.getName());
+                    }
+                    try {
+                        if (cleanupContainerClusterResources(cluster.getId())) {
+                            if (s_logger.isDebugEnabled()) {
+                                s_logger.debug("Application cluster: " + cluster.getName() + " is successfully garbage collected");
+                            }
+                        } else {
+                            if (s_logger.isDebugEnabled()) {
+                                s_logger.debug("Application cluster: " + cluster.getName() + " failed to get" +
+                                        " garbage collected. Garbage collection will be attempted again in the next run.");
+                            }
+                        }
+                    } catch (Exception e) {
+                        s_logger.debug("Failed to destroy application cluster name:" + cluster.getName() + " during GC due to " + e);
+                        // proceed further with the rest of the container cluster garbage collection
+                    }
+                }
+            } catch (Exception e) {
+                s_logger.warn("Caught exception while running application cluster gc: ", e);
+            }
+        }
+    }
+
+    /**
+     * The ApplicationClusterStatusScanner checks whether each application cluster is in its desired state. If it detects
+     * that an application cluster is not in the desired state, it triggers an event and marks the cluster as being in the
+     * 'Alert' state. For example, for an application cluster in the 'Running' state, all of the cluster's node VMs should
+     * be running, the number of node VMs should match the cluster size, and the master node VM should be running. Due to
+     * out-of-band changes by the user or hosts going down, one or more VMs may end up stopped, in which case the scanner
+     * detects the change and marks the cluster as 'Alert'. Similarly, a cluster in the 'Stopped' state means all of the
+     * cluster's VMs are stopped, and any mismatch in state is picked up by the scanner, which then marks the application
+     * cluster as 'Alert'. Through recovery or reconciliation, clusters in 'Alert' are brought back to a known good or
+     * desired state.
+     */
+    public class ApplicationClusterStatusScanner extends ManagedContextRunnable {
+        @Override
+        protected void runInContext() {
+            GlobalLock gcLock = GlobalLock.getInternLock("ApplicationCluster.State.Scanner.Lock");
+            try {
+                if (gcLock.lock(3)) {
+                    try {
+                        reallyRun();
+                    } finally {
+                        gcLock.unlock();
+                    }
+                }
+            } finally {
+                gcLock.releaseRef();
+            }
+        }
+
+        public void reallyRun() {
+            try {
+
+                // run through the list of application clusters in 'Running' state and ensure all the VMs are Running in the cluster
+                List<ApplicationClusterVO> runningClusters = _applicationClusterDao.findClustersInState(ApplicationCluster.State.Running);
+                for (ApplicationCluster cluster : runningClusters ) {
+                    if (s_logger.isDebugEnabled()) {
+                        s_logger.debug("Running application cluster state scanner on cluster: " + cluster.getName() + " for state " + ApplicationCluster.State.Running);
+                    }
+                    try {
+                        if (!isClusterInDesiredState(cluster, VirtualMachine.State.Running)) {
+                            stateTransitTo(cluster.getId(), ApplicationCluster.Event.FaultsDetected);
+                        }
+                    } catch (Exception e) {
+                        s_logger.warn("Failed to run through VM states of application cluster due to " + e);
+                    }
+                }
+
+                // run through application clusters in 'Stopped' state and ensure all the VMs are Stopped in the cluster
+                List<ApplicationClusterVO> stoppedClusters = _applicationClusterDao.findClustersInState(ApplicationCluster.State.Stopped);
+                for (ApplicationCluster cluster : stoppedClusters ) {
+                    if (s_logger.isDebugEnabled()) {
+                        s_logger.debug("Running application cluster state scanner on cluster:" + cluster.getName() + " for state " + ApplicationCluster.State.Stopped);
+                    }
+                    try {
+                        if (!isClusterInDesiredState(cluster, VirtualMachine.State.Stopped)) {
+                            stateTransitTo(cluster.getId(), ApplicationCluster.Event.FaultsDetected);
+                        }
+                    } catch (Exception e) {
+                        s_logger.warn("Failed to run through VM states of application cluster due to " + e);
+                    }
+                }
+
+                // run through application clusters in 'Alert' state and reconcile the state to 'Running' if the VMs are running
+                List<ApplicationClusterVO> clustersInAlertState = _applicationClusterDao.findClustersInState(ApplicationCluster.State.Alert);
+                for (ApplicationCluster cluster : clustersInAlertState ) {
+                    if (s_logger.isDebugEnabled()) {
+                        s_logger.debug("Running application cluster state scanner on cluster:" + cluster.getName() + " for state " + ApplicationCluster.State.Alert);
+                    }
+                    try {
+                        if (isClusterInDesiredState(cluster, VirtualMachine.State.Running)) {
+                            // mark the cluster to be running
+                            stateTransitTo(cluster.getId(), ApplicationCluster.Event.RecoveryRequested);
+                            stateTransitTo(cluster.getId(), ApplicationCluster.Event.OperationSucceeded);
+                        }
+                    } catch (Exception e) {
+                        s_logger.warn("Failed to reconcile VM states of application cluster in Alert state due to " + e);
+                    }
+                }
+
+            } catch (Exception e) {
+                s_logger.warn("Caught exception while running application cluster state scanner.", e);
+            }
+        }
+    }
+
+    // checks whether the application cluster is in the desired state
+    boolean isClusterInDesiredState(ApplicationCluster applicationCluster, VirtualMachine.State state) {
+        List<ApplicationClusterVmMapVO> clusterVMs = _applicationClusterVmMapDao.listByClusterId(applicationCluster.getId());
+
+        // check whether all the VMs are in the same state
+        for (ApplicationClusterVmMapVO clusterVm : clusterVMs) {
+            VMInstanceVO vm = _vmInstanceDao.findByIdIncludingRemoved(clusterVm.getVmId());
+            if (vm.getState() != state) {
+                if (s_logger.isDebugEnabled()) {
+                    s_logger.debug("Found a VM in the application cluster: " + applicationCluster.getName() +
+                            " in state: " + vm.getState().toString() + " while it was expected to be in state: " + state.toString() +
+                            ", so moving the cluster to the Alert state for reconciliation.");
+                }
+                return false;
+            }
+        }
+
+        // check that the cluster is running at the desired capacity; the master node is included, so the count should be cluster size + 1
+        // TODO size + 1 is very topology specific and needs addressing. This should be an accumulation of nodetype.count for each nodetype
+        if (clusterVMs.size() != (applicationCluster.getNodeCount() + 1)) {
+            if (s_logger.isDebugEnabled()) {
+                s_logger.debug("Found only " + clusterVMs.size() + " VM's in the container cluster: " + applicationCluster.getName() +
+                        " in state: " + state.toString() + " While expected number of VM's to " +
+                        " be in state: " + state.toString() + " is " + (applicationCluster.getNodeCount() + 1) +
+                        " So moving the cluster to Alert state for reconciliation.");
+            }
+            return false;
+        }
+        return true;
+    }
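+
+    /*
+     * Illustrative sketch only, not called anywhere yet: as the TODO above notes, the expected VM count
+     * should eventually be an accumulation of the count of each node type plus the single master node,
+     * rather than the hard-coded nodeCount + 1. The method name and the per-node-type counts parameter
+     * are hypothetical; the counts are assumed to come from a topology description of the cluster.
+     */
+    private static long expectedClusterVmCount(final List<Long> nodeCountsPerType) {
+        long expected = 1; // the single master node
+        for (final Long nodeTypeCount : nodeCountsPerType) {
+            expected += nodeTypeCount;
+        }
+        return expected;
+    }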
+
+    @Override
+    public boolean start() {
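+        // schedule the background tasks: both start after a 300 second delay; the garbage collector then
+        // runs every 300 seconds and the state scanner every 30 seconds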
+        _gcExecutor.scheduleWithFixedDelay(new ApplicationClusterGarbageCollector(), 300, 300, TimeUnit.SECONDS);
+        _stateScanner.scheduleWithFixedDelay(new ApplicationClusterStatusScanner(), 300, 30, TimeUnit.SECONDS);
+
+        // run the database migration.
+        Properties dbProps = DbProperties.getDbProperties();
+        final String cloudUsername = dbProps.getProperty("db.cloud.username");
+        final String cloudPassword = dbProps.getProperty("db.cloud.password");
+        final String cloudHost = dbProps.getProperty("db.cloud.host");
+        final int cloudPort = Integer.parseInt(dbProps.getProperty("db.cloud.port"));
+        final String dbUrl = "jdbc:mysql://" + cloudHost + ":" + cloudPort + "/cloud";
+
+        try {
+            Flyway flyway = new Flyway();
+            flyway.setDataSource(dbUrl, cloudUsername, cloudPassword);
+
+            // name the migration metadata table application_cluster_service_version
+            flyway.setTable("application_cluster_service_version");
+
+            // treat the existing cloud DB schema and data as the baseline
+            flyway.setBaselineOnMigrate(true);
+            flyway.setBaselineVersionAsString("0");
+
+            // apply the application cluster service (CCS) schema
+            flyway.migrate();
+        } catch (FlywayException fwe) {
+            s_logger.error("Failed to run migration on Cloudstack Application Cluster Service database due to " + fwe);
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
+        _name = name;
+        _configParams = params;
+        _gcExecutor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("Application-Cluster-Scavenger"));
+        _stateScanner = Executors.newScheduledThreadPool(1, new NamedThreadFactory("Application-Cluster-State-Scanner"));
+
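+        // ensure a root CA keypair exists for the container cluster service; generate and persist one on first start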
+        final KeystoreVO keyStoreVO = keystoreDao.findByName(CCS_ROOTCA_KEYPAIR);
+        if (keyStoreVO == null) {
+                try {
+                    final KeyPair keyPair = generateRandomKeyPair();
+                    final String rootCACert = x509CertificateToPem(generateRootCACertificate(keyPair));
+                    final String rootCAKey = rsaPrivateKeyToPem(keyPair.getPrivate());
+                    keystoreDao.save(CCS_ROOTCA_KEYPAIR, rootCACert, rootCAKey, "");
+                    s_logger.info("No container cluster root CA keypair found; created and saved a keypair with certificate: \n" + rootCACert);
+                } catch (NoSuchProviderException | NoSuchAlgorithmException | CertificateEncodingException | SignatureException | InvalidKeyException | IOException e) {
+                    s_logger.error("Unable to create and save CCS rootCA keypair: " + e.toString());
+                }
+        }
+        return true;
+    }
+
+    /**
+     * @deprecated  this should move to {@link CertService}
+     * @param keyPair
+     * @return
+     * @throws NoSuchAlgorithmException
+     * @throws NoSuchProviderException
+     * @throws CertificateEncodingException
+     * @throws SignatureException
+     * @throws InvalidKeyException
+     */
+    @Deprecated
+    public X509Certificate generateRootCACertificate(KeyPair keyPair) throws NoSuchAlgorithmException, NoSuchProviderException, CertificateEncodingException, SignatureException, InvalidKeyException {
+        final DateTime now = DateTime.now(DateTimeZone.UTC);
+        final X500Principal dnName = new X500Principal(CCS_ROOTCA_CN);
+        final X509V1CertificateGenerator certGen = new X509V1CertificateGenerator();
+        certGen.setSerialNumber(BigInteger.valueOf(System.currentTimeMillis()));
+        certGen.setSubjectDN(dnName);
+        certGen.setIssuerDN(dnName);
+        certGen.setNotBefore(now.minusDays(1).toDate());
+        certGen.setNotAfter(now.plusYears(50).toDate());
+        certGen.setPublicKey(keyPair.getPublic());
+        certGen.setSignatureAlgorithm("SHA256WithRSAEncryption");
+        return certGen.generate(keyPair.getPrivate(), "BC");
+    }
+
+    /**
+     * @deprecated this should move to {@link CertService}
+     */
+    @Deprecated
+    public X509Certificate generateClientCertificate(final PrivateKey rootCAPrivateKey, final X509Certificate rootCACert,
+                                                     final KeyPair keyPair, final String publicIPAddress, final boolean isMasterNode) throws IOException, CertificateParsingException, InvalidKeyException, NoSuchAlgorithmException, CertificateEncodingException, NoSuchProviderException, SignatureException, InvalidKeySpecException {
+        final DateTime now = DateTime.now(DateTimeZone.UTC);
+        final X509V3CertificateGenerator certGen = new X509V3CertificateGenerator();
+        certGen.setSerialNumber(BigInteger.valueOf(System.currentTimeMillis()));
+        certGen.setIssuerDN(new X500Principal(CCS_ROOTCA_CN));
+        certGen.setSubjectDN(new X500Principal(CCS_CLUSTER_CN));
+        certGen.setNotBefore(now.minusDays(1).toDate());
+        certGen.setNotAfter(now.plusYears(10).toDate());
+        certGen.setPublicKey(keyPair.getPublic());
+        certGen.setSignatureAlgorithm("SHA256WithRSAEncryption");
+        certGen.addExtension(X509Extensions.AuthorityKeyIdentifier, false,
+                new AuthorityKeyIdentifierStructure(rootCACert));
+        certGen.addExtension(X509Extensions.SubjectKeyIdentifier, false,
+                new SubjectKeyIdentifier(keyPair.getPublic().getEncoded()));
+
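+        // For a master node, add subject alternative names so cluster components can validate the API server
+        // certificate: the cluster's public IP, the in-cluster service addresses used by this provisioning flow
+        // (presumably the first addresses of the service CIDRs), and the well-known in-cluster DNS names of the
+        // Kubernetes API service.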
+        if (isMasterNode) {
+            final List<ASN1Encodable> subjectAlternativeNames = new ArrayList<ASN1Encodable>();
+            subjectAlternativeNames.add(new GeneralName(GeneralName.iPAddress, publicIPAddress));
+            subjectAlternativeNames.add(new GeneralName(GeneralName.iPAddress, "10.0.0.1"));
+            subjectAlternativeNames.add(new GeneralName(GeneralName.iPAddress, "10.1.1.1"));
+            subjectAlternativeNames.add(new GeneralName(GeneralName.dNSName, "kubernetes"));
+            subjectAlternativeNames.add(new GeneralName(GeneralName.dNSName, "kubernetes.default"));
+            subjectAlternativeNames.add(new GeneralName(GeneralName.dNSName, "kubernetes.default.svc"));
+            subjectAlternativeNames.add(new GeneralName(GeneralName.dNSName, "kubernetes.default.svc.cluster.local"));
+
+            final DERSequence subjectAlternativeNamesExtension = new DERSequence(
+                    subjectAlternativeNames.toArray(new ASN1Encodable[subjectAlternativeNames.size()]));
+            certGen.addExtension(X509Extensions.SubjectAlternativeName, false,
+                    subjectAlternativeNamesExtension);
+        }
+
+        return certGen.generate(rootCAPrivateKey, "BC");
+    }
+
+    /**
+     * @deprecated this should move to {@link CertService}
+     */
+    @Deprecated
+    public KeyPair generateRandomKeyPair() throws NoSuchProviderException, NoSuchAlgorithmException {
+        Security.addProvider(new BouncyCastleProvider());
+        KeyPairGenerator keyPairGenerator = KeyPairGenerator.getInstance("RSA", "BC");
+        keyPairGenerator.initialize(2048, new SecureRandom());
+        return keyPairGenerator.generateKeyPair();
+    }
+
+    /**
+     * @deprecated this should move to {@link CertService}
+     */
+    @Deprecated
+    public KeyFactory getKeyFactory() {
+        KeyFactory keyFactory = null;
+        try {
+            Security.addProvider(new BouncyCastleProvider());
+            keyFactory = KeyFactory.getInstance("RSA", "BC");
+        } catch (NoSuchAlgorithmException | NoSuchProviderException e) {
+            s_logger.error("Unable to create KeyFactory:" + e.getMessage());
+        }
+        return keyFactory;
+    }
+
+    /**
+     * @deprecated this should move to {@link CertService}
+     */
+    @Deprecated
+    public X509Certificate pemToX509Cert(final String pem) throws IOException {
+        return (X509Certificate) certService.parseCertificate(pem);
+    }
+
+    /**
+     * @deprecated this should move to {@link CertService}
+     */
+    @Deprecated
+    public String x509CertificateToPem(final X509Certificate cert) throws IOException, CertificateEncodingException {
+
+        // convert the certificate into a PemObject and serialize it as PEM
+        try (final StringWriter sw = new StringWriter();
+                final PemWriter pw = new PemWriter(sw)) {
+            final PemObject pemObject = new PemObject(cert.getType(), cert.getEncoded());
+            pw.writeObject(pemObject);
+            return sw.toString();
+        }
+    }
+
+    /**
+     * @deprecated this should move to {@link CertService}
+     */
+    @Deprecated
+    public PrivateKey pemToRSAPrivateKey(final String pem) throws InvalidKeySpecException, IOException {
+        try (final PemReader pr = new PemReader(new StringReader(pem))) {
+            final PemObject pemObject = pr.readPemObject();
+            final KeyFactory keyFactory = getKeyFactory();
+            return keyFactory.generatePrivate(new PKCS8EncodedKeySpec(pemObject.getContent()));
+        }
+    }
+
+    /**
+     * @deprecated this should move to {@link CertService}
+     */
+    @Deprecated
+    public String rsaPrivateKeyToPem(final PrivateKey key) throws IOException {
+        final PemObject pemObject = new PemObject(CCS_RSA_PRIVATE_KEY, key.getEncoded());
+        final StringWriter sw = new StringWriter();
+        try (final PemWriter pw = new PemWriter(sw)) {
+            pw.writeObject(pemObject);
+        }
+        return sw.toString();
+    }
+}
diff --git a/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/ApplicationClusterService.java b/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/ApplicationClusterService.java
new file mode 100644
index 00000000000..ecae07c615f
--- /dev/null
+++ b/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/ApplicationClusterService.java
@@ -0,0 +1,59 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.applicationcluster;
+
+import com.cloud.exception.InsufficientCapacityException;
+import com.cloud.exception.ManagementServerException;
+import com.cloud.exception.ResourceAllocationException;
+import com.cloud.exception.ResourceUnavailableException;
+import com.cloud.user.Account;
+import com.cloud.utils.component.PluggableService;
+import org.apache.cloudstack.api.command.user.applicationcluster.ListApplicationClusterCmd;
+import org.apache.cloudstack.api.response.ApplicationClusterResponse;
+import org.apache.cloudstack.api.response.ListResponse;
+
+public interface ApplicationClusterService extends PluggableService {
+
+    ApplicationCluster findById(final Long id);
+
+    ApplicationCluster createContainerCluster(String name,
+                                          String displayName,
+                                          Long zoneId,
+                                          Long serviceOffering,
+                                          Account owner,
+                                          Long networkId,
+                                          String sshKeyPair,
+                                          Long nodeCount,
+                                          String dockerRegistryUsername,
+                                          String dockerRegistryPassword,
+                                          String dockerRegistryUrl,
+                                          String dockerRegistryEmail) throws InsufficientCapacityException,
+            ResourceAllocationException, ManagementServerException;
+
+    boolean startContainerCluster(long containerClusterId, boolean onCreate) throws ManagementServerException,
+            ResourceAllocationException, ResourceUnavailableException, InsufficientCapacityException;
+
+    boolean stopContainerCluster(long containerClusterId) throws ManagementServerException;
+
+    boolean deleteContainerCluster(Long containerClusterId) throws ManagementServerException;
+
+    ListResponse<ApplicationClusterResponse> listApplicationClusters(ListApplicationClusterCmd cmd);
+
+    ApplicationClusterResponse createContainerClusterResponse(long containerClusterId);
+
+}
diff --git a/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/ApplicationClusterVO.java b/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/ApplicationClusterVO.java
new file mode 100644
index 00000000000..d0f325b037a
--- /dev/null
+++ b/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/ApplicationClusterVO.java
@@ -0,0 +1,284 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.applicationcluster;
+
+import java.util.Date;
+import java.util.UUID;
+
+
+import javax.persistence.Column;
+
+import javax.persistence.Entity;
+import javax.persistence.Id;
+import javax.persistence.Table;
+import javax.persistence.GeneratedValue;
+import javax.persistence.GenerationType;
+
+import com.cloud.utils.db.GenericDao;
+
+@Entity
+@Table(name = "sb_ccs_container_cluster")
+public class ApplicationClusterVO implements ApplicationCluster {
+
+    public long getId() {
+        return id;
+    }
+
+    public void setId(long id) {
+        this.id = id;
+    }
+
+    public String getUuid() {
+        return uuid;
+    }
+
+    public void setUuid(String uuid) {
+        this.uuid = uuid;
+    }
+
+    public String getName() {
+        return name;
+    }
+
+    public void setName(String name) {
+        this.name = name;
+    }
+
+    public String getDescription() {
+        return description;
+    }
+
+    public void setDescription(String description) {
+        this.description = description;
+    }
+
+    public long getZoneId() {
+        return zoneId;
+    }
+
+    public void setZoneId(long zoneId) {
+        this.zoneId = zoneId;
+    }
+
+    public long getServiceOfferingId() {
+        return serviceOfferingId;
+    }
+
+    public void setServiceOfferingId(long serviceOfferingId) {
+        this.serviceOfferingId = serviceOfferingId;
+    }
+
+    public long getTemplateId() {
+        return templateId;
+    }
+
+    public void setTemplateId(long templateId) {
+        this.templateId = templateId;
+    }
+
+    public long getNetworkId() {
+        return networkId;
+    }
+
+    public void setNetworkId(long networkId) {
+        this.networkId = networkId;
+    }
+
+    public long getDomainId() {
+        return domainId;
+    }
+
+    public void setDomainId(long domainId) {
+        this.domainId = domainId;
+    }
+
+    public long getAccountId() {
+        return accountId;
+    }
+
+    public void setAccountId(long accountId) {
+        this.accountId = accountId;
+    }
+
+    public long getNodeCount() {
+        return nodeCount;
+    }
+
+    public void setNodeCount(long nodeCount) {
+        this.nodeCount = nodeCount;
+    }
+
+    public long getCores() {
+        return cores;
+    }
+
+    public void setCores(long cores) {
+        this.cores = cores;
+    }
+
+    public long getMemory() {
+        return memory;
+    }
+
+    public void setMemory(long memory) {
+        this.memory = memory;
+    }
+
+    public State getState() {
+        return state;
+    }
+
+    public void setState(State state) {
+        this.state = state;
+    }
+
+    public String getEndpoint() {
+        return endpoint;
+    }
+
+    public void setEndpoint(String endpoint) {
+        this.endpoint = endpoint;
+    }
+
+    public String getKeyPair() {
+        return keyPair;
+    }
+
+    public void setKeyPair(String keyPair) {
+        this.keyPair = keyPair;
+    }
+
+    public String getConsoleEndpoint() {
+        return consoleEndpoint;
+    }
+
+    public void setConsoleEndpoint(String consoleEndpoint) {
+        this.consoleEndpoint = consoleEndpoint;
+    }
+
+    @Id
+    @GeneratedValue(strategy = GenerationType.IDENTITY)
+    @Column(name = "id")
+    long id;
+
+    @Column(name = "uuid")
+    String uuid;
+
+    @Column(name = "name")
+    private String name;
+
+    @Column(name = "description", length = 4096)
+    private String description;
+
+    @Column(name = "zone_id")
+    long zoneId;
+
+    @Column(name = "service_offering_id")
+    long serviceOfferingId;
+
+    @Column(name = "template_id")
+    long templateId;
+
+    @Column(name = "network_id")
+    long networkId;
+
+    @Column(name = "domain_id")
+    protected long domainId;
+
+    @Column(name = "account_id")
+    protected long accountId;
+
+    @Column(name = "node_count")
+    long nodeCount;
+
+    @Column(name = "cores")
+    long cores;
+
+    @Column(name = "memory")
+    long memory;
+
+    @Column(name = "state")
+    State state;
+
+    @Column(name = "key_pair")
+    String keyPair;
+
+    @Column(name = "endpoint")
+    String endpoint;
+
+    @Column(name = "console_endpoint")
+    String consoleEndpoint;
+
+    @Column(name = GenericDao.CREATED_COLUMN)
+    protected Date created;
+
+    @Column(name = GenericDao.REMOVED_COLUMN)
+    protected Date removed;
+
+    @Column(name = "gc")
+    boolean checkForGc;
+
+    public ApplicationClusterVO() {
+
+    }
+
+    public ApplicationClusterVO(String name, String description, long zoneId, long serviceOfferingId, long templateId,
+                            long networkId, long domainId, long accountId, long nodeCount, State state,
+                            String keyPair, long cores, long memory, String endpoint, String consoleEndpoint) {
+        this.uuid = UUID.randomUUID().toString();
+        this.name = name;
+        this.description = description;
+        this.zoneId = zoneId;
+        this.serviceOfferingId = serviceOfferingId;
+        this.templateId = templateId;
+        this.networkId = networkId;
+        this.domainId = domainId;
+        this.accountId = accountId;
+        this.nodeCount = nodeCount;
+        this.state = state;
+        this.keyPair = keyPair;
+        this.cores = cores;
+        this.memory = memory;
+        this.endpoint = endpoint;
+        this.consoleEndpoint = consoleEndpoint;
+        this.checkForGc = false;
+    }
+
+    public Class<?> getEntityType() {
+        return ApplicationCluster.class;
+    }
+
+    public boolean isDisplay() {
+        return true;
+    }
+
+
+    public Date getRemoved() {
+        if (removed == null)
+            return null;
+        return new Date(removed.getTime());
+    }
+
+    public boolean ischeckForGc() {
+        return checkForGc;
+    }
+
+    public void setCheckForGc(boolean check) {
+        checkForGc = check;
+    }
+
+}
diff --git a/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/ApplicationClusterVmMap.java b/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/ApplicationClusterVmMap.java
new file mode 100644
index 00000000000..6b32ea328fe
--- /dev/null
+++ b/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/ApplicationClusterVmMap.java
@@ -0,0 +1,27 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.applicationcluster;
+
+/**
+ * ApplicationClusterVmMap describes the mapping between an application cluster and a virtual machine that belongs to it.
+ */
+public interface ApplicationClusterVmMap {
+    long getId();
+    long getClusterId();
+    long getVmId();
+}
diff --git a/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/ApplicationClusterVmMapVO.java b/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/ApplicationClusterVmMapVO.java
new file mode 100644
index 00000000000..6d873e26ff7
--- /dev/null
+++ b/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/ApplicationClusterVmMapVO.java
@@ -0,0 +1,73 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.applicationcluster;
+
+import javax.persistence.Column;
+
+import javax.persistence.Entity;
+import javax.persistence.Id;
+import javax.persistence.Table;
+import javax.persistence.GeneratedValue;
+import javax.persistence.GenerationType;
+
+@Entity
+@Table(name = "sb_ccs_container_cluster_vm_map")
+public class ApplicationClusterVmMapVO implements ApplicationClusterVmMap {
+
+    public long getId() {
+        return id;
+    }
+
+    public long getClusterId() {
+        return clusterId;
+    }
+
+    public void setClusterId(long clusterId) {
+        this.clusterId = clusterId;
+    }
+
+    public long getVmId() {
+        return vmId;
+    }
+
+    public void setVmId(long vmId) {
+        this.vmId = vmId;
+    }
+
+    @Id
+    @GeneratedValue(strategy = GenerationType.IDENTITY)
+    @Column(name = "id")
+    long id;
+
+    @Column(name = "cluster_id")
+    long clusterId;
+
+    @Column(name = "vm_id")
+    long vmId;
+
+    public ApplicationClusterVmMapVO() {
+
+    }
+
+    public ApplicationClusterVmMapVO(long clusterId, long vmId) {
+        this.vmId = vmId;
+        this.clusterId = clusterId;
+    }
+}
diff --git a/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/dao/ApplicationClusterDao.java b/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/dao/ApplicationClusterDao.java
new file mode 100644
index 00000000000..a3316a491b8
--- /dev/null
+++ b/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/dao/ApplicationClusterDao.java
@@ -0,0 +1,33 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.applicationcluster.dao;
+
+import org.apache.cloudstack.applicationcluster.ApplicationCluster;
+import org.apache.cloudstack.applicationcluster.ApplicationClusterVO;
+import com.cloud.utils.db.GenericDao;
+import com.cloud.utils.fsm.StateDao;
+
+import java.util.List;
+
+public interface ApplicationClusterDao extends GenericDao<ApplicationClusterVO, Long>,
+        StateDao<ApplicationCluster.State, ApplicationCluster.Event, ApplicationCluster> {
+
+    List<ApplicationClusterVO> listByAccount(long accountId);
+    List<ApplicationClusterVO> findClustersToGarbageCollect();
+    List<ApplicationClusterVO> findClustersInState(ApplicationCluster.State state);
+    List<ApplicationClusterVO> listByNetworkId(long networkId);
+}
diff --git a/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/dao/ApplicationClusterDaoImpl.java b/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/dao/ApplicationClusterDaoImpl.java
new file mode 100644
index 00000000000..f4f2fcf42ab
--- /dev/null
+++ b/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/dao/ApplicationClusterDaoImpl.java
@@ -0,0 +1,100 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.applicationcluster.dao;
+
+import org.apache.cloudstack.applicationcluster.ApplicationCluster;
+import org.apache.cloudstack.applicationcluster.ApplicationClusterVO;
+import com.cloud.utils.db.SearchBuilder;
+import com.cloud.utils.db.SearchCriteria;
+
+import org.springframework.stereotype.Component;
+
+import com.cloud.utils.db.GenericDaoBase;
+import com.cloud.utils.db.TransactionLegacy;
+
+import java.util.List;
+
+@Component
+public class ApplicationClusterDaoImpl extends GenericDaoBase<ApplicationClusterVO, Long> implements ApplicationClusterDao {
+
+    private final SearchBuilder<ApplicationClusterVO> AccountIdSearch;
+    private final SearchBuilder<ApplicationClusterVO> GarbageCollectedSearch;
+    private final SearchBuilder<ApplicationClusterVO> StateSearch;
+    private final SearchBuilder<ApplicationClusterVO> SameNetworkSearch;
+
+    public ApplicationClusterDaoImpl() {
+        AccountIdSearch = createSearchBuilder();
+        AccountIdSearch.and("account", AccountIdSearch.entity().getAccountId(), SearchCriteria.Op.EQ);
+        AccountIdSearch.done();
+
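+        // selects clusters flagged for garbage collection that are not already in the Destroying state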
+        GarbageCollectedSearch = createSearchBuilder();
+        GarbageCollectedSearch.and("gc", GarbageCollectedSearch.entity().ischeckForGc(), SearchCriteria.Op.EQ);
+        GarbageCollectedSearch.and("state", GarbageCollectedSearch.entity().getState(), SearchCriteria.Op.NEQ);
+        GarbageCollectedSearch.done();
+
+        StateSearch = createSearchBuilder();
+        StateSearch.and("state", StateSearch.entity().getState(), SearchCriteria.Op.EQ);
+        StateSearch.done();
+
+        SameNetworkSearch = createSearchBuilder();
+        SameNetworkSearch.and("network_id", SameNetworkSearch.entity().getNetworkId(), SearchCriteria.Op.EQ);
+        SameNetworkSearch.done();
+    }
+
+    @Override
+    public List<ApplicationClusterVO> listByAccount(long accountId) {
+        SearchCriteria<ApplicationClusterVO> sc = AccountIdSearch.create();
+        sc.setParameters("account", accountId);
+        return listBy(sc, null);
+    }
+
+    @Override
+    public List<ApplicationClusterVO> findClustersToGarbageCollect() {
+        SearchCriteria<ApplicationClusterVO> sc = GarbageCollectedSearch.create();
+        sc.setParameters("gc", true);
+        sc.setParameters("state", ApplicationCluster.State.Destroying);
+        return listBy(sc);
+    }
+
+    @Override
+    public List<ApplicationClusterVO> findClustersInState(ApplicationCluster.State state) {
+        SearchCriteria<ApplicationClusterVO> sc = StateSearch.create();
+        sc.setParameters("state", state);
+        return listBy(sc);
+    }
+
+    @Override
+    public boolean updateState(ApplicationCluster.State currentState, ApplicationCluster.Event event, ApplicationCluster.State nextState,
+                               ApplicationCluster vo, Object data) {
+        // TODO: ensure this update is correct
+        TransactionLegacy txn = TransactionLegacy.currentTxn();
+        txn.start();
+
+        ApplicationClusterVO ccVo = (ApplicationClusterVO)vo;
+        ccVo.setState(nextState);
+        super.update(ccVo.getId(), ccVo);
+
+        txn.commit();
+        return true;
+    }
+
+    @Override
+    public List<ApplicationClusterVO> listByNetworkId(long networkId) {
+        SearchCriteria<ApplicationClusterVO> sc = SameNetworkSearch.create();
+        sc.setParameters("network_id", networkId);
+        return this.listBy(sc);
+    }
+}
diff --git a/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/dao/ApplicationClusterDetailsDao.java b/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/dao/ApplicationClusterDetailsDao.java
new file mode 100644
index 00000000000..abd67890ed6
--- /dev/null
+++ b/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/dao/ApplicationClusterDetailsDao.java
@@ -0,0 +1,26 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.applicationcluster.dao;
+
+
+import org.apache.cloudstack.applicationcluster.ApplicationClusterDetailsVO;
+import com.cloud.utils.db.GenericDao;
+
+
+public interface ApplicationClusterDetailsDao extends GenericDao<ApplicationClusterDetailsVO, Long> {
+    public ApplicationClusterDetailsVO findByClusterId(long clusterId);
+}
diff --git a/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/dao/ApplicationClusterDetailsDaoImpl.java b/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/dao/ApplicationClusterDetailsDaoImpl.java
new file mode 100644
index 00000000000..1dd66b2ee8f
--- /dev/null
+++ b/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/dao/ApplicationClusterDetailsDaoImpl.java
@@ -0,0 +1,44 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.applicationcluster.dao;
+
+import org.apache.cloudstack.applicationcluster.ApplicationClusterDetailsVO;
+import com.cloud.utils.db.SearchBuilder;
+import com.cloud.utils.db.SearchCriteria;
+import org.springframework.stereotype.Component;
+
+import com.cloud.utils.db.GenericDaoBase;
+
+
+@Component
+public class ApplicationClusterDetailsDaoImpl extends GenericDaoBase<ApplicationClusterDetailsVO, Long> implements ApplicationClusterDetailsDao {
+
+    private final SearchBuilder<ApplicationClusterDetailsVO> clusterIdSearch;
+
+    public ApplicationClusterDetailsDaoImpl() {
+        clusterIdSearch = createSearchBuilder();
+        clusterIdSearch.and("clusterId", clusterIdSearch.entity().getClusterId(), SearchCriteria.Op.EQ);
+        clusterIdSearch.done();
+    }
+
+    @Override
+    public ApplicationClusterDetailsVO findByClusterId(long clusterId) {
+        SearchCriteria<ApplicationClusterDetailsVO> sc = clusterIdSearch.create();
+        sc.setParameters("clusterId", clusterId);
+        return findOneBy(sc);
+    }
+}
diff --git a/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/dao/ApplicationClusterVmMapDao.java b/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/dao/ApplicationClusterVmMapDao.java
new file mode 100644
index 00000000000..7a030de4ccf
--- /dev/null
+++ b/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/dao/ApplicationClusterVmMapDao.java
@@ -0,0 +1,26 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.applicationcluster.dao;
+
+import org.apache.cloudstack.applicationcluster.ApplicationClusterVmMapVO;
+import com.cloud.utils.db.GenericDao;
+
+import java.util.List;
+
+public interface ApplicationClusterVmMapDao extends GenericDao<ApplicationClusterVmMapVO, Long> {
+    public List<ApplicationClusterVmMapVO> listByClusterId(long clusterId);
+}
diff --git a/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/dao/ApplicationClusterVmMapDaoImpl.java b/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/dao/ApplicationClusterVmMapDaoImpl.java
new file mode 100644
index 00000000000..fd2fffadc5f
--- /dev/null
+++ b/plugins/application-clusters/src/org/apache/cloudstack/applicationcluster/dao/ApplicationClusterVmMapDaoImpl.java
@@ -0,0 +1,46 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.applicationcluster.dao;
+
+import org.apache.cloudstack.applicationcluster.ApplicationClusterVmMapVO;
+import com.cloud.utils.db.SearchBuilder;
+import com.cloud.utils.db.SearchCriteria;
+import org.springframework.stereotype.Component;
+
+import com.cloud.utils.db.GenericDaoBase;
+
+import java.util.List;
+
+
+@Component
+public class ApplicationClusterVmMapDaoImpl extends GenericDaoBase<ApplicationClusterVmMapVO, Long> implements ApplicationClusterVmMapDao {
+
+    private final SearchBuilder<ApplicationClusterVmMapVO> clusterIdSearch;
+
+    public ApplicationClusterVmMapDaoImpl() {
+        clusterIdSearch = createSearchBuilder();
+        clusterIdSearch.and("clusterId", clusterIdSearch.entity().getClusterId(), SearchCriteria.Op.EQ);
+        clusterIdSearch.done();
+    }
+
+    @Override
+    public List<ApplicationClusterVmMapVO> listByClusterId(long clusterId) {
+        SearchCriteria<ApplicationClusterVmMapVO> sc = clusterIdSearch.create();
+        sc.setParameters("clusterId", clusterId);
+        return listBy(sc, null);
+    }
+}
diff --git a/plugins/application-clusters/test/applicationClustersContext.xml b/plugins/application-clusters/test/applicationClustersContext.xml
new file mode 100644
index 00000000000..a905096ed78
--- /dev/null
+++ b/plugins/application-clusters/test/applicationClustersContext.xml
@@ -0,0 +1,45 @@
+<!-- Licensed to the Apache Software Foundation (ASF) under one or more contributor
+  license agreements. See the NOTICE file distributed with this work for additional
+  information regarding copyright ownership. The ASF licenses this file to
+  you under the Apache License, Version 2.0 (the "License"); you may not use
+  this file except in compliance with the License. You may obtain a copy of
+  the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required
+  by applicable law or agreed to in writing, software distributed under the
+  License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
+  OF ANY KIND, either express or implied. See the License for the specific
+  language governing permissions and limitations under the License. -->
+<beans xmlns="http://www.springframework.org/schema/beans"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:context="http://www.springframework.org/schema/context"
+  xmlns:tx="http://www.springframework.org/schema/tx" xmlns:aop="http://www.springframework.org/schema/aop"
+  xsi:schemaLocation="http://www.springframework.org/schema/beans
+                      http://www.springframework.org/schema/beans/spring-beans.xsd
+                      http://www.springframework.org/schema/tx
+                      http://www.springframework.org/schema/tx/spring-tx.xsd
+                      http://www.springframework.org/schema/aop
+                      http://www.springframework.org/schema/aop/spring-aop.xsd
+                      http://www.springframework.org/schema/context
+                      http://www.springframework.org/schema/context/spring-context.xsd">
+
+     <context:annotation-config />
+     <context:component-scan
+     base-package="org.apache.cloudstack.applicationcluster"/>
+
+  <bean id="componentContext" class="com.cloud.utils.component.ComponentContext" />
+  <bean id="transactionContextBuilder" class="com.cloud.utils.db.TransactionContextBuilder" />
+  <bean id="actionEventInterceptor" class="com.cloud.event.ActionEventInterceptor" />
+  <bean id="instantiatePostProcessor" class="com.cloud.utils.component.ComponentInstantiationPostProcessor">
+    <property name="Interceptors">
+        <list>
+            <ref bean="transactionContextBuilder" />
+            <ref bean="actionEventInterceptor" />
+        </list>
+    </property>
+  </bean>
+
+    <bean id="ConfigurationManager" class="com.cloud.configuration.ConfigurationManagerImpl">
+        <property name="name" value="ConfigurationManager"/>
+    </bean>
+
+    <bean class="org.apache.cloudstack.applicationcluster.ApplicationClusterManagerImplTestConfiguration" />
+
+</beans>
diff --git a/plugins/application-clusters/test/org/apache/cloudstack/applicationcluster/ApplicationClusterManagerImplTest.java b/plugins/application-clusters/test/org/apache/cloudstack/applicationcluster/ApplicationClusterManagerImplTest.java
new file mode 100644
index 00000000000..42d60c49a05
--- /dev/null
+++ b/plugins/application-clusters/test/org/apache/cloudstack/applicationcluster/ApplicationClusterManagerImplTest.java
@@ -0,0 +1,264 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.applicationcluster;
+
+import com.cloud.capacity.CapacityManager;
+import org.apache.cloudstack.applicationcluster.dao.ApplicationClusterDao;
+import org.apache.cloudstack.applicationcluster.dao.ApplicationClusterDetailsDao;
+import org.apache.cloudstack.applicationcluster.dao.ApplicationClusterVmMapDao;
+import com.cloud.dc.ClusterDetailsDao;
+import com.cloud.dc.ClusterDetailsVO;
+import com.cloud.dc.ClusterVO;
+import com.cloud.dc.DataCenter.NetworkType;
+import com.cloud.dc.DataCenterVO;
+import com.cloud.dc.dao.ClusterDao;
+import com.cloud.dc.dao.DataCenterDao;
+import com.cloud.deploy.DeployDestination;
+import com.cloud.exception.InsufficientServerCapacityException;
+import com.cloud.host.Host.Type;
+import com.cloud.host.HostVO;
+import com.cloud.host.Status;
+import com.cloud.network.NetworkModel;
+import com.cloud.network.NetworkService;
+import com.cloud.network.dao.IPAddressDao;
+import com.cloud.network.dao.NetworkDao;
+import com.cloud.network.dao.PhysicalNetworkDao;
+import com.cloud.network.firewall.FirewallService;
+import com.cloud.network.rules.RulesService;
+import com.cloud.network.rules.dao.PortForwardingRulesDao;
+import com.cloud.offerings.dao.NetworkOfferingDao;
+import com.cloud.offerings.dao.NetworkOfferingServiceMapDao;
+import com.cloud.resource.ResourceManager;
+import com.cloud.service.ServiceOfferingVO;
+import com.cloud.service.dao.ServiceOfferingDao;
+import com.cloud.storage.Storage.StoragePoolType;
+import com.cloud.storage.dao.VMTemplateDao;
+import com.cloud.user.AccountManager;
+import com.cloud.user.dao.AccountDao;
+import com.cloud.user.dao.SSHKeyPairDao;
+import com.cloud.vm.UserVmService;
+import com.cloud.vm.VirtualMachine;
+import com.cloud.vm.dao.UserVmDao;
+import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService;
+import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
+import org.apache.log4j.Logger;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+import org.mockito.Spy;
+import org.springframework.test.context.ContextConfiguration;
+import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
+
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.List;
+
+import static org.mockito.Matchers.anyBoolean;
+import static org.mockito.Matchers.anyFloat;
+import static org.mockito.Matchers.anyInt;
+import static org.mockito.Matchers.anyLong;
+import static org.mockito.Mockito.when;
+
+@RunWith(SpringJUnit4ClassRunner.class)
+@ContextConfiguration(locations = {"classpath*:**/applicationClustersContext.xml"})
+public class ApplicationClusterManagerImplTest {
+    public static final Logger s_logger = Logger.getLogger(ApplicationClusterManagerImplTest.class);
+
+    @Spy
+    ApplicationClusterManagerImpl ccManager = new ApplicationClusterManagerImpl();
+
+    @Mock
+    ApplicationClusterDao applicationClusterDao;
+    @Mock
+    ApplicationClusterVmMapDao clusterVmMapDao;
+    @Mock
+    ApplicationClusterDetailsDao applicationClusterDetailsDao;
+    @Mock
+    protected SSHKeyPairDao sshKeyPairDao;
+    @Mock
+    public UserVmService userVmService;
+    @Mock
+    protected DataCenterDao dcDao;
+    @Mock
+    protected ServiceOfferingDao offeringDao;
+    @Mock
+    protected VMTemplateDao templateDao;
+    @Mock
+    protected AccountDao accountDao;
+    @Mock
+    private UserVmDao vmDao;
+    @Mock
+    ConfigurationDao globalConfigDao;
+    @Mock
+    NetworkService networkService;
+    @Mock
+    NetworkOfferingDao networkOfferingDao;
+    @Mock
+    protected NetworkModel networkModel;
+    @Mock
+    PhysicalNetworkDao physicalNetworkDao;
+    @Mock
+    protected NetworkOrchestrationService networkMgr;
+    @Mock
+    protected NetworkDao networkDao;
+    @Mock
+    private IPAddressDao publicIpAddressDao;
+    @Mock
+    PortForwardingRulesDao portForwardingDao;
+    @Mock
+    private FirewallService firewallService;
+    @Mock
+    public RulesService rulesService;
+    @Mock
+    public NetworkOfferingServiceMapDao ntwkOfferingServiceMapDao;
+    @Mock
+    public AccountManager accountMgr;
+    @Mock
+    public ApplicationClusterVmMapDao applicationClusterVmMapDao;
+    @Mock
+    public ServiceOfferingDao srvOfferingDao;
+    @Mock
+    public UserVmDao userVmDao;
+    @Mock
+    public CapacityManager capacityMgr;
+    @Mock
+    public ResourceManager resourceMgr;
+    @Mock
+    public ClusterDetailsDao clusterDetailsDao;
+    @Mock
+    public ClusterDao clusterDao;
+
+    @Before
+    public void setUp() {
+        MockitoAnnotations.initMocks(this);
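+        // Wire the mocked DAOs and services into the spied manager so plan() runs purely against mocks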
+        ccManager._applicationClusterDao = applicationClusterDao;
+        ccManager._clusterVmMapDao = clusterVmMapDao;
+        ccManager._applicationClusterDetailsDao = applicationClusterDetailsDao;
+        ccManager._sshKeyPairDao = sshKeyPairDao;
+        ccManager._userVmService = userVmService;
+        ccManager._dcDao = dcDao;
+        ccManager._offeringDao = offeringDao;
+        ccManager._templateDao = templateDao;
+        ccManager._accountDao = accountDao;
+        ccManager._globalConfigDao = globalConfigDao;
+        ccManager._networkService = networkService;
+        ccManager._networkOfferingDao = networkOfferingDao;
+        ccManager._networkMgr = networkMgr;
+        ccManager._physicalNetworkDao = physicalNetworkDao;
+        ccManager._portForwardingDao = portForwardingDao;
+        ccManager._rulesService = rulesService;
+        ccManager._srvOfferingDao = srvOfferingDao;
+        ccManager._userVmDao = userVmDao;
+        ccManager._capacityMgr = capacityMgr;
+        ccManager._resourceMgr = resourceMgr;
+        ccManager._clusterDetailsDao = clusterDetailsDao;
+        ccManager._clusterDao = clusterDao;
+    }
+
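+    // With no Routing hosts available in any zone, plan() is expected to throw InsufficientServerCapacityException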
+    @Test(expected = InsufficientServerCapacityException.class)
+    public void checkPlanWithNoHostInDC() throws InsufficientServerCapacityException {
+        ApplicationClusterVO containerCluster = new ApplicationClusterVO();
+        containerCluster.setServiceOfferingId(1L);
+        containerCluster.setNodeCount(5);
+        when(applicationClusterDao.findById(1L)).thenReturn(containerCluster);
+        ServiceOfferingVO offering = new ServiceOfferingVO("test", 1, 500, 512, 0, 0, true, "test", null, false, true, "", true, VirtualMachine.Type.User, true);
+        when(srvOfferingDao.findById(1L)).thenReturn(offering);
+
+        List<HostVO> hl = new ArrayList<HostVO>();
+        when(resourceMgr.listAllHostsInAllZonesByType(Type.Routing)).thenReturn(hl);
+
+        ClusterVO cluster = new ClusterVO(1L);
+        when(clusterDao.findById(1L)).thenReturn(cluster);
+
+        ClusterDetailsVO cluster_detail_cpu = new ClusterDetailsVO(1L, "cpuOvercommitRatio", "1");
+        when(clusterDetailsDao.findDetail(cluster.getId(), "cpuOvercommitRatio")).thenReturn(cluster_detail_cpu);
+        ClusterDetailsVO cluster_detail_ram = new ClusterDetailsVO(1L, "memoryOvercommitRatio", "1");
+        when(clusterDetailsDao.findDetail(cluster.getId(), "memoryOvercommitRatio")).thenReturn(cluster_detail_ram);
+
+        when(capacityMgr.checkIfHostHasCapacity(anyLong(), anyInt(), anyInt(), anyBoolean(), anyFloat(), anyFloat(), anyBoolean())).thenReturn(true);
+
+        ccManager.plan(1, 1);
+    }
+
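+    // A Routing host with capacity exists in the zone, so plan() should return a DeployDestination in that data center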
+    @Test
+    public void checkPlanWithHostInDC() throws InsufficientServerCapacityException {
+        ApplicationClusterVO containerCluster = new ApplicationClusterVO();
+        containerCluster.setServiceOfferingId(1L);
+        containerCluster.setNodeCount(0);
+        when(applicationClusterDao.findById(1L)).thenReturn(containerCluster);
+        ServiceOfferingVO offering = new ServiceOfferingVO("test", 1, 500, 512, 0, 0, true, "test", null, false, true, "", true, null, true);
+        when(srvOfferingDao.findById(1L)).thenReturn(offering);
+
+        List<HostVO> hl = new ArrayList<HostVO>();
+        HostVO h1 = new HostVO(1L, "testHost1", Type.Routing, "", "", "", "", "", "", "", "", "", "", "", "", "", Status.Up, "1.0", "", new Date(), 1L, 1L, 1L, 1L, "", 1L,
+                StoragePoolType.Filesystem);
+        h1.setClusterId(1L);
+        h1.setUuid("uuid-test");
+        hl.add(h1);
+        when(resourceMgr.listAllHostsInOneZoneByType(Type.Routing, 1)).thenReturn(hl);
+        ClusterVO cluster = new ClusterVO(1L);
+        when(clusterDao.findById(1L)).thenReturn(cluster);
+
+        ClusterDetailsVO cluster_detail_cpu = new ClusterDetailsVO(1L, "cpuOvercommitRatio", "1");
+        when(clusterDetailsDao.findDetail(cluster.getId(), "cpuOvercommitRatio")).thenReturn(cluster_detail_cpu);
+        ClusterDetailsVO cluster_detail_ram = new ClusterDetailsVO(1L, "memoryOvercommitRatio", "1");
+        when(clusterDetailsDao.findDetail(cluster.getId(), "memoryOvercommitRatio")).thenReturn(cluster_detail_ram);
+
+        when(capacityMgr.checkIfHostHasCapacity(anyLong(), anyInt(), anyInt(), anyBoolean(), anyFloat(), anyFloat(), anyBoolean())).thenReturn(true);
+        when(dcDao.findById(1L)).thenReturn(new DataCenterVO(1L, "test-dc", "test-desc", "", "", "", "", "", "", 1L, NetworkType.Advanced, "", ""));
+
+        DeployDestination dd = ccManager.plan(1, 1);
+
+        Assert.assertEquals(dd.getDataCenter().getId(), 1L);
+    }
+
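+    // A Routing host exists but reports no capacity, so plan() is expected to throw InsufficientServerCapacityException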
+    @Test(expected = InsufficientServerCapacityException.class)
+    public void checkPlanWithHostInDCNoCapacity() throws InsufficientServerCapacityException {
+        ApplicationClusterVO containerCluster = new ApplicationClusterVO();
+        containerCluster.setServiceOfferingId(1L);
+        containerCluster.setNodeCount(0);
+        when(applicationClusterDao.findById(1L)).thenReturn(containerCluster);
+        ServiceOfferingVO offering = new ServiceOfferingVO("test", 1, 500, 512, 0, 0, true, "test", null, false, true, "", true, VirtualMachine.Type.User, true);
+        when(srvOfferingDao.findById(1L)).thenReturn(offering);
+
+        List<HostVO> hl = new ArrayList<HostVO>();
+        HostVO h1 = new HostVO(1L, "testHost1", Type.Routing, "", "", "", "", "", "", "", "", "", "", "", "", "", Status.Up, "1.0", "", new Date(), 1L, 1L, 1L, 1L, "", 1L,
+                StoragePoolType.Filesystem);
+        h1.setClusterId(1L);
+        h1.setUuid("uuid-test");
+        hl.add(h1);
+        when(resourceMgr.listAllHostsInAllZonesByType(Type.Routing)).thenReturn(hl);
+
+        ClusterVO cluster = new ClusterVO(1L);
+        when(clusterDao.findById(1L)).thenReturn(cluster);
+
+        ClusterDetailsVO cluster_detail_cpu = new ClusterDetailsVO(1L, "cpuOvercommitRatio", "1");
+        when(clusterDetailsDao.findDetail(cluster.getId(), "cpuOvercommitRatio")).thenReturn(cluster_detail_cpu);
+        ClusterDetailsVO cluster_detail_ram = new ClusterDetailsVO(1L, "memoryOvercommitRatio", "1");
+        when(clusterDetailsDao.findDetail(cluster.getId(), "memoryOvercommitRatio")).thenReturn(cluster_detail_ram);
+
+        when(capacityMgr.checkIfHostHasCapacity(anyLong(), anyInt(), anyInt(), anyBoolean(), anyFloat(), anyFloat(), anyBoolean())).thenReturn(false);
+        when(dcDao.findById(1L)).thenReturn(new DataCenterVO(1L, "test-dc", "test-desc", "", "", "", "", "", "", 1L, NetworkType.Advanced, "", ""));
+
+        DeployDestination dd = ccManager.plan(1, 1);
+    }
+
+}
diff --git a/plugins/application-clusters/test/test_container_cluster_life_cycle.py b/plugins/application-clusters/test/test_container_cluster_life_cycle.py
new file mode 100644
index 00000000000..020bb461d5b
--- /dev/null
+++ b/plugins/application-clusters/test/test_container_cluster_life_cycle.py
@@ -0,0 +1,178 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import marvin
+from marvin.cloudstackTestCase import *
+from marvin.cloudstackAPI import *
+from marvin.lib.utils import *
+from marvin.lib.base import *
+from marvin.lib.common import *
+from marvin.lib.utils import (random_gen)
+from nose.plugins.attrib import attr
+import cmd
+
+class TestContainerClusterLifeCycle(cloudstackTestCase):
+    """
+        Tests for container cluster life cycle operations
+    """
+
+    @classmethod
+    def setUpClass(cls):
+        testClient = super(TestContainerClusterLifeCycle, cls).getClsTestClient()
+        cls.apiclient = testClient.getApiClient()
+        cls.services = testClient.getParsedTestDataConfig()
+
+        cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
+        cls.service_offering = ServiceOffering.create(
+            cls.apiclient,
+            cls.services["service_offerings"]
+        )
+        cls.container_cluster = ContainerCluster.create(
+            cls.apiclient,
+            name="TestContainerCluster",
+            zoneid=cls.zone.id,
+            serviceofferingid=cls.service_offering.id,
+            size=2)
+
+        cls._cleanup = [cls.service_offering]
+
+    @classmethod
+    def tearDownClass(cls):
+        cls.apiclient = super(TestContainerClusterLifeCycle, cls).getClsTestClient().getApiClient()
+        try:
+            cleanup_resources(cls.apiclient, cls._cleanup)
+        except Exception as e:
+            raise Exception("Warning: Exception during cleanup : %s" % e)
+        return
+
+    def setUp(self):
+        self.apiclient = self.testClient.getApiClient()
+        self.dbclient = self.testClient.getDbConnection()
+        self.cleanup = []
+
+    def tearDown(self):
+        try:
+            # Clean up the resources created by the tests
+            cleanup_resources(self.apiclient, self.cleanup)
+        except Exception as e:
+            raise Exception("Warning: Exception during cleanup : %s" % e)
+        return
+
+    @attr(tags = ["advanced", "smoke"], required_hardware="false")
+    def test_01_created_vm_state(self):
+        """Test state of container cluster is running state after creation
+        """
+        self.assertEqual(self.container_cluster.state, "Running")
+
+    @attr(tags = ["advanced", "smoke"], required_hardware="false")
+    def test_02_stop_container_cluster(self):
+        """Test state of container cluster is stopped state after performing stop
+        """
+        try:
+            self.container_cluster.stop(self.apiclient)
+        except Exception as e:
+            self.fail("Failed to stop container cluster: %s" % e)
+            return
+
+        list_container_cluster_response = ContainerCluster.list(
+                                            self.apiclient,
+                                            id=self.container_cluster.id
+                                            )
+        self.assertEqual(
+                            isinstance(list_container_cluster_response, list),
+                            True,
+                            "Check list response returns a valid list"
+                        )
+
+        self.assertNotEqual(
+                            len(list_container_cluster_response),
+                            0,
+                            "Check container cluster in the list"
+                        )
+
+        self.assertEqual(
+                            list_container_cluster_response[0].state,
+                            "Stopped",
+                            "Check Container Cluster is in Stopped state"
+                        )
+        return
+
+    @attr(tags = ["advanced", "smoke"], required_hardware="false")
+    def test_03_start_container_cluster(self):
+        """Test state of container cluster is Running state after performing start
+        """
+        try:
+            self.container_cluster.start(self.apiclient)
+        except Exception as e:
+            self.fail("Failed to start container cluster: %s" % e)
+            return
+
+        list_container_cluster_response = ContainerCluster.list(
+                                            self.apiclient,
+                                            id=self.container_cluster.id
+                                            )
+        self.assertEqual(
+                            isinstance(list_container_cluster_response, list),
+                            True,
+                            "Check list response returns a valid list"
+                        )
+
+        self.assertNotEqual(
+                            len(list_container_cluster_response),
+                            0,
+                            "Check container cluster in the list"
+                        )
+
+        self.assertEqual(
+                            list_container_cluster_response[0].state,
+                            "Running",
+                            "Check Container Cluster is in Stopped state"
+                        )
+        return
+
+    @attr(tags = ["advanced", "smoke"], required_hardware="false")
+    def test_04_destroy_container_cluster(self):
+        """Test destroy container cluster
+        """
+        try:
+            self.container_cluster.delete(self.apiclient)
+        except Exception as e:
+            self.fail("Failed to delete container cluster: %s" % e)
+            return
+
+        list_container_cluster_response = ContainerCluster.list(
+                                            self.apiclient,
+                                            id=self.container_cluster.id
+                                            )
+        self.assertEqual(
+                            isinstance(list_container_cluster_response, list),
+                            True,
+                            "Check list response returns a valid list"
+                        )
+
+        self.assertNotEqual(
+                            len(list_container_cluster_response),
+                            0,
+                            "Check container cluster in the list"
+                        )
+
+        self.assertEqual(
+                            list_container_cluster_response[0].state,
+                            "Destroyed",
+                            "Check Container Cluster is in Destroyed state"
+                        )
+        return
\ No newline at end of file
diff --git a/plugins/application-clusters/test/test_container_cluster_provisioning.py b/plugins/application-clusters/test/test_container_cluster_provisioning.py
new file mode 100644
index 00000000000..7bad3fcc86e
--- /dev/null
+++ b/plugins/application-clusters/test/test_container_cluster_provisioning.py
@@ -0,0 +1,287 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import marvin
+from marvin.cloudstackTestCase import *
+from marvin.cloudstackAPI import *
+from marvin.lib.utils import *
+from marvin.lib.base import *
+from marvin.lib.common import *
+from marvin.lib.utils import (random_gen)
+from nose.plugins.attrib import attr
+import cmd
+
+class TestContainerClusterProvisioning(cloudstackTestCase):
+    """
+        Tests for container cluster provisioning 
+    """
+    
+    def setUp(self):
+        testClient = super(TestContainerClusterProvisioning, self).getClsTestClient()
+
+        self.apiclient = testClient.getApiClient()
+        self.dbclient = testClient.getDbConnection()
+
+        self.zone = get_zone(self.apiclient, testClient.getZoneForTests())
+        
+        self.service_offering = self.get_service_offering()
+        
+        self.ids_to_clean = []
+
+    
+    def tearDown(self):
+        if self.ids_to_clean is not None:
+            for id in self.ids_to_clean:
+                try:
+                    self.delete_cc(id)
+                except Exception:
+                    print("Could not delete Container Cluster with ID: " + id)
+                    
+    def delete_cc(self, id):
+        cmd = self.getDeleteCCCmd(id)
+        response = self.apiclient.deleteContainerCluster(cmd)
+        self.assertEqual(response.success, True)
+        
+      
+    def getDeleteCCCmd(self, id):
+        cmd = deleteContainerCluster.deleteContainerClusterCmd()
+        cmd.id = id
+        
+        return cmd
+        
+    def get_service_offering(self):
+        response = list_service_offering(self.apiclient)
+        
+        if len(response) > 0:
+            self.service_offering = response[0]
+            return self.service_offering
+        else:
+            raise self.skipTest("No service offering found, skipping test")
+    
+    
+    def getCreateContainerClusterCmd(self):
+        cmd = createContainerCluster.createContainerClusterCmd()
+        cmd.name = "TestCluester"
+        cmd.serviceofferingid = self.service_offering.id
+        cmd.zoneid = self.zone.id
+        cmd.size = 1
+                
+        return cmd
+    
+
+    def getListContainerClusterCmd(self):
+        cmd = listContainerCluster.listContainerClusterCmd()
+        
+        return cmd
+
+    
+    @attr(tags=["advanced", "advancedns","smoke","basic", "sg"], required_hardware="false")   
+    def test_create_container_cluster_valid(self):
+        """
+            This is a valid scenario of creating a container cluster
+        """
+        #Create API Command
+        cmd = self.getCreateContainerClusterCmd()
+
+        #Execute the API Command
+        response = ContainerCluster.create(
+                                           self.apiclient,
+                                           cmd.name, 
+                                           cmd.zoneid, 
+                                           cmd.serviceofferingid, 
+                                           cmd.size)
+        
+        #Check if job is successful
+        self.ids_to_clean.append("afb8541b-c461-4afe-b9d9-a64e2444f73c")
+        self.ids_to_clean.append(response.id)
+        
+        self.assertEqual(response.state, "Running")
+        
+        
+
+    
+    
+    @attr(tags=["advanced", "advancedns","smoke","basic", "sg"], required_hardware="false")   
+    def test_create_cc_with_name_emptyString(self):
+        """
+            This test calls the create CC API with an empty name parameter
+            and should result in an error
+        """
+        #Create API Command
+        cmd = self.getCreateContainerClusterCmd()
+        cmd.name = ''
+        
+        #Execute the API Command
+        try:
+            response = self.apiclient.createContainerCluster(cmd)
+        except Exception:
+            pass
+        else:
+            self.fail("Expected an exception to be thrown, failing")
+
+
+    @attr(tags=["advanced", "advancedns","smoke","basic", "sg"], required_hardware="false")   
+    def test_create_cc_without_name(self):
+        """
+            This test calls the create CC API without the name parameter
+            and should result in an error
+        """
+        #Create API Command
+        cmd = self.getCreateContainerClusterCmd()
+        cmd.name = None
+        
+        #Execute the API Command
+        try:
+            response = self.apiclient.createContainerCluster(cmd)
+        except Exception:
+            pass
+        else:
+            self.fail("Expected an exception to be thrown, failing")
+        
+    @attr(tags=["advanced", "advancedns","smoke","basic", "sg"], required_hardware="false")       
+    def test_create_cc_without_zoneid(self):
+        """
+            This test calls the create CC API without the zoneid parameter
+            and should result in an error
+        """
+        #Create API Command
+        cmd = self.getCreateContainerClusterCmd()
+        cmd.zoneid = None
+        
+        #Execute the API Command
+        try:
+            response = self.apiclient.createContainerCluster(cmd)
+        except Exception:
+            pass
+        else:
+            self.fail("Expected an exception to be thrown, failing")
+                
+    @attr(tags=["advanced", "advancedns","smoke","basic", "sg"], required_hardware="false")        
+    def test_create_cc_without_serviceoffering(self):
+        """
+            This test calls the create CC API without the serviceofferingid parameter
+            and should result in an error
+        """
+        #Create API Command
+        cmd = self.getCreateContainerClusterCmd()
+        cmd.serviceofferingid = None
+        
+        #Execute the API Command
+        try:
+            response = self.apiclient.createContainerCluster(cmd)
+        except Exception:
+            pass
+        else:
+            self.fail("Expected an exception to be thrown, failing")
+     
+     
+    @attr(tags=["advanced", "advancedns","smoke","basic", "sg"], required_hardware="false")        
+    def test_create_cc_without_clustersize(self):
+        """
+            This test calls the create CC API without the size parameter
+            and should result in an error
+        """
+        #Create API Command
+        cmd = self.getCreateContainerClusterCmd()
+        cmd.size = None
+        
+        #Execute the API Command
+        try:
+            response = self.apiclient.createContainerCluster(cmd)
+        except Exception:
+            pass
+        else:
+            self.fail("Expected an exception to be thrown, failing")
+        
+   
+    @attr(tags=["advanced", "advancedns","smoke","basic", "sg"], required_hardware="false")        
+    def test_create_cc_invalid_clustersize(self):
+        """
+            This test calls the create CC API with an invalid cluster size
+            and should result in an error
+        """
+        #Create API Command
+        cmd = self.getCreateContainerClusterCmd()
+        cmd.size = "Invalid-cluster-size"
+        
+        #Execute the API Command
+        try:
+            response = self.apiclient.createContainerCluster(cmd)
+        except Exception:
+            pass
+        else:
+            self.fail("Expected an exception to be thrown, failing")
+            
+            
+    @attr(tags=["advanced", "advancedns","smoke","basic", "sg"], required_hardware="false")        
+    def test_create_cc_invalid_zoneid(self):
+        """
+            This test calls the create CC API with an invalid zoneid
+            and should result in an error
+        """
+        #Create API Command
+        cmd = self.getCreateContainerClusterCmd()
+        cmd.zoneid = "Some-invalid-zoneid"
+        
+        #Execute the API Command
+        try:
+            response = self.apiclient.createContainerCluster(cmd)
+        except Exception:
+            pass
+        else:
+            self.fail("Expected an exception to be thrown, failing")
+
+
+    @attr(tags=["advanced", "advancedns","smoke","basic", "sg"], required_hardware="false")        
+    def test_create_cc_invalid_serviceofferingid(self):
+        """
+            This test calls the create CC API with an invalid serviceofferingid
+            and should result in an error
+        """
+        #Create API Command
+        cmd = self.getCreateContainerClusterCmd()
+        cmd.serviceofferingid = "Some-invalid-service-offering_id"
+        
+        #Execute the API Command
+        try:
+            response = self.apiclient.createContainerCluster(cmd)
+        except Exception:
+            pass
+        else:
+            self.fail("Expected an exception to be thrown, failing")
+
+
+    @attr(tags=["advanced", "advancedns","smoke","basic", "sg"], required_hardware="false")       
+    def test_create_cc_with_size_0(self):
+        """
+            This test calls the create CC API with a cluster size of 0
+            and should result in an error
+        """
+        #Create API Command
+        cmd = self.getCreateContainerClusterCmd()
+        cmd.size = 0
+        
+        #Execute the API Command
+        try:
+            response = self.apiclient.createContainerCluster(cmd)
+        except Exception:
+            pass
+        else:
+            self.fail("Expected an exception to be thrown, failing")
+                
diff --git a/plugins/application-clusters/test/test_provisioning_and_deployment.py b/plugins/application-clusters/test/test_provisioning_and_deployment.py
new file mode 100644
index 00000000000..d1ad64e2968
--- /dev/null
+++ b/plugins/application-clusters/test/test_provisioning_and_deployment.py
@@ -0,0 +1,189 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import marvin
+
+from marvin.cloudstackTestCase import *
+from marvin.sshClient import SshClient
+from marvin.cloudstackAPI import *
+from marvin.lib.utils import *
+from marvin.lib.base import *
+from marvin.lib.common import *
+from marvin.lib.utils import (random_gen)
+from nose.plugins.attrib import attr
+import cmd
+
+
+class TestProvisioningAndDeployment(cloudstackTestCase):
+    """
+    Tests for provisioning a container cluster and deploying a workload onto it
+    """
+
+    def setUp(self):
+        testClient = super(TestProvisioningAndDeployment, self).getClsTestClient()
+        self.apiclient = testClient.getApiClient()
+        self.mgtSvrDetails = self.config.__dict__["mgtSvr"][0].__dict__        
+
+        self.zone = get_zone(self.apiclient, testClient.getZoneForTests())
+        
+        self.service_offering = self.get_service_offering()
+        
+        self.cluster_endpoint = ""
+        self.cluster_password = ""
+        
+        self.ids_to_clean = []
+        #self.ids_to_clean.append("9e89d5ef-b31a-415e-b71d-2342cf69d308")
+
+    
+    def tearDown(self):
+        if self.ids_to_clean is not None:
+            for id in self.ids_to_clean:
+                try:
+                    self.delete_cc(id)
+                except Exception:
+                    print("Could not delete Container Cluster with ID: " + id)
+                    
+    def delete_cc(self, id):
+        cmd = self.getDeleteCCCmd(id)
+        response = self.apiclient.deleteContainerCluster(cmd)
+        self.assertEqual(response.success, True)
+        return
+      
+    def getDeleteCCCmd(self, id):
+        cmd = deleteContainerCluster.deleteContainerClusterCmd()
+        cmd.id = id
+        
+        return cmd
+        
+    def get_service_offering(self):
+        response = list_service_offering(self.apiclient)
+        
+        if len(response) > 0:
+            self.service_offering = response[0]
+            return self.service_offering
+        else:
+            raise self.skipTest("No service offering found, skipping test")
+    
+    
+    def getCreateContainerClusterCmd(self):
+        cmd = createContainerCluster.createContainerClusterCmd()
+        
+        cmd.name = "TestCluster"
+        cmd.serviceofferingid = self.service_offering.id
+        cmd.zoneid = self.zone.id
+        cmd.size = 1
+                
+        return cmd
+    
+    def stopCluster(self):
+        cmd = stopContainerCluster.stopContainerClusterCmd()
+        cmd.id = self.ids_to_clean[0]
+        print(cmd.id)
+        
+        response = ContainerCluster.stop(self.apiclient, cmd)
+        return response.success
+            
+    def startCluster(self):
+        cmd = startContainerCluster.startContainerClusterCmd()
+        cmd.id = self.ids_to_clean[0]
+        print(cmd.id)
+
+        response = ContainerCluster.start(self.apiclient, cmd)
+        return response.success
+    
+    def createContainerCluster(self):
+        #Create API Command
+        cmd = self.getCreateContainerClusterCmd()
+
+        #Execute the API Command
+        response = ContainerCluster.create(
+                                           self.apiclient,
+                                           cmd.name, 
+                                           cmd.zoneid, 
+                                           cmd.serviceofferingid, 
+                                           cmd.size)
+        
+        #Check if job is successful
+        self.ids_to_clean.append(response.id)
+        
+        self.assertEqual(response.state, "Running")
+        
+        self.cluster_endpoint = response.endpoint
+        self.cluster_password = response.password
+        
+        return response.success
+    
+    
+    def deployRedis(self):
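+        # SSH to the management server and use kubectl to deploy the upstream Kubernetes Redis example against the cluster endpoint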
+        sshClient = SshClient(host=self.mgtSvrDetails["mgtSvrIp"], port=22, user=self.mgtSvrDetails["user"], passwd=self.mgtSvrDetails["passwd"]) 
+        
+        sshClient.execute("mkdir redis-example ")
+        sshClient.execute("wget https://raw.githubusercontent.com/kubernetes/kubernetes/master/examples/redis/redis-controller.yaml " +
+                                                                "https://raw.githubusercontent.com/kubernetes/kubernetes/master/examples/redis/redis-master.yaml " +
+                                                                "https://raw.githubusercontent.com/kubernetes/kubernetes/master/examples/redis/redis-proxy.yaml " +
+                                                                "https://raw.githubusercontent.com/kubernetes/kubernetes/master/examples/redis/redis-sentinel-controller.yaml " +
+                                                                "https://raw.githubusercontent.com/kubernetes/kubernetes/master/examples/redis/redis-sentinel-service.yaml " +
+                                                                "-P /root/redis-example/")
+        sshClient.execute("sed -i -e 's/redis:v2/redis:v1/g' /root/redis-example/*.yaml")
+        sshClient.execute("mkdir redis-example/image")
+        sshClient.execute("wget https://raw.githubusercontent.com/kubernetes/kubernetes/master/examples/redis/image/Dockerfile " +
+                                                        "https://raw.githubusercontent.com/kubernetes/kubernetes/master/examples/redis/image/redis-master.conf " +
+                                                        "https://raw.githubusercontent.com/kubernetes/kubernetes/master/examples/redis/image/redis-slave.conf " + 
+                                                        "https://raw.githubusercontent.com/kubernetes/kubernetes/master/examples/redis/image/run.sh -P /root/redis-example/image/")
+        
+        sshClient.execute("kubectl create -f /root/redis-example/redis-master.yaml -s " + self.cluster_endpoint + " --insecure-skip-tls-verify=true --username=admin --password=" + self.cluster_password)
+        sshClient.execute("kubectl create -f /root/redis-example/redis-sentinel-service.yaml -s " + self.cluster_endpoint + " --insecure-skip-tls-verify=true --username=admin --password=" + self.cluster_password)
+        sshClient.execute("kubectl create -f /root/redis-example/redis-controller.yaml -s " + self.cluster_endpoint + " --insecure-skip-tls-verify=true --username=admin --password=" + self.cluster_password)
+        sshClient.execute("kubectl create -f /root/redis-example/redis-sentinel-controller.yaml -s " + self.cluster_endpoint + " --insecure-skip-tls-verify=true --username=admin --password=" + self.cluster_password)
+
+        res = sshClient.execute("kubectl scale rc redis --replicas=3 -s " + self.cluster_endpoint + " --insecure-skip-tls-verify=true --username=admin --password=" + self.cluster_password)
+        self.assertEqual(res[0], "replicationcontroller \"redis\" scaled")
+
+        res = sshClient.execute("kubectl scale rc redis-sentinel --replicas=3 -s " + self.cluster_endpoint + " --insecure-skip-tls-verify=true --username=admin --password=" + self.cluster_password)
+        self.assertEqual(res[0], "replicationcontroller \"redis-sentinel\" scaled")
+
+        res = sshClient.execute("kubectl delete pods redis-master -s " + self.cluster_endpoint + " --insecure-skip-tls-verify=true --username=admin --password=" + self.cluster_password )
+        self.assertEqual(res[0], "pod \"redis-master\" deleted")
+
+        sshClient.execute("rm -rf /root/redis-example/")
+       
+    
+    
+    @attr(tags=["advanced", "advancedns","smoke","basic", "sg"], required_hardware="false")   
+    def test_create_container_cluster_valid(self):
+        """
+            Create a container cluster and deploy the Kubernetes Redis example onto it
+        """
+
+        self.createContainerCluster()
+        self.deployRedis()
+       
+        
+   
+    @attr(tags=["advanced", "advancedns","smoke","basic", "sg", "custom"], required_hardware="false")   
+    def test_stop_start_cc(self):
+        """
+            Test scenario for stopping and starting a running cluster
+        """
+        self.createContainerCluster()
+        
+        self.assertEqual(True, self.stopCluster())
+        
+        self.assertEqual(True, self.startCluster())
+        
+        
\ No newline at end of file
diff --git a/plugins/pom.xml b/plugins/pom.xml
index bd737172cbd..573ce0fcfab 100755
--- a/plugins/pom.xml
+++ b/plugins/pom.xml
@@ -46,6 +46,7 @@
     </plugins>
   </build>
   <modules>
+    <module>application-clusters</module>
     <module>api/rate-limit</module>
     <module>api/solidfire-intg-test</module>
     <module>api/discovery</module>
diff --git a/setup/db/db/schema-41000to41100-cleanup.sql b/setup/db/db/schema-41000to41100-cleanup.sql
new file mode 100644
index 00000000000..963fda41723
--- /dev/null
+++ b/setup/db/db/schema-41000to41100-cleanup.sql
@@ -0,0 +1,21 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements.  See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership.  The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License.  You may obtain a copy of the License at
+--
+--   http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied.  See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema cleanup from 4.10.0.0 to 4.11.0.0;
+--;
+
diff --git a/setup/db/db/schema-41000to41100.sql b/setup/db/db/schema-41000to41100.sql
new file mode 100644
index 00000000000..0b5e82191e0
--- /dev/null
+++ b/setup/db/db/schema-41000to41100.sql
@@ -0,0 +1,111 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements.  See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership.  The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License.  You may obtain a copy of the License at
+--
+--   http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied.  See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade from 4.10.0.0 to 4.11.0.0;
+--;
+
+use cloud;
+
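+-- Core table tracking each application cluster: sizing, ownership, network and lifecycle state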
+CREATE TABLE IF NOT EXISTS `cloud`.`application_cluster` (
+    `id` bigint unsigned NOT NULL auto_increment COMMENT 'id',
+    `uuid` varchar(40),
+    `name` varchar(255) NOT NULL,
+    `description` varchar(4096) COMMENT 'display text for this container cluster',
+    `zone_id` bigint unsigned NOT NULL COMMENT 'zone id',
+    `service_offering_id` bigint unsigned COMMENT 'service offering id for the cluster VM',
+    `template_id` bigint unsigned COMMENT 'vm_template.id',
+    `network_id` bigint unsigned COMMENT 'network this container cluster uses',
+    `node_count` bigint NOT NULL default '0',
+    `account_id` bigint unsigned NOT NULL COMMENT 'owner of this cluster',
+    `domain_id` bigint unsigned NOT NULL COMMENT 'owner of this cluster',
+    `state` char(32) NOT NULL COMMENT 'current state of this cluster',
+    `key_pair` varchar(40),
+    `cores` bigint unsigned NOT NULL COMMENT 'number of cores',
+    `memory` bigint unsigned NOT NULL COMMENT 'total memory',
+    `endpoint` varchar(255) COMMENT 'url endpoint of the container cluster manager api access',
+    `console_endpoint` varchar(255) COMMENT 'url for the container cluster manager dashboard',
+    `created` datetime NOT NULL COMMENT 'date created',
+    `removed` datetime COMMENT 'date removed if not null',
+    `gc` tinyint unsigned NOT NULL DEFAULT 1 COMMENT 'gc this container cluster or not',
+
+    CONSTRAINT `fk_cluster__zone_id` FOREIGN KEY `fk_cluster__zone_id` (`zone_id`) REFERENCES `data_center` (`id`) ON DELETE CASCADE,
+    CONSTRAINT `fk_cluster__service_offering_id` FOREIGN KEY `fk_cluster__service_offering_id` (`service_offering_id`) REFERENCES `service_offering`(`id`) ON DELETE CASCADE,
+    CONSTRAINT `fk_cluster__template_id` FOREIGN KEY `fk_cluster__template_id`(`template_id`) REFERENCES `vm_template`(`id`) ON DELETE CASCADE,
+    CONSTRAINT `fk_cluster__network_id` FOREIGN KEY `fk_cluster__network_id`(`network_id`) REFERENCES `networks`(`id`) ON DELETE CASCADE,
+
+    PRIMARY KEY(`id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
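+-- Maps the member VMs of a cluster back to their application_cluster row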
+CREATE TABLE IF NOT EXISTS `cloud`.`application_cluster_vm_map` (
+    `id` bigint unsigned NOT NULL auto_increment COMMENT 'id',
+    `cluster_id` bigint unsigned NOT NULL COMMENT 'cluster id',
+    `vm_id` bigint unsigned NOT NULL COMMENT 'vm id',
+
+    PRIMARY KEY(`id`),
+    CONSTRAINT `application_cluster_vm_map_cluster__id` FOREIGN KEY `application_cluster_vm_map_cluster__id`(`cluster_id`) REFERENCES `application_cluster`(`id`) ON DELETE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
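+-- Per-cluster credentials and optional container registry settings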
+CREATE TABLE IF NOT EXISTS `cloud`.`application_cluster_details` (
+    `id` bigint unsigned NOT NULL auto_increment COMMENT 'id',
+    `cluster_id` bigint unsigned NOT NULL COMMENT 'cluster id',
+    `username` varchar(255) NOT NULL,
+    `password` varchar(255) NOT NULL,
+    `registry_username` varchar(255),
+    `registry_password` varchar(255),
+    `registry_url` varchar(255),
+    `registry_email` varchar(255),
+    `network_cleanup` tinyint unsigned NOT NULL DEFAULT 1 COMMENT 'true if the network needs to be cleaned up on deletion of the container cluster. Should be false if the user specified a network for the cluster',
+
+    PRIMARY KEY(`id`),
+    CONSTRAINT `application_cluster_details_cluster__id` FOREIGN KEY `application_cluster_details_cluster__id`(`cluster_id`) REFERENCES `application_cluster`(`id`) ON DELETE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
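+-- Seed global settings for the cluster template name and the cloud-init configs used for master and node VMs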
+INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server',
+'cloud.application.cluster.template.name', 'ShapeBlue-ACS-Template', 'template name', '-1', NULL, NULL, 0);
+
+INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server',
+'cloud.application.cluster.master.cloudconfig', '/etc/cloudstack/management/k8s-master.yml' , 'file location path of the cloud config used for creating container cluster master node', '/etc/cloudstack/management/k8s-master.yml', NULL , NULL, 0);
+
+INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server',
+'cloud.application.cluster.node.cloudconfig', '/etc/cloudstack/management/k8s-node.yml', 'file location path of the cloud config used for creating container cluster node', '/etc/cloudstack/management/k8s-node.yml', NULL , NULL, 0);
+
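+-- Register a dedicated network offering for cluster networks and map the VirtualRouter services it provides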
+INSERT IGNORE INTO `cloud`.`network_offerings` (name, uuid, unique_name, display_text, nw_rate, mc_rate, traffic_type, tags, system_only, specify_vlan, service_offering_id, conserve_mode, created,availability, dedicated_lb_service, shared_source_nat_service, sort_key, redundant_router_service, state, guest_type, elastic_ip_service, eip_associate_public_ip, elastic_lb_service, specify_ip_ranges, inline,is_persistent,internal_lb, public_lb, egress_default_policy, concurrent_connections, keep_alive_enabled, supports_streched_l2, `default`, removed) VALUES ('DefaultNetworkOfferingforClusterService', UUID(), 'DefaultNetworkOfferingforClusterService', 'Network Offering used for CloudStack container service', NULL,NULL,'Guest',NULL,0,0,NULL,1,now(),'Required',1,0,0,0,'Enabled','Isolated',0,1,0,0,0,0,0,1,1,NULL,0,0,0,NULL);
+
+UPDATE `cloud`.`network_offerings` SET removed=NULL WHERE unique_name='DefaultNetworkOfferingforClusterService';
+
+SET @ccsntwk = (select id from network_offerings where name='DefaultNetworkOfferingforClusterService' and removed IS NULL);
+INSERT IGNORE INTO ntwk_offering_service_map (network_offering_id, service, provider, created) VALUES (@ccsntwk, 'Dhcp','VirtualRouter',now());
+INSERT IGNORE INTO ntwk_offering_service_map (network_offering_id, service, provider, created) VALUES (@ccsntwk, 'Dns','VirtualRouter',now());
+INSERT IGNORE INTO ntwk_offering_service_map (network_offering_id, service, provider, created) VALUES (@ccsntwk, 'Firewall','VirtualRouter',now());
+INSERT IGNORE INTO ntwk_offering_service_map (network_offering_id, service, provider, created) VALUES (@ccsntwk, 'Gateway','VirtualRouter',now());
+INSERT IGNORE INTO ntwk_offering_service_map (network_offering_id, service, provider, created) VALUES (@ccsntwk, 'Lb','VirtualRouter',now());
+INSERT IGNORE INTO ntwk_offering_service_map (network_offering_id, service, provider, created) VALUES (@ccsntwk, 'PortForwarding','VirtualRouter',now());
+INSERT IGNORE INTO ntwk_offering_service_map (network_offering_id, service, provider, created) VALUES (@ccsntwk, 'SourceNat','VirtualRouter',now());
+INSERT IGNORE INTO ntwk_offering_service_map (network_offering_id, service, provider, created) VALUES (@ccsntwk, 'StaticNat','VirtualRouter',now());
+INSERT IGNORE INTO ntwk_offering_service_map (network_offering_id, service, provider, created) VALUES (@ccsntwk, 'UserData','VirtualRouter',now());
+INSERT IGNORE INTO ntwk_offering_service_map (network_offering_id, service, provider, created) VALUES (@ccsntwk, 'Vpn','VirtualRouter',now());
+
+INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server',
+'cloud.application.cluster.network.offering', 'DefaultNetworkOfferingforClusterService' , 'Network Offering used for CloudStack container service', 'DefaultNetworkOfferingforClusterService', NULL , NULL, 0);
+
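+-- Register CoreOS as a guest OS per hypervisor and seed a built-in CoreOS template for KVM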
+INSERT IGNORE INTO `cloud`.`guest_os` (id, uuid, category_id, display_name, created) VALUES (255, UUID(), 7, 'CoreOS', utc_timestamp());
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'XenServer', 'default', 'CoreOS', 255, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'VmWare', 'default', 'coreos64Guest', 255, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'KVM', 'default', 'CoreOS', 255, utc_timestamp(), 0);
+
+INSERT INTO `cloud`.`vm_template` (uuid, unique_name, name, public, featured, created, state, type, hvm, bits, account_id, url, enable_password, display_text, format, guest_os_id, cross_zones, hypervisor_type, extractable)  VALUES (UUID(), 'ACS Template KVM', 'ACS Template KVM', 1, 1, now(), 'Active', 'BUILTIN', 0, 64, 1, 'http://dl.openvm.eu/cloudstack/coreos/x86_64/coreos_production_cloudstack_image-kvm.qcow2.bz2',  0, 'CloudStack Application Cluster Service Template (KVM)', 'QCOW2', 99, 1, 'KVM',1);
diff --git a/test/integration/component/application_clusters/test_application_cluster_life_cycle.py b/test/integration/component/application_clusters/test_application_cluster_life_cycle.py
new file mode 100644
index 00000000000..e2c7677e48c
--- /dev/null
+++ b/test/integration/component/application_clusters/test_application_cluster_life_cycle.py
@@ -0,0 +1,175 @@
+# Copyright 2016 ShapeBlue Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import marvin
+from marvin.cloudstackTestCase import *
+from marvin.cloudstackAPI import *
+from marvin.lib.utils import *
+from marvin.lib.base import *
+from marvin.lib.common import *
+from marvin.lib.utils import (random_gen)
+from nose.plugins.attrib import attr
+import cmd
+
+class TestApplicationClusterLifeCycle(cloudstackTestCase):
+    """
+        Tests for container cluster life cycle operations
+    """
+
+    @classmethod
+    def setUpClass(cls):
+        testClient = super(TestApplicationClusterLifeCycle, cls).getClsTestClient()
+        cls.apiclient = testClient.getApiClient()
+        cls.services = testClient.getParsedTestDataConfig()
+
+        cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
+        cls.service_offering = ServiceOffering.create(
+            cls.apiclient,
+            cls.services["service_offerings"]
+        )
+        cls.container_cluster = ApplicationCluster.create(
+            cls.apiclient,
+            name="TestApplicationCluster",
+            zoneid=cls.zone.id,
+            serviceofferingid=cls.service_offering.id,
+            size=2)
+
+        cls._cleanup = [cls.service_offering]
+
+    @classmethod
+    def tearDownClass(cls):
+        cls.apiclient = super(TestApplicationClusterLifeCycle, cls).getClsTestClient().getApiClient()
+        try:
+            cleanup_resources(cls.apiclient, cls._cleanup)
+        except Exception as e:
+            raise Exception("Warning: Exception during cleanup : %s" % e)
+        return
+
+    def setUp(self):
+        self.apiclient = self.testClient.getApiClient()
+        self.dbclient = self.testClient.getDbConnection()
+        self.cleanup = []
+
+    def tearDown(self):
+        try:
+            # Clean up the resources created by the tests
+            cleanup_resources(self.apiclient, self.cleanup)
+        except Exception as e:
+            raise Exception("Warning: Exception during cleanup : %s" % e)
+        return
+
+    @attr(tags = ["advanced", "smoke"], required_hardware="false")
+    def test_01_created_vm_state(self):
+        """Test state of container cluster is running state after creation
+        """
+        self.assertEqual(self.container_cluster.state, "Running")
+
+    @attr(tags = ["advanced", "smoke"], required_hardware="false")
+    def test_02_stop_container_cluster(self):
+        """Test state of container cluster is stopped state after performing stop
+        """
+        try:
+            self.container_cluster.stop(self.apiclient)
+        except Exception as e:
+            self.fail("Failed to stop container cluster: %s" % e)
+            return
+
+        list_container_cluster_response = ApplicationCluster.list(
+                                            self.apiclient,
+                                            id=self.container_cluster.id
+                                            )
+        self.assertEqual(
+                            isinstance(list_container_cluster_response, list),
+                            True,
+                            "Check list response returns a valid list"
+                        )
+
+        self.assertNotEqual(
+                            len(list_container_cluster_response),
+                            0,
+                            "Check container cluster in the list"
+                        )
+
+        self.assertEqual(
+                            list_container_cluster_response[0].state,
+                            "Stopped",
+                            "Check Application Cluster is in Stopped state"
+                        )
+        return
+
+    @attr(tags = ["advanced", "smoke"], required_hardware="false")
+    def test_03_start_container_cluster(self):
+        """Test state of container cluster is Running state after performing start
+        """
+        try:
+            self.container_cluster.start(self.apiclient)
+        except Exception as e:
+            self.fail("Failed to start container cluster: %s" % e)
+            return
+
+        list_container_cluster_response = ApplicationCluster.list(
+                                            self.apiclient,
+                                            id=self.container_cluster.id
+                                            )
+        self.assertEqual(
+                            isinstance(list_container_cluster_response, list),
+                            True,
+                            "Check list response returns a valid list"
+                        )
+
+        self.assertNotEqual(
+                            len(list_container_cluster_response),
+                            0,
+                            "Check container cluster in the list"
+                        )
+
+        self.assertEqual(
+                            list_container_cluster_response[0].state,
+                            "Running",
+                            "Check Application Cluster is in Stopped state"
+                        )
+        return
+
+    @attr(tags = ["advanced", "smoke"], required_hardware="false")
+    def test_04_destroy_container_cluster(self):
+        """Test destroy container cluster
+        """
+        try:
+            self.container_cluster.delete(self.apiclient)
+        except Exception as e:
+            self.fail("Failed to delete container cluster: %s" % e)
+            return
+
+        list_container_cluster_response = ApplicationCluster.list(
+                                            self.apiclient,
+                                            id=self.container_cluster.id
+                                            )
+        self.assertEqual(
+                            isinstance(list_container_cluster_response, list),
+                            True,
+                            "Check list response returns a valid list"
+                        )
+
+        self.assertNotEqual(
+                            len(list_container_cluster_response),
+                            0,
+                            "Check container cluster in the list"
+                        )
+
+        self.assertEqual(
+                            list_container_cluster_response[0].state,
+                            "Destroyed",
+                            "Check Application Cluster is in Destroyed state"
+                        )
+        return
\ No newline at end of file
diff --git a/test/integration/component/application_clusters/test_application_cluster_provisioning.py b/test/integration/component/application_clusters/test_application_cluster_provisioning.py
new file mode 100644
index 00000000000..fdf319a2419
--- /dev/null
+++ b/test/integration/component/application_clusters/test_application_cluster_provisioning.py
@@ -0,0 +1,284 @@
+# Copyright 2016 ShapeBlue Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import marvin
+from marvin.cloudstackTestCase import *
+from marvin.cloudstackAPI import *
+from marvin.lib.utils import *
+from marvin.lib.base import *
+from marvin.lib.common import *
+from marvin.lib.utils import (random_gen)
+from nose.plugins.attrib import attr
+import cmd
+
+class TestApplicationClusterProvisioning(cloudstackTestCase):
+    """
+        Tests for container cluster provisioning 
+    """
+    
+    def setUp(self):
+        testClient = super(TestApplicationClusterProvisioning, self).getClsTestClient()
+
+        self.apiclient = testClient.getApiClient()
+        self.dbclient = testClient.getDbConnection()
+
+        self.zone = get_zone(self.apiclient, testClient.getZoneForTests())
+        
+        self.service_offering = self.get_service_offering()
+        
+        self.ids_to_clean = []
+
+    
+    def tearDown(self):
+        if self.ids_to_clean is not None:
+            for id in self.ids_to_clean:
+                try:
+                    self.delete_cc(id)
+                except Exception:
+                    print("Could not delete Application Cluster with ID: " + id)
+                    
+    def delete_cc(self, id):
+        cmd = self.getDeleteCCCmd(id)
+        response = self.apiclient.deleteApplicationCluster(cmd)
+        self.assertEqual(response.success, True)
+        
+      
+    def getDeleteCCCmd(self, id):
+        cmd = deleteApplicationCluster.deleteApplicationClusterCmd()
+        cmd.id = id
+        
+        return cmd
+        
+    def get_service_offering(self):
+        response = list_service_offering(self.apiclient)
+
+        if response and len(response) > 0:
+            self.service_offering = response[0]
+            return self.service_offering
+        else:
+            # skipTest() already raises SkipTest, so no explicit raise is needed
+            self.skipTest("No service offering found, skipping test")
+    
+    
+    def getCreateApplicationClusterCmd(self):
+        cmd = createApplicationCluster.createApplicationClusterCmd()
+        cmd.name = "TestCluster"
+        cmd.serviceofferingid = self.service_offering.id
+        cmd.zoneid = self.zone.id
+        cmd.size = 1
+                
+        return cmd
+    
+
+    def getListApplicationClusterCmd(self):
+        cmd = listApplicationCluster.listApplicationClusterCmd()
+        
+        return cmd
+
+    
+    @attr(tags=["advanced", "advancedns","smoke","basic", "sg"], required_hardware="false")   
+    def test_create_container_cluster_valid(self):
+        """
+            This is a valid scenario of creating a container cluster
+        """
+        #Create API Command
+        cmd = self.getCreateApplicationClusterCmd()
+
+        #Execute the API Command
+        response = ApplicationCluster.create(
+                                           self.apiclient,
+                                           cmd.name, 
+                                           cmd.zoneid, 
+                                           cmd.serviceofferingid, 
+                                           cmd.size)
+        
+        #Register the cluster for cleanup and check the job was successful
+        self.ids_to_clean.append(response.id)
+        
+        self.assertEqual(response.state, "Running")
+        
+        
+
+    
+    
+    @attr(tags=["advanced", "advancedns","smoke","basic", "sg"], required_hardware="false")   
+    def test_create_cc_with_name_emptyString(self):
+        """
+            This test calls the createApplicationCluster API with an empty name parameter
+            Should result in an error
+        """
+        #Create API Command
+        cmd = self.getCreateApplicationClusterCmd()
+        cmd.name = ''
+        
+        #Execute the API Command
+        try:
+            response = self.apiclient.createApplicationCluster(cmd)
+        except Exception:
+            pass
+        else:
+            self.fail("Expected an exception to be thrown, failing")
+
+
+    @attr(tags=["advanced", "advancedns","smoke","basic", "sg"], required_hardware="false")   
+    def test_create_cc_without_name(self):
+        """
+            This test calls the createApplicationCluster API without the name parameter
+            Should result in an error
+        """
+        #Create API Command
+        cmd = self.getCreateApplicationClusterCmd()
+        cmd.name = None
+        
+        #Execute the API Command
+        try:
+            response = self.apiclient.createApplicationCluster(cmd)
+        except Exception:
+            pass
+        else:
+            self.fail("Expected an exception to be thrown, failing")
+        
+    @attr(tags=["advanced", "advancedns","smoke","basic", "sg"], required_hardware="false")       
+    def test_create_cc_without_zoneid(self):
+        """
+            This test calls the createApplicationCluster API without the zoneid parameter
+            Should result in an error
+        """
+        #Create API Command
+        cmd = self.getCreateApplicationClusterCmd()
+        cmd.zoneid = None
+        
+        #Execute the API Command
+        try:
+            response = self.apiclient.createApplicationCluster(cmd)
+        except Exception:
+            pass
+        else:
+            self.fail("Expected an exception to be thrown, failing")
+                
+    @attr(tags=["advanced", "advancedns","smoke","basic", "sg"], required_hardware="false")        
+    def test_create_cc_without_serviceoffering(self):
+        """
+            This test calls the createApplicationCluster API without the serviceofferingid parameter
+            Should result in an error
+        """
+        #Create API Command
+        cmd = self.getCreateApplicationClusterCmd()
+        cmd.serviceofferingid = None
+        
+        #Execute the API Command
+        try:
+            response = self.apiclient.createApplicationCluster(cmd)
+        except Exception:
+            pass
+        else:
+            self.fail("Expected an exception to be thrown, failing")
+     
+     
+    @attr(tags=["advanced", "advancedns","smoke","basic", "sg"], required_hardware="false")        
+    def test_create_cc_without_clustersize(self):
+        """
+            This test calls the createApplicationCluster API without the size parameter
+            Should result in an error
+        """
+        #Create API Command
+        cmd = self.getCreateApplicationClusterCmd()
+        cmd.size = None
+        
+        #Execute the API Command
+        try:
+            response = self.apiclient.createApplicationCluster(cmd)
+        except Exception:
+            pass
+        else:
+            self.fail("Expected an exception to be thrown, failing")
+        
+   
+    @attr(tags=["advanced", "advancedns","smoke","basic", "sg"], required_hardware="false")        
+    def test_create_cc_invalid_clustersize(self):
+        """
+            This test calls the createApplicationCluster API with an invalid cluster size
+            Should result in an error
+        """
+        #Create API Command
+        cmd = self.getCreateApplicationClusterCmd()
+        cmd.size = "Invalid-cluster-size"
+        
+        #Execute the API Command
+        try:
+            response = self.apiclient.createApplicationCluster(cmd)
+        except Exception:
+            pass
+        else:
+            self.fail("Expected an exception to be thrown, failing")
+            
+            
+    @attr(tags=["advanced", "advancedns","smoke","basic", "sg"], required_hardware="false")        
+    def test_create_cc_invalid_zoneid(self):
+        """
+            This test calls the createApplicationCluster API with an invalid zoneid
+            Should result in an error
+        """
+        #Create API Command
+        cmd = self.getCreateApplicationClusterCmd()
+        cmd.zoneid = "Some-invalid-zoneid"
+        
+        #Execute the API Command
+        try:
+            response = self.apiclient.createApplicationCluster(cmd)
+        except Exception:
+            pass
+        else:
+            self.fail("Expected an exception to be thrown, failing")
+
+
+    @attr(tags=["advanced", "advancedns","smoke","basic", "sg"], required_hardware="false")        
+    def test_create_cc_invalid_serviceofferingid(self):
+        """
+            This test calls the createApplicationCluster API with an invalid serviceofferingid
+            Should result in an error
+        """
+        #Create API Command
+        cmd = self.getCreateApplicationClusterCmd()
+        cmd.serviceofferingid = "Some-invalid-service-offering_id"
+        
+        #Execute the API Command
+        try:
+            response = self.apiclient.createApplicationCluster(cmd)
+        except Exception:
+            pass
+        else:
+            self.fail("Expected an exception to be thrown, failing")
+
+
+    @attr(tags=["advanced", "advancedns","smoke","basic", "sg"], required_hardware="false")       
+    def test_create_cc_with_size_0(self):
+        """
+            This test calls the createApplicationCluster API with a cluster size of 0
+            Should result in an error
+        """
+        #Create API Command
+        cmd = self.getCreateApplicationClusterCmd()
+        cmd.size = 0
+        
+        #Execute the API Command
+        try:
+            response = self.apiclient.createApplicationCluster(cmd)
+        except Exception:
+            pass
+        else:
+            self.fail("Expected an exception to be thrown, failing")
+                
diff --git a/test/integration/component/application_clusters/test_provisioning_and_deployment.py b/test/integration/component/application_clusters/test_provisioning_and_deployment.py
new file mode 100644
index 00000000000..9870e664be7
--- /dev/null
+++ b/test/integration/component/application_clusters/test_provisioning_and_deployment.py
@@ -0,0 +1,186 @@
+# Copyright 2016 ShapeBlue Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import marvin
+
+from marvin.cloudstackTestCase import *
+from marvin.sshClient import SshClient
+from marvin.cloudstackAPI import *
+from marvin.lib.utils import *
+from marvin.lib.base import *
+from marvin.lib.common import *
+from marvin.lib.utils import (random_gen)
+from nose.plugins.attrib import attr
+import cmd
+
+
+class TestProvisioningAndDeployment(cloudstackTestCase):
+    """
+    Tests that provision a container cluster and deploy a sample application (Redis) onto it
+    """
+
+    def setUp(self):
+        testClient = super(TestProvisioningAndDeployment, self).getClsTestClient()
+        self.apiclient = testClient.getApiClient()
+        self.mgtSvrDetails = self.config.__dict__["mgtSvr"][0].__dict__        
+
+        self.zone = get_zone(self.apiclient, testClient.getZoneForTests())
+        
+        self.service_offering = self.get_service_offering()
+        
+        self.cluster_endpoint = ""
+        self.cluster_password = ""
+        
+        self.ids_to_clean = []
+        #self.ids_to_clean.append("9e89d5ef-b31a-415e-b71d-2342cf69d308")
+
+    
+    def tearDown(self):
+        if self.ids_to_clean is not None:
+            for id in self.ids_to_clean:
+                try:
+                    self.delete_cc(id)
+                except Exception:
+                    # A failed cleanup should not mask the test result, so only log it
+                    print("Could not delete Application Cluster with ID: " + id)
+                    
+    def delete_cc(self, id):
+        cmd = self.getDeleteCCCmd(id)
+        response = self.apiclient.deleteApplicationCluster(cmd)
+        self.assertEqual(response.success, True)
+        return
+      
+    def getDeleteCCCmd(self, id):
+        cmd = deleteApplicationCluster.deleteApplicationClusterCmd()
+        cmd.id = id
+        
+        return cmd
+        
+    def get_service_offering(self):
+        response = list_service_offering(self.apiclient)
+
+        if response and len(response) > 0:
+            self.service_offering = response[0]
+            return self.service_offering
+        else:
+            # skipTest() already raises SkipTest, so no explicit raise is needed
+            self.skipTest("No service offering found, skipping test")
+    
+    
+    def getCreateApplicationClusterCmd(self):
+        cmd = createApplicationCluster.createApplicationClusterCmd()
+        
+        cmd.name = "TestCluster"
+        cmd.serviceofferingid = self.service_offering.id
+        cmd.zoneid = self.zone.id
+        cmd.size = 1
+                
+        return cmd
+    
+    def stopCluster(self):
+        cmd = stopApplicationCluster.stopApplicationClusterCmd()
+        cmd.id = self.ids_to_clean[0]
+        print(cmd.id)
+        
+        response = ApplicationCluster.stop(self.apiclient, cmd)
+        return response.success
+            
+    def startCluster(self):
+        cmd = startApplicationCluster.startApplicationClusterCmd()
+        cmd.id = self.ids_to_clean[0]
+        print(cmd.id)
+
+        response = ApplicationCluster.start(self.apiclient, cmd)
+        return response.success
+    
+    def createApplicationCluster(self):
+        #Create API Command
+        cmd = self.getCreateApplicationClusterCmd()
+
+        #Execute the API Command
+        response = ApplicationCluster.create(
+                                           self.apiclient,
+                                           cmd.name, 
+                                           cmd.zoneid, 
+                                           cmd.serviceofferingid, 
+                                           cmd.size)
+        
+        #Check if job is successful
+        self.ids_to_clean.append(response.id)
+        
+        self.assertEqual(response.state, "Running")
+        
+        self.cluster_endpoint = response.endpoint
+        self.cluster_password = response.password
+        
+        return response.success
+    
+    
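+    # deployRedis exercises the provisioned cluster end to end: it SSHes into the
+    # management server, downloads the upstream Kubernetes Redis example, and drives
+    # it with kubectl against the cluster API endpoint, authenticating with the
+    # basic-auth credentials returned by createApplicationCluster.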
+    def deployRedis(self):
+        sshClient = SshClient(host=self.mgtSvrDetails["mgtSvrIp"], port=22, user=self.mgtSvrDetails["user"], passwd=self.mgtSvrDetails["passwd"]) 
+        
+        sshClient.execute("mkdir redis-example ")
+        sshClient.execute("wget https://raw.githubusercontent.com/kubernetes/kubernetes/master/examples/redis/redis-controller.yaml " +
+                                                                "https://raw.githubusercontent.com/kubernetes/kubernetes/master/examples/redis/redis-master.yaml " +
+                                                                "https://raw.githubusercontent.com/kubernetes/kubernetes/master/examples/redis/redis-proxy.yaml " +
+                                                                "https://raw.githubusercontent.com/kubernetes/kubernetes/master/examples/redis/redis-sentinel-controller.yaml " +
+                                                                "https://raw.githubusercontent.com/kubernetes/kubernetes/master/examples/redis/redis-sentinel-service.yaml " +
+                                                                "-P /root/redis-example/")
+        sshClient.execute("sed -i -e 's/redis:v2/redis:v1/g' /root/redis-example/*.yaml")
+        sshClient.execute("mkdir redis-example/image")
+        sshClient.execute("wget https://raw.githubusercontent.com/kubernetes/kubernetes/master/examples/redis/image/Dockerfile " +
+                                                        "https://raw.githubusercontent.com/kubernetes/kubernetes/master/examples/redis/image/redis-master.conf " +
+                                                        "https://raw.githubusercontent.com/kubernetes/kubernetes/master/examples/redis/image/redis-slave.conf " + 
+                                                        "https://raw.githubusercontent.com/kubernetes/kubernetes/master/examples/redis/image/run.sh -P /root/redis-example/image/")
+        
+        sshClient.execute("kubectl create -f /root/redis-example/redis-master.yaml -s " + self.cluster_endpoint + " --insecure-skip-tls-verify=true --username=admin --password=" + self.cluster_password)
+        sshClient.execute("kubectl create -f /root/redis-example/redis-sentinel-service.yaml -s " + self.cluster_endpoint + " --insecure-skip-tls-verify=true --username=admin --password=" + self.cluster_password)
+        sshClient.execute("kubectl create -f /root/redis-example/redis-controller.yaml -s " + self.cluster_endpoint + " --insecure-skip-tls-verify=true --username=admin --password=" + self.cluster_password)
+        sshClient.execute("kubectl create -f /root/redis-example/redis-sentinel-controller.yaml -s " + self.cluster_endpoint + " --insecure-skip-tls-verify=true --username=admin --password=" + self.cluster_password)
+
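+        # Scale both replication controllers to three replicas, checking kubectl's
+        # confirmation output, then delete the redis-master pod and verify the
+        # deletion is acknowledged.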
+        res = sshClient.execute("kubectl scale rc redis --replicas=3 -s " + self.cluster_endpoint + " --insecure-skip-tls-verify=true --username=admin --password=" + self.cluster_password)
+        self.assertEqual(res[0], "replicationcontroller \"redis\" scaled")
+
+        res = sshClient.execute("kubectl scale rc redis-sentinel --replicas=3 -s " + self.cluster_endpoint + " --insecure-skip-tls-verify=true --username=admin --password=" + self.cluster_password)
+        self.assertEqual(res[0], "replicationcontroller \"redis-sentinel\" scaled")
+
+        res = sshClient.execute("kubectl delete pods redis-master -s " + self.cluster_endpoint + " --insecure-skip-tls-verify=true --username=admin --password=" + self.cluster_password )
+        self.assertEqual(res[0], "pod \"redis-master\" deleted")
+
+        sshClient.execute("rm -rf /root/redis-example/")
+       
+    
+    
+    @attr(tags=["advanced", "advancedns","smoke","basic", "sg"], required_hardware="false")
+    def test_create_container_cluster_valid(self):
+        """
+            Creates a container cluster and deploys the Redis example onto it
+        """
+        self.createApplicationCluster()
+        self.deployRedis()
+       
+        
+   
+    @attr(tags=["advanced", "advancedns","smoke","basic", "sg", "custom"], required_hardware="false")   
+    def test_stop_start_cc(self):
+        """
+            Test scenario for stopping and starting a running cluster
+        """
+        self.createApplicationCluster()
+        
+        self.assertEqual(True, self.stopCluster())
+        
+        self.assertEqual(True, self.startCluster())
+        
+        
\ No newline at end of file
diff --git a/ui/plugins/applicationClusters/applicationClusters.js b/ui/plugins/applicationClusters/applicationClusters.js
new file mode 100644
index 00000000000..ad8f1b8fcd9
--- /dev/null
+++ b/ui/plugins/applicationClusters/applicationClusters.js
@@ -0,0 +1,826 @@
+/*
+ * Copyright 2016 ShapeBlue Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+(function (cloudStack) {
+
+    var rootCaCert = "";
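+    // Offers the root CA certificate as a client-side download: msSaveBlob on
+    // IE/Edge, otherwise a temporary anchor element pointing at an object URL.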
+    var downloadCaCert = function() {
+        var blob = new Blob([rootCaCert], {type: 'application/x-x509-ca-cert'});
+        var filename = "cloudstack-containerservice.pem";
+        if(window.navigator.msSaveOrOpenBlob) {
+            window.navigator.msSaveBlob(blob, filename);
+        } else{
+            var elem = window.document.createElement('a');
+            elem.href = window.URL.createObjectURL(blob);
+            elem.download = filename;
+            document.body.appendChild(elem);
+            elem.click();
+            document.body.removeChild(elem);
+        }
+    };
+    cloudStack.plugins.applicationclusters = function(plugin) {
+        plugin.ui.addSection({
+            id: 'ccs',
+            title: 'Application Cluster Service',
+            preFilter: function(args) {
+                return true;
+            },
+            showOnNavigation: true,
+            sections: {
+                applicationcluster: {
+                    id: 'applicationclusters',
+                    listView: {
+                        section: 'applicationcluster',
+                        filters: {
+                            all: {
+                                label: 'ui.listView.filters.all'
+                            },
+                            running: {
+                                label: 'state.Running'
+                            },
+                            stopped: {
+                                label: 'state.Stopped'
+                            },
+                            destroyed: {
+                                label: 'state.Destroyed'
+                            }
+                        },
+                        fields: {
+                            name: {
+                                label: 'label.name'
+                            },
+                            zonename: {
+                                label: 'label.zone.name'
+                            },
+                            size : {
+                                label: 'label.size'
+                            },
+                            cpunumber: {
+                                label: 'label.num.cpu.cores'
+                            },
+                            memory: {
+                                label: 'label.memory.mb'
+                            },
+                            state: {
+                                label: 'label.state',
+                                indicator: {
+                                    'Running': 'on',
+                                    'Stopped': 'off',
+                                    'Destroyed': 'off',
+                                    'Error': 'off'
+                                }
+                            }
+                        },
+
+                        advSearchFields: {
+                            name: {
+                                label: 'label.name'
+                            },
+                            zoneid: {
+                                label: 'label.zone',
+                                select: function(args) {
+                                    $.ajax({
+                                        url: createURL('listZones'),
+                                        data: {
+                                            listAll: true
+                                        },
+                                        success: function(json) {
+                                            var zones = json.listzonesresponse.zone ? json.listzonesresponse.zone : [];
+
+                                            args.response.success({
+                                                data: $.map(zones, function(zone) {
+                                                    return {
+                                                        id: zone.id,
+                                                        description: zone.name
+                                                    };
+                                                })
+                                            });
+                                        }
+                                    });
+                                }
+                            },
+                        },
+
+                        // List view actions
+                        actions: {
+                            showCACert: {
+                                label: 'Download CA Certificate',
+                                isHeader: true,
+                                messages: {
+                                    notification: function(args) {
+                                        return 'Download Application Cluster Service Root CA Certificate';
+                                    }
+                                },
+                                createForm: {
+                                    title: 'Download Application Cluster Service Root CA Certificate?',
+                                    fields: {
+                                        certificate: {
+                                            label: 'label.certificate',
+                                            isTextarea: true,
+                                            defaultValue: function(args) {
+                                                $.ajax({
+                                                    url: createURL("listApplicationClusterCACert"),
+                                                    dataType: "json",
+                                                    async: false,
+                                                    success: function(json) {
+                                                        rootCaCert = json.listapplicationclustercacertresponse.rootcacert.certificate;
+                                                    }
+                                                });
+                                                return rootCaCert;
+                                            }
+                                        }
+                                    }
+                                },
+                                action: function(args) {
+                                    downloadCaCert();
+                                    args.response.success({});
+                                },
+                            },
+                            add: {
+                                label: 'Add an application cluster',
+                                createForm: {
+                                    title: 'Add an application cluster',
+                                    preFilter: cloudStack.preFilter.createTemplate,
+                                    fields: {
+                                        name: {
+                                            label: 'label.name',
+                                            //docID: 'Name of the cluster',
+                                            validation: {
+                                                required: true
+                                            }
+                                        },
+                                        description: {
+                                            label: 'label.description',
+                                            //docID: 'helpApplicationClusterDesc',
+                                        },
+                                        zone: {
+                                            label: 'label.zone',
+                                            //docID: 'helpApplicationClusterZone',
+                                            validation: {
+                                                required: true
+                                            },
+                                            select: function(args) {
+                                                $.ajax({
+                                                    url: createURL("listZones&available=true"),
+                                                    dataType: "json",
+                                                    async: true,
+                                                    success: function(json) {
+                                                        var zoneObjs = [];
+                                                        var items = json.listzonesresponse.zone;
+                                                        if (items != null) {
+                                                            for (var i = 0; i < items.length; i++) {
+                                                                zoneObjs.push({
+                                                                    id: items[i].id,
+                                                                    description: items[i].name
+                                                                });
+                                                            }
+                                                        }
+                                                        args.response.success({
+                                                            data: zoneObjs
+                                                        });
+                                                    }
+                                                });
+                                            }
+                                        },
+                                        serviceoffering: {
+                                            label: 'label.menu.service.offerings',
+                                            //docID: 'helpApplicationClusterServiceOffering',
+                                            validation: {
+                                                required: true
+                                            },
+                                            select: function(args) {
+                                                $.ajax({
+                                                    url: createURL("listServiceOfferings"),
+                                                    dataType: "json",
+                                                    async: true,
+                                                    success: function(json) {
+                                                        var offeringObjs = [];
+                                                        var items = json.listserviceofferingsresponse.serviceoffering;
+                                                        if (items != null) {
+                                                            for (var i = 0; i < items.length; i++) {
+                                                                offeringObjs.push({
+                                                                    id: items[i].id,
+                                                                    description: items[i].name
+                                                                });
+                                                            }
+                                                        }
+                                                        args.response.success({
+                                                            data: offeringObjs
+                                                        });
+                                                    }
+                                                });
+                                            }
+                                        },
+                                        network: {
+                                            label: 'label.network',
+                                            //docID: 'helpApplicationClusterNetwork',
+                                            select: function(args) {
+                                                $.ajax({
+                                                    url: createURL("listNetworks"),
+                                                    dataType: "json",
+                                                    async: true,
+                                                    success: function(json) {
+                                                        var networkObjs = [];
+                                                        networkObjs.push({
+                                                            id: "",
+                                                            description: ""
+                                                        });
+                                                        var items = json.listnetworksresponse.network;
+                                                        if (items != null) {
+                                                            for (var i = 0; i < items.length; i++) {
+                                                                networkObjs.push({
+                                                                    id: items[i].id,
+                                                                    description: items[i].name
+                                                                });
+                                                            }
+                                                        }
+                                                        args.response.success({
+                                                            data: networkObjs
+                                                        });
+                                                    }
+                                                });
+                                            }
+                                        },
+                                        size: {
+                                            label: 'Cluster size',
+                                            //docID: 'helpApplicationClusterSize',
+                                            validation: {
+                                                required: true
+                                            },
+                                        },
+                                        sshkeypair: {
+                                            label: 'SSH keypair',
+                                            //docID: 'helpApplicationClusterSSH',
+                                            select: function(args) {
+                                                $.ajax({
+                                                    url: createURL("listSSHKeyPairs"),
+                                                    dataType: "json",
+                                                    async: true,
+                                                    success: function(json) {
+                                                        var keypairObjs = [];
+                                                        keypairObjs.push({
+                                                            id: "",
+                                                            description: ""
+                                                        });
+                                                        var items = json.listsshkeypairsresponse.sshkeypair;
+                                                        if (items != null) {
+                                                            for (var i = 0; i < items.length; i++) {
+                                                                keypairObjs.push({
+                                                                    id: items[i].name,
+                                                                    description: items[i].name
+                                                                });
+                                                            }
+                                                        }
+                                                        args.response.success({
+                                                            data: keypairObjs
+                                                        });
+                                                    }
+                                                });
+                                            }
+                                        },
+                                        supportPrivateRegistry: {
+                                            label: 'Private Registry',
+                                            isBoolean: true,
+                                            isChecked: false,
+                                        },
+                                        username: {
+                                            label: 'label.username',
+                                            dependsOn: 'supportPrivateRegistry',
+                                            validation: {
+                                                required: true
+                                            },
+                                            isHidden: true
+                                        },
+                                        password: {
+                                            label: 'label.password',
+                                            dependsOn: 'supportPrivateRegistry',
+                                            validation: {
+                                                required: true
+                                            },
+                                            isHidden: true,
+                                            isPassword: true
+                                        },
+                                        url: {
+                                            label: 'label.url',
+                                            dependsOn: 'supportPrivateRegistry',
+                                            validation: {
+                                                required: true
+                                            },
+                                            isHidden: true,
+                                        },
+                                        email: {
+                                            label: 'label.email',
+                                            dependsOn: 'supportPrivateRegistry',
+                                            validation: {
+                                                required: true
+                                            },
+                                            isHidden: true,
+                                        }
+                                    }
+                                },
+
+                                action: function(args) {
+                                    var data = {
+                                        name: args.data.name,
+                                        description: args.data.description,
+                                        zoneid: args.data.zone,
+                                        serviceofferingid: args.data.serviceoffering,
+                                        size: args.data.size,
+                                        keypair: args.data.sshkeypair
+                                    };
+
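+                                    // Private registry credentials are only sent when the
+                                    // "Private Registry" checkbox is ticked.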
+                                    if (args.data.supportPrivateRegistry) {
+                                        $.extend(data, {
+                                            dockerregistryusername: args.data.username,
+                                            dockerregistrypassword: args.data.password,
+                                            dockerregistryurl: args.data.url,
+                                            dockerregistryemail: args.data.email
+                                        });
+                                    }
+
+                                    if (args.data.network != null && args.data.network.length > 0) {
+                                        $.extend(data, {
+                                            networkid: args.data.network
+                                        });
+                                    }
+                                    $.ajax({
+                                        url: createURL('createApplicationCluster'),
+                                        data: data,
+                                        success: function(json) {
+                                            var jid = json.createapplicationclusterresponse.jobid;
+                                            args.response.success({
+                                                _custom: {
+                                                    jobId: jid
+                                                }
+                                            });
+                                        },
+                                        error: function(XMLHttpResponse) {
+                                            var errorMsg = parseXMLHttpResponse(XMLHttpResponse);
+                                            args.response.error(errorMsg);
+                                        }
+                                    });
+                                },
+
+
+                                messages: {
+                                    notification: function(args) {
+                                        return 'Container Cluster Add';
+                                    }
+                                },
+                                notification: {
+                                    poll: pollAsyncJobResult
+                                }
+                            }
+                        },
+
+                        dataProvider: function(args) {
+                            var data = {
+                                    page: args.page,
+                                    pagesize: pageSize
+                                };
+                            listViewDataProvider(args, data);
+                            if (args.filterBy != null) { //filter dropdown
+                                if (args.filterBy.kind != null) {
+                                    switch (args.filterBy.kind) {
+                                        case "all":
+                                        break;
+                                        case "running":
+                                        $.extend(data, {
+                                            state: 'Running'
+                                        });
+                                        break;
+                                        case "stopped":
+                                        $.extend(data, {
+                                            state: 'Stopped'
+                                        });
+                                        break;
+                                        case "destroyed":
+                                        $.extend(data, {
+                                            state: 'Destroyed'
+                                        });
+                                        break;
+                                    }
+                                }
+                            }
+
+                            $.ajax({
+                                url: createURL("listApplicationCluster"),
+                                data: data,
+                                dataType: "json",
+                                sync: true,
+                                success: function(json) {
+                                    var items = json.listapplicationclusterresponse.applicationcluster;
+                                    args.response.success({
+                                        actionFilter: ccsActionfilter,
+                                        data: items
+                                    });
+                                }
+                            });
+                        },
+
+                        detailView: {
+                            name: 'container cluster details',
+                            isMaximized: true,
+                            actions: {
+                                start: {
+                                    label: 'Start Container Cluster',
+                                    action: function(args) {
+                                        $.ajax({
+                                            url: createURL("startApplicationCluster"),
+                                            data: {"id": args.context.applicationclusters[0].id},
+                                            dataType: "json",
+                                            async: true,
+                                            success: function(json) {
+                                                var jid = json.startapplicationclusterresponse.jobid;
+                                                args.response.success({
+                                                    _custom: {
+                                                        jobId: jid
+                                                    }
+                                                });
+                                            }
+                                        });
+                                    },
+                                    messages: {
+                                        confirm: function(args) {
+                                            return 'Please confirm that you want to start this container cluster.';
+                                        },
+                                        notification: function(args) {
+                                            return 'Started container cluster.';
+                                        }
+                                    },
+                                    notification: {
+                                        poll: pollAsyncJobResult
+                                    }
+                                },
+                                stop: {
+                                    label: 'Stop Container Cluster',
+                                    action: function(args) {
+                                        $.ajax({
+                                            url: createURL("stopApplicationCluster"),
+                                            data: {"id": args.context.applicationclusters[0].id},
+                                            dataType: "json",
+                                            async: true,
+                                            success: function(json) {
+                                                var jid = json.stopapplicationclusterresponse.jobid;
+                                                args.response.success({
+                                                    _custom: {
+                                                        jobId: jid
+                                                    }
+                                                });
+                                            }
+                                        });
+                                    },
+                                    messages: {
+                                        confirm: function(args) {
+                                            return 'Please confirm that you want to stop this container cluster.';
+                                        },
+                                        notification: function(args) {
+                                            return 'Stopped container cluster.';
+                                        }
+                                    },
+                                    notification: {
+                                        poll: pollAsyncJobResult
+                                    }
+                                },
+                                destroy: {
+                                    label: 'Destroy Cluster',
+                                    compactLabel: 'label.destroy',
+                                    createForm: {
+                                        title: 'Destroy Container Cluster',
+                                        desc: 'Destroy Container Cluster',
+                                        isWarning: true,
+                                        fields: {
+                                        }
+                                    },
+                                    messages: {
+                                        confirm: function(args) {
+                                            return 'Please confirm that you want to destroy this container cluster.';
+                                        },
+                                        notification: function(args) {
+                                            return 'Destroyed container cluster.';
+                                        }
+                                    },
+                                    action: function(args) {
+                                        var data = {
+                                            id: args.context.applicationclusters[0].id,
+                                            expunge: true
+                                        };
+                                        $.ajax({
+                                            url: createURL('deleteApplicationCluster'),
+                                            data: data,
+                                            dataType: "json",
+                                            async: true,
+                                            success: function(json) {
+                                                args.response.success({
+                                                    _custom: {
+                                                        jobId: json.deleteapplicationclusterresponse.jobid,
+                                                        getUpdatedItem: function(json) {
+                                                            return { 'toRemove': true };
+                                                        }
+                                                    }
+                                                });
+                                            }
+                                        });
+                                    },
+                                    notification: {
+                                        poll: pollAsyncJobResult
+                                    }
+                                }
+                            },
+                            tabs: {
+                                // Details tab
+                                details: {
+                                    title: 'label.details',
+                                    fields: [{
+                                        id: {
+                                            label: 'label.id'
+                                        },
+                                        name: {
+                                            label: 'label.name'
+                                        },
+                                        zonename: {
+                                            label: 'label.zone.name'
+                                        },
+                                        size : {
+                                            label: 'Cluster Size'
+                                        },
+                                        cpunumber: {
+                                            label: 'label.num.cpu.cores'
+                                        },
+                                        memory: {
+                                            label: 'label.memory.mb'
+                                        },
+                                        state: {
+                                            label: 'label.state',
+                                        },
+                                        serviceofferingname: {
+                                            label: 'label.compute.offering'
+                                        },
+                                        associatednetworkname: {
+                                            label: 'label.network'
+                                        },
+                                        keypair: {
+                                            label: 'Ssh Key Pair'
+                                        },
+                                        endpoint: {
+                                            label: 'API endpoint',
+                                            isCopyPaste: true
+                                        },
+                                        consoleendpoint: {
+                                            label: 'Dashboard endpoint',
+                                            isCopyPaste: true
+                                        },
+                                        username: {
+                                            label: 'username',
+                                            isCopyPaste: true
+                                        },
+                                        password: {
+                                            label: 'password',
+                                            isCopyPaste: true
+                                        }
+                                    }],
+
+                                    dataProvider: function(args) {
+                                        $.ajax({
+                                            url: createURL("listApplicationCluster&id=" + args.context.applicationclusters[0].id),
+                                            dataType: "json",
+                                            async: true,
+                                            success: function(json) {
+                                                var jsonObj;
+                                                if (json.listapplicationclusterresponse.applicationcluster != null && json.listapplicationclusterresponse.applicationcluster.length > 0)
+                                                jsonObj = json.listapplicationclusterresponse.applicationcluster[0];
+                                                args.response.success({
+                                                    actionFilter: ccsActionfilter,
+                                                    data: jsonObj
+                                                });
+                                            }
+                                        });
+                                    }
+                                },
+                                console : {
+                                    title: 'Dashboard',
+                                    custom : function (args) {
+                                        var showDashboard = function() {
+                                            var endPoint = args.context.applicationclusters[0].consoleendpoint;
+                                            var username = args.context.applicationclusters[0].username;
+                                            var password = args.context.applicationclusters[0].password;
+
+                                            if (!endPoint) {
+                                                return jQuery('<br><p>').html("Container cluster setup is in progress, please check again in a few minutes.");
+                                            }
+
+                                            var protocol = endPoint.split("://")[0] + "://";
+                                            var uri = endPoint.split("://")[1];
+
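+                                            // Embed the cluster's basic-auth credentials in the URL
+                                            // so the dashboard iframe can authenticate directly.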
+                                            var dashboardUrl = endPoint;
+                                            if (username && password && endPoint) {
+                                                dashboardUrl = protocol + username + ":" + password + "@" + uri;
+                                            }
+                                            var popOut = '<p align="right"><a href="' + dashboardUrl + '" target="_blank">Pop-out ↗</a></p>';
+                                            var iframe = popOut + '<iframe src="';
+                                            var iframeArgs = '" width="770" height="560">';
+                                            return jQuery(iframe.concat(dashboardUrl, iframeArgs));
+                                        };
+
+                                        var showNotice = function() {
+                                            var msg = "The dashboard provides a GUI that allows you to deploy your containerized applications within your container clusters using Kubernetes. To access the dashboard from your browser, you need to import the container cluster root CA certificate into your browser.";
+                                            var links = [
+                                                {name: 'Chrome on Windows', url: 'https://support.globalsign.com/customer/portal/articles/1211541-install-client-digital-certificate---windows-using-chrome'},
+                                                {name: 'Firefox on Windows', url: 'https://support.globalsign.com/customer/portal/articles/1211486-install-client-digital-certificate---firefox-for-windows'},
+                                                {name: 'IE on Windows', url: 'https://msdn.microsoft.com/en-us/library/cc750534.aspx'}
+                                            ];
+                                            var linkMessage = $('<br><br><span>').html("You may use the following links for step-by-step instructions on importing the root CA certificate in the following browsers:");
+                                            $.each(links, function(idx, item) {
+                                                linkMessage.append($('<br><br><a href="' + item.url + '">').html(item.name));
+                                            });
+                                            return $(
+                                                $('<span>').addClass('message').html(msg)
+                                            ).dialog({
+                                                title: "Have you installed CA certificate?",
+                                                dialogClass: args.isWarning ? 'confirm warning': 'confirm',
+                                                closeOnEscape: false,
+                                                zIndex: 5000,
+                                                buttons: [{
+                                                    text: "I've imported the certificate",
+                                                    'class': 'cancel',
+                                                    'style': 'height: 40px',
+                                                    click: function() {
+                                                        $.cookie('ccs.show.cacert.msg', '1');
+                                                        $(this).dialog('destroy');
+                                                        $('div.overlay').remove();
+                                                        $('.hovered-elem').hide();
+                                                    }
+                                                }, {
+                                                    text: "Download CA Certificate",
+                                                    'class': 'ok',
+                                                    'style': 'height: 40px',
+                                                    click: function() {
+                                                        downloadCaCert();
+                                                    }
+                                                }]
+                                            }).closest('.ui-dialog').overlay();
+                                        };
+
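+                                        // Show the CA-certificate notice only once per browser; the cookie is set once the user confirms the import.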
+                                        if (!$.cookie('ccs.show.cacert.msg')) {
+                                            showNotice();
+                                        }
+                                        return showDashboard();
+                                    }
+                                },
+                                clusterinstances: {
+                                    title: 'Instances',
+                                    listView: {
+                                        section: 'clusterinstances',
+                                        fields: {
+                                            name: {
+                                                label: 'label.name',
+                                                truncate: true
+                                            },
+                                            instancename: {
+                                                label: 'label.internal.name'
+                                            },
+                                            displayname: {
+                                                label: 'label.display.name',
+                                                truncate: true
+                                            },
+                                            ipaddress: {
+                                                label: 'label.ip.address'
+                                            },
+                                            zonename: {
+                                                label: 'label.zone.name'
+                                            },
+                                            state: {
+                                                label: 'label.state',
+                                                indicator: {
+                                                    'Running': 'on',
+                                                    'Stopped': 'off',
+                                                    'Destroyed': 'off',
+                                                    'Error': 'off'
+                                                }
+                                            }
+                                        },
+                                        dataProvider: function(args) {
+                                            var data = {};
+                                            listViewDataProvider(args, data);
+
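+                                            // Fetch the cluster first to collect its member VM ids, then list those VMs for display.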
+                                            $.ajax({
+                                                url: createURL("listApplicationCluster"),
+                                                data: {"id": args.context.applicationclusters[0].id},
+                                                success: function(json) {
+                                                    var items = json.listapplicationclusterresponse.applicationcluster;
+
+                                                    var vmlist = [];
+                                                    $.each(items, function(idx, item) {
+                                                        if ("virtualmachineids" in item) {
+                                                            vmlist = vmlist.concat(item.virtualmachineids);
+                                                        }
+                                                    });
+
+                                                    $.extend(data, {
+                                                        ids: vmlist.join()
+                                                    });
+
+                                                    if (data.ids.length == 0) {
+                                                        args.response.success({
+                                                            data: []
+                                                        });
+                                                    } else {
+                                                        $.ajax({
+                                                            url: createURL('listVirtualMachines'),
+                                                            data: data,
+                                                            success: function(json) {
+                                                                var items = json.listvirtualmachinesresponse.virtualmachine;
+                                                                if (items) {
+                                                                    $.each(items, function(idx, vm) {
+                                                                        if (vm.nic && vm.nic.length > 0 && vm.nic[0].ipaddress) {
+                                                                            items[idx].ipaddress = vm.nic[0].ipaddress;
+                                                                        }
+                                                                    });
+                                                                }
+                                                                args.response.success({
+                                                                    data: items
+                                                                });
+                                                            },
+                                                            error: function(XMLHttpResponse) {
+                                                                cloudStack.dialog.notice({
+                                                                    message: parseXMLHttpResponse(XMLHttpResponse)
+                                                                });
+                                                                args.response.error();
+                                                            }
+                                                        });
+                                                    }
+                                                }
+                                            });
+                                        }
+                                    }
+                                },
+                                firewall: {
+                                    title: 'label.firewall',
+                                    custom: function(args) {
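+                                        // Reuse the standard IP rules view: load the cluster's network and its source/static NAT public IP into the context first.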
+                                        $.ajax({
+                                            url: createURL('listNetworks'),
+                                            data: {id: args.context.applicationclusters[0].networkid, listAll: true},
+                                            async: false,
+                                            dataType: "json",
+                                            success: function(json) {
+                                                var network = json.listnetworksresponse.network;
+                                                $.extend(args.context, {"networks": [network]});
+                                            }
+                                        });
+
+                                        $.ajax({
+                                            url: createURL('listPublicIpAddresses'),
+                                            data: {associatedNetworkId: args.context.applicationclusters[0].networkid, listAll: true, forvirtualnetwork: true},
+                                            async: false,
+                                            dataType: "json",
+                                            success: function(json) {
+                                                var ips = json.listpublicipaddressesresponse.publicipaddress;
+                                                if (!ips || ips.length == 0) {
+                                                    return;
+                                                }
+                                                var fwip = ips[0];
+                                                $.each(ips, function(idx, ip) {
+                                                    if (ip.issourcenat || ip.isstaticnat) {
+                                                        fwip = ip;
+                                                        return false;
+                                                    }
+                                                });
+                                                $.extend(args.context, {"ipAddresses": [fwip]});
+                                            }
+                                        });
+                                        return cloudStack.sections.network.sections.ipAddresses.listView.detailView.tabs.ipRules.custom(args);
+                                    },
+                                },
+                            }
+                        }
+                    }
+                },
+            }
+
+        });
+    };
+
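+    // Allowed actions depend on cluster state: stopped clusters can be started, running ones stopped, and any cluster that is not (being) destroyed can be destroyed.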
+    var ccsActionfilter = cloudStack.actionFilter.ccsActionfilter = function(args) {
+        var jsonObj = args.context.item;
+        var allowedActions = [];
+        if (jsonObj.state != "Destroyed" && jsonObj.state != "Destroying") {
+            if (jsonObj.state == "Stopped") {
+                allowedActions.push("start");
+            } else {
+                allowedActions.push("stop");
+            }
+            allowedActions.push("destroy");
+        }
+        return allowedActions;
+    };
+
+}(cloudStack));
diff --git a/ui/plugins/applicationClusters/applicationclusters.css b/ui/plugins/applicationClusters/applicationclusters.css
new file mode 100644
index 00000000000..2baacd5d04a
--- /dev/null
+++ b/ui/plugins/applicationClusters/applicationclusters.css
@@ -0,0 +1,15 @@
+/*
+ * Copyright 2016 ShapeBlue Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
diff --git a/ui/plugins/applicationClusters/config.js b/ui/plugins/applicationClusters/config.js
new file mode 100644
index 00000000000..3114b6e0e76
--- /dev/null
+++ b/ui/plugins/applicationClusters/config.js
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2016 ShapeBlue Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+(function (cloudStack) {
+  cloudStack.plugins.applicationcluster.config = {
+    title: 'Application Cluster Service',
+    desc: 'Application Cluster Service',
+    externalLink: 'http://shapeblue.com/',
+    authorName: 'Shape Blue Ltd',
+    authorEmail: 'CCS-help@shapeblue.com'
+  };
+}(cloudStack));
diff --git a/ui/plugins/applicationClusters/icon.png b/ui/plugins/applicationClusters/icon.png
new file mode 100644
index 00000000000..1d049675c27
Binary files /dev/null and b/ui/plugins/applicationClusters/icon.png differ
diff --git a/ui/plugins/plugins.js b/ui/plugins/plugins.js
index 21da7a07f4d..391322e4c45 100644
--- a/ui/plugins/plugins.js
+++ b/ui/plugins/plugins.js
@@ -16,7 +16,8 @@
 // under the License.
 (function($, cloudStack) {
   cloudStack.plugins = [
-    //'testPlugin',
+    //'testPlugin',
+    'applicationClusters',
     'quota'
   ];
 }(jQuery, cloudStack));
diff --git a/utils/conf/k8s-master.yml b/utils/conf/k8s-master.yml
new file mode 100644
index 00000000000..164208546ca
--- /dev/null
+++ b/utils/conf/k8s-master.yml
@@ -0,0 +1,241 @@
+#cloud-config
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+---
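+# Values of the form {{ ... }} are placeholders that are substituted when this template is rendered for a specific cluster.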
+write-files:
+  - path: /etc/conf.d/nfs
+    permissions: '0644'
+    content: |
+      OPTS_RPC_MOUNTD=""
+
+  - path: /srv/kubernetes/basicauth.csv
+    permissions: '0644'
+    content: |
+      {{ k8s_master.password }},{{ k8s_master.user }},1
+
+  - path: /opt/bin/ca.crt
+    permissions: '0644'
+    content: |
+      {{ k8s_master.ca.crt }}
+
+  - path: /opt/bin/kube-apiserver.crt
+    permissions: '0644'
+    content: |
+      {{ k8s_master.apiserver.crt }}
+
+  - path: /opt/bin/kube-apiserver.key
+    permissions: '0644'
+    content: |
+      {{ k8s_master.apiserver.key }}
+
+  - path: /opt/bin/wupiao
+    permissions: '0755'
+    content: |
+      #!/bin/bash
+      # [w]ait [u]ntil [p]ort [i]s [a]ctually [o]pen
+      [ -n "$1" ] && \
+        until curl -o /dev/null -sIf http://${1}; do \
+          sleep 1 && echo .;
+        done;
+      exit $?
+
+  - path: /srv/kubernetes/manifests/kube-system.json
+    content: |
+        {
+          "apiVersion": "v1",
+          "kind": "Namespace",
+          "metadata": {
+            "name": "kube-system"
+          }
+        }
+
+  - path: /opt/bin/install-kube-system
+    permissions: '0700'
+    owner: root:root
+    content: |
+      #!/bin/bash -e
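+      # Fetch the Kubernetes dashboard and SkyDNS manifests, then register them with the local API server under the kube-system namespace.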
+      /usr/bin/curl -L -o /srv/kubernetes/manifests/dashboard-controller.json https://raw.githubusercontent.com/skippbox/configk8s/master/json/dashboard-controller.json
+
+      /usr/bin/curl -L -o /srv/kubernetes/manifests/dashboard-service.json https://raw.githubusercontent.com/skippbox/configk8s/master/json/dashboard-service.json
+
+      /usr/bin/curl -L -o /srv/kubernetes/manifests/skydns-svc.json https://raw.githubusercontent.com/skippbox/configk8s/master/json/skydns-svc.json
+
+      /usr/bin/curl -L -o /srv/kubernetes/manifests/skydns-rc.json https://raw.githubusercontent.com/skippbox/configk8s/master/json/skydns-rc.json
+
+      /usr/bin/curl -H "Content-Type: application/json" -XPOST -d @"/srv/kubernetes/manifests/kube-system.json" "http://127.0.0.1:8080/api/v1/namespaces"
+
+      /usr/bin/curl -H "Content-Type: application/json" -XPOST -d @"/srv/kubernetes/manifests/dashboard-service.json" "http://127.0.0.1:8080/api/v1/namespaces/kube-system/services"
+
+      /usr/bin/curl -H "Content-Type: application/json" -XPOST -d @"/srv/kubernetes/manifests/dashboard-controller.json" "http://127.0.0.1:8080/api/v1/namespaces/kube-system/replicationcontrollers"
+
+      /usr/bin/curl -H "Content-Type: application/json" -XPOST -d @"/srv/kubernetes/manifests/skydns-svc.json" "http://127.0.0.1:8080/api/v1/namespaces/kube-system/services"
+
+      /usr/bin/curl -H "Content-Type: application/json" -XPOST -d @"/srv/kubernetes/manifests/skydns-rc.json" "http://127.0.0.1:8080/api/v1/namespaces/kube-system/replicationcontrollers"
+
+hostname: master
+
+coreos:
+
+  etcd2:
+    name: master
+    listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001
+    advertise-client-urls: http://$private_ipv4:2379,http://$private_ipv4:4001
+    initial-cluster-token: k8s_etcd
+    listen-peer-urls: http://$private_ipv4:2380,http://$private_ipv4:7001
+    initial-advertise-peer-urls: http://$private_ipv4:2380
+    initial-cluster: master=http://$private_ipv4:2380
+    initial-cluster-state: new
+
+  fleet:
+    metadata: "role=master"
+
+  units:
+    - name: fleet.service
+      command: start
+
+    - name: flanneld.service
+      command: start
+      drop-ins:
+        - name: 50-network-config.conf
+          content: |
+            [Unit]
+            Requires=etcd2.service
+            [Service]
+            ExecStartPre=/usr/bin/etcdctl set /coreos.com/network/config '{"Network":"10.244.0.0/16", "Backend": {"Type": "vxlan"}}'
+
+    - name: docker.service
+      command: start
+      drop-ins:
+        - name: 40-flannel.conf
+          content: |
+            [Unit]
+            Requires=flanneld.service
+            After=flanneld.service
+
+    - name: generate-serviceaccount-key.service
+      command: start
+      content: |
+        [Unit]
+        Description=Generate service-account key file
+        [Service]
+        ExecStartPre=-/usr/bin/mkdir -p /opt/bin
+        ExecStart=/bin/openssl genrsa -out /opt/bin/kube-serviceaccount.key 2048 2>/dev/null
+        RemainAfterExit=yes
+        Type=oneshot
+
+    - name: setup-network-environment.service
+      command: start
+      content: |
+        [Unit]
+        Description=Setup Network Environment
+        Documentation=https://github.com/kelseyhightower/setup-network-environment
+        Requires=network-online.target
+        After=network-online.target
+        [Service]
+        ExecStartPre=-/usr/bin/mkdir -p /opt/bin
+        ExecStartPre=/usr/bin/curl -L -o /opt/bin/setup-network-environment -z /opt/bin/setup-network-environment https://github.com/kelseyhightower/setup-network-environment/releases/download/v1.0.0/setup-network-environment
+        ExecStartPre=/usr/bin/chmod +x /opt/bin/setup-network-environment
+        ExecStart=/opt/bin/setup-network-environment
+        RemainAfterExit=yes
+        Type=oneshot
+
+    - name: install-kube-system.service
+      command: start
+      content: |
+        [Unit]
+        Requires=kube-apiserver.service
+        After=kube-apiserver.service
+
+        [Service]
+        Type=simple
+        StartLimitInterval=0
+        Restart=on-failure
+        ExecStartPre=/usr/bin/curl http://127.0.0.1:8080/version
+        ExecStart=/opt/bin/install-kube-system
+
+    - name: kube-apiserver.service
+      command: start
+      content: |
+        [Unit]
+        Description=Kubernetes API Server
+        Documentation=https://github.com/GoogleCloudPlatform/kubernetes
+        Requires=setup-network-environment.service etcd2.service generate-serviceaccount-key.service
+        After=setup-network-environment.service etcd2.service generate-serviceaccount-key.service
+        [Service]
+        EnvironmentFile=/etc/network-environment
+        ExecStartPre=-/usr/bin/mkdir -p /opt/bin
+        ExecStartPre=/usr/bin/curl -L -o /opt/bin/kube-apiserver -z /opt/bin/kube-apiserver https://storage.googleapis.com/kubernetes-release/release/v1.2.4/bin/linux/amd64/kube-apiserver
+        ExecStartPre=/usr/bin/chmod +x /opt/bin/kube-apiserver
+        ExecStartPre=/opt/bin/wupiao 127.0.0.1:2379/v2/machines
+        ExecStart=/opt/bin/kube-apiserver \
+        --service-account-key-file=/opt/bin/kube-apiserver.key \
+        --service-account-lookup=false \
+        --basic-auth-file=/srv/kubernetes/basicauth.csv \
+        --admission-control=NamespaceLifecycle,NamespaceAutoProvision,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota \
+        --runtime-config=api/v1 \
+        --allow-privileged=true \
+        --insecure-port=8080 \
+        --secure-port=443 \
+        --client-ca-file=/opt/bin/ca.crt \
+        --tls-cert-file=/opt/bin/kube-apiserver.crt \
+        --tls-private-key-file=/opt/bin/kube-apiserver.key \
+        --service-cluster-ip-range=10.0.0.0/16 \
+        --etcd-servers=http://127.0.0.1:2379 \
+        --bind-address=0.0.0.0 \
+        --insecure-bind-address=0.0.0.0 \
+        --logtostderr=true
+        Restart=always
+        RestartSec=10
+
+    - name: kube-controller-manager.service
+      command: start
+      content: |
+        [Unit]
+        Description=Kubernetes Controller Manager
+        Documentation=https://github.com/GoogleCloudPlatform/kubernetes
+        Requires=kube-apiserver.service
+        After=kube-apiserver.service
+        [Service]
+        ExecStartPre=/usr/bin/curl -L -o /opt/bin/kube-controller-manager -z /opt/bin/kube-controller-manager https://storage.googleapis.com/kubernetes-release/release/v1.2.4/bin/linux/amd64/kube-controller-manager
+        ExecStartPre=/usr/bin/chmod +x /opt/bin/kube-controller-manager
+        ExecStart=/opt/bin/kube-controller-manager \
+        --service-account-private-key-file=/opt/bin/kube-apiserver.key \
+        --root-ca-file=/opt/bin/ca.crt \
+        --master=127.0.0.1:8080 \
+        --logtostderr=true
+        Restart=always
+        RestartSec=10
+
+    - name: kube-scheduler.service
+      command: start
+      content: |
+        [Unit]
+        Description=Kubernetes Scheduler
+        Documentation=https://github.com/GoogleCloudPlatform/kubernetes
+        Requires=kube-apiserver.service
+        After=kube-apiserver.service
+        [Service]
+        ExecStartPre=/usr/bin/curl -L -o /opt/bin/kube-scheduler -z /opt/bin/kube-scheduler https://storage.googleapis.com/kubernetes-release/release/v1.2.4/bin/linux/amd64/kube-scheduler
+        ExecStartPre=/usr/bin/chmod +x /opt/bin/kube-scheduler
+        ExecStart=/opt/bin/kube-scheduler --master=127.0.0.1:8080
+        Restart=always
+        RestartSec=10
+
+  update:
+    group: alpha
+    reboot-strategy: off
diff --git a/utils/conf/k8s-node.yml b/utils/conf/k8s-node.yml
new file mode 100644
index 00000000000..763898ae752
--- /dev/null
+++ b/utils/conf/k8s-node.yml
@@ -0,0 +1,132 @@
+#cloud-config
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
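+# Node cloud-config: the node joins etcd as a proxy and runs flannel, kubelet and kube-proxy against the master; {{ ... }} placeholders are substituted when the template is rendered.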
+write-files:
+  - path: /opt/bin/wupiao
+    permissions: '0755'
+    content: |
+      #!/bin/bash
+      # [w]ait [u]ntil [p]ort [i]s [a]ctually [o]pen
+      [ -n "$1" ] && [ -n "$2" ] && while ! curl --output /dev/null \
+        --silent --head --fail \
+        http://${1}:${2}; do sleep 1 && echo -n .; done;
+      exit $?
+
+  - path: /opt/bin/ca.crt
+    permissions: '0644'
+    content: |
+      {{ k8s_node.ca.crt }}
+
+  - path: /opt/bin/kubelet.crt
+    permissions: '0644'
+    content: |
+      {{ k8s_node.client.crt }}
+
+  - path: /opt/bin/kubelet.key
+    permissions: '0644'
+    content: |
+      {{ k8s_node.client.key }}
+
+coreos:
+  etcd2:
+    listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001
+    advertise-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001
+    initial-cluster: master=http://{{ k8s_master.default_ip }}:2380
+    proxy: on
+  fleet:
+    metadata: "role=node"
+  units:
+    - name: etcd2.service
+      command: start
+    - name: fleet.service
+      command: start
+    - name: flanneld.service
+      command: start
+    - name: docker.service
+      drop-ins:
+        - name: 40-flannel.conf
+          content: |
+            [Unit]
+            Requires=flanneld.service
+            After=flanneld.service
+      command: start
+    - name: setup-network-environment.service
+      command: start
+      content: |
+        [Unit]
+        Description=Setup Network Environment
+        Documentation=https://github.com/kelseyhightower/setup-network-environment
+        Requires=network-online.target
+        After=network-online.target
+        [Service]
+        ExecStartPre=-/usr/bin/mkdir -p /opt/bin
+        ExecStartPre=/usr/bin/curl -L -o /opt/bin/setup-network-environment -z /opt/bin/setup-network-environment https://github.com/kelseyhightower/setup-network-environment/releases/download/v1.0.0/setup-network-environment
+        ExecStartPre=/usr/bin/chmod +x /opt/bin/setup-network-environment
+        ExecStart=/opt/bin/setup-network-environment
+        RemainAfterExit=yes
+        Type=oneshot
+    - name: kube-proxy.service
+      command: start
+      content: |
+        [Unit]
+        Description=Kubernetes Proxy
+        Documentation=https://github.com/GoogleCloudPlatform/kubernetes
+        Requires=setup-network-environment.service
+        After=setup-network-environment.service
+        [Service]
+        ExecStartPre=/usr/bin/curl -L -o /opt/bin/kube-proxy -z /opt/bin/kube-proxy https://storage.googleapis.com/kubernetes-release/release/v1.2.4/bin/linux/amd64/kube-proxy
+        ExecStartPre=/usr/bin/chmod +x /opt/bin/kube-proxy
+        # wait for kubernetes master to be up and ready
+        ExecStartPre=/opt/bin/wupiao {{ k8s_master.default_ip }} 8080
+        ExecStart=/opt/bin/kube-proxy \
+        --master={{ k8s_master.default_ip }}:8080 \
+        --logtostderr=true
+        Restart=always
+        RestartSec=10
+    - name: kube-kubelet.service
+      command: start
+      content: |
+        [Unit]
+        Description=Kubernetes Kubelet
+        Documentation=https://github.com/GoogleCloudPlatform/kubernetes
+        Requires=setup-network-environment.service
+        After=setup-network-environment.service
+        [Service]
+        EnvironmentFile=/etc/network-environment
+        ExecStartPre=/usr/bin/curl -L -o /opt/bin/kubelet -z /opt/bin/kubelet https://storage.googleapis.com/kubernetes-release/release/v1.2.4/bin/linux/amd64/kubelet
+        ExecStartPre=/usr/bin/chmod +x /opt/bin/kubelet
+        # wait for kubernetes master to be up and ready
+        ExecStartPre=/opt/bin/wupiao {{ k8s_master.default_ip }} 8080
+        ExecStart=/opt/bin/kubelet \
+        --address=0.0.0.0 \
+        --port=10250 \
+        --cluster-dns=10.0.0.10 \
+        --cluster-domain=cluster.local \
+        --hostname-override=${DEFAULT_IPV4} \
+        --api-servers={{ k8s_master.default_ip }}:8080 \
+        --tls-cert-file=/opt/bin/kubelet.crt \
+        --tls-private-key-file=/opt/bin/kubelet.key \
+        --allow-privileged=true \
+        --logtostderr=true \
+        --cadvisor-port=4194 \
+        --healthz-bind-address=0.0.0.0 \
+        --healthz-port=10248
+        Restart=always
+        RestartSec=10
+  update:
+    group: alpha
+    reboot-strategy: off


 

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


> Application Container Service
> -----------------------------
>
>                 Key: CLOUDSTACK-9815
>                 URL: https://issues.apache.org/jira/browse/CLOUDSTACK-9815
>             Project: CloudStack
>          Issue Type: New Feature
>      Security Level: Public(Anyone can view this level - this is the default.) 
>            Reporter: Daan Hoogland
>            Priority: Major
>




--
This message was sent by Atlassian JIRA
(v7.6.3#76005)