You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@airflow.apache.org by ka...@apache.org on 2020/01/15 20:36:25 UTC

[airflow-on-k8s-operator] branch master updated: First commit. Performing code donation. (#1)

This is an automated email from the ASF dual-hosted git repository.

kaxilnaik pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/airflow-on-k8s-operator.git


The following commit(s) were added to refs/heads/master by this push:
     new 7314aaf  First commit. Performing code donation. (#1)
7314aaf is described below

commit 7314aafdca3f2827b3928e9775d37df513b9f37f
Author: Aizhamal Nurmamat kyzy <ai...@gmail.com>
AuthorDate: Wed Jan 15 12:36:15 2020 -0800

    First commit. Performing code donation. (#1)
---
 Dockerfile                                         |  19 +
 Gopkg.lock                                         | 728 +++++++++++++++++
 Gopkg.toml                                         |  45 +
 LICENSE                                            | 169 ++++
 Makefile                                           |  81 ++
 README                                             |   0
 README.md                                          |  49 ++
 cloudbuild.yaml                                    |  25 +
 cmd/manager/main.go                                |  81 ++
 config/crds/airflow_v1alpha1_airflowbase.yaml      | 217 +++++
 config/crds/airflow_v1alpha1_airflowcluster.yaml   | 373 +++++++++
 config/default/kustomization.yaml                  |  49 ++
 config/default/manager/manager.yaml                |  98 +++
 config/default/manager_image_patch.yaml            |  37 +
 config/default/rbac/rbac_role.yaml                 | 191 +++++
 config/default/rbac/rbac_role_binding.yaml         |  29 +
 config/rbac/rbac_role.yaml                         | 191 +++++
 config/rbac/rbac_role_binding.yaml                 |  29 +
 config/samples/airflow_v1alpha1_airflowbase.yaml   |  25 +
 .../samples/airflow_v1alpha1_airflowcluster.yaml   |  25 +
 docs/airflow-base.png                              | Bin 0 -> 32950 bytes
 docs/airflow-cluster.png                           | Bin 0 -> 45466 bytes
 docs/airflow-multi-node.png                        | Bin 0 -> 29734 bytes
 docs/airflow-pod.png                               | Bin 0 -> 37053 bytes
 docs/airflow-region-spread.png                     | Bin 0 -> 18013 bytes
 docs/airflow-zone-spread.png                       | Bin 0 -> 18512 bytes
 docs/api.md                                        | 256 ++++++
 docs/design.md                                     | 127 +++
 docs/development.md                                |  71 ++
 docs/quickstart.md                                 | 123 +++
 docs/userguide.md                                  |  37 +
 hack/appcrd.yaml                                   | 142 ++++
 hack/boilerplate.go.txt                            |  15 +
 hack/sample/cloudsql-celery/base.yaml              |  28 +
 hack/sample/cloudsql-celery/cluster.yaml           |  43 +
 hack/sample/cloudsql-celery/sqlproxy-secret.yaml   |  24 +
 hack/sample/cloudsql-k8s/cluster.yaml              |  38 +
 hack/sample/cloudsql-local/cluster.yaml            |  29 +
 hack/sample/mysql-celery-gcs/cluster.yaml          |  42 +
 hack/sample/mysql-celery/base.yaml                 |  26 +
 hack/sample/mysql-celery/cluster.yaml              |  46 ++
 hack/sample/mysql-k8s/cluster.yaml                 |  40 +
 hack/sample/mysql-local/cluster.yaml               |  35 +
 .../postgres-celery-memorystore/cluster.yaml       |  47 ++
 hack/sample/postgres-celery-redis/cluster.yaml     |  45 +
 .../sample/postgres-celery-redis/redis-secret.yaml |  24 +
 hack/sample/postgres-celery-redis/redis.yaml       |  59 ++
 hack/sample/postgres-celery/base.yaml              |  26 +
 hack/sample/postgres-celery/cluster.yaml           |  43 +
 hack/sample/postgres-k8s/cluster.yaml              |  38 +
 hack/sample/postgres-local/cluster.yaml            |  30 +
 pkg/apis/addtoscheme_airflow_v1alpha1.go           |  25 +
 pkg/apis/airflow/group.go                          |  17 +
 pkg/apis/airflow/v1alpha1/airflowbase_types.go     | 560 +++++++++++++
 .../airflow/v1alpha1/airflowbase_types_test.go     |  57 ++
 pkg/apis/airflow/v1alpha1/airflowcluster_types.go  | 616 ++++++++++++++
 .../airflow/v1alpha1/airflowcluster_types_test.go  |  57 ++
 pkg/apis/airflow/v1alpha1/doc.go                   |  22 +
 pkg/apis/airflow/v1alpha1/register.go              |  45 +
 pkg/apis/airflow/v1alpha1/v1alpha1_suite_test.go   |  54 ++
 pkg/apis/airflow/v1alpha1/zz_generated.deepcopy.go | 794 ++++++++++++++++++
 pkg/apis/apis.go                                   |  32 +
 pkg/controller/add_airflowbase.go                  |  25 +
 pkg/controller/add_airflowcluster.go               |  25 +
 .../airflowbase/airflowbase_controller.go          | 352 ++++++++
 .../airflowbase_controller_suite_test.go           |  74 ++
 .../airflowbase/airflowbase_controller_test.go     |  98 +++
 .../airflowcluster/airflowcluster_controller.go    | 906 +++++++++++++++++++++
 .../airflowcluster_controller_suite_test.go        |  74 ++
 .../airflowcluster_controller_test.go              | 133 +++
 pkg/controller/application/application.go          | 111 +++
 .../application/application_suite_test.go          |  28 +
 pkg/controller/application/application_test.go     |  99 +++
 pkg/controller/application/doc.go                  |  18 +
 pkg/controller/common/common.go                    | 119 +++
 pkg/controller/controller.go                       |  33 +
 pkg/webhook/webhook.go                             |  36 +
 templates/airflow-configmap.yaml                   | 378 +++++++++
 templates/base-application.yaml                    |  53 ++
 templates/cluster-application.yaml                 |  53 ++
 templates/flower-sts.yaml                          |  70 ++
 templates/headlesssvc.yaml                         |  36 +
 templates/mysql-sts.yaml                           | 119 +++
 templates/nfs-sts.yaml                             |  81 ++
 templates/pdb.yaml                                 |  32 +
 templates/postgres-sts.yaml                        | 104 +++
 templates/redis-sts.yaml                           | 106 +++
 templates/rolebinding.yaml                         |  34 +
 templates/scheduler-sts.yaml                       |  73 ++
 templates/secret.yaml                              |  29 +
 templates/serviceaccount.yaml                      |  25 +
 templates/sqlproxy-sts.yaml                        |  78 ++
 templates/storage.yaml                             |  28 +
 templates/svc.yaml                                 |  36 +
 templates/ui-sts.yaml                              |  81 ++
 templates/worker-sts.yaml                          |  71 ++
 test/e2e/base_test.go                              |  99 +++
 test/e2e/cluster_test.go                           | 190 +++++
 test/e2e/gcp_test.go                               | 193 +++++
 .../controller-reconciler/pkg/finalizer/doc.go     |  18 +
 .../pkg/finalizer/finalizer.go                     |  80 ++
 .../pkg/finalizer/finalizer_suite_test.go          |  28 +
 .../pkg/finalizer/finalizer_test.go                |  63 ++
 .../pkg/finalizer/zz_generated.deepcopy.go         |  19 +
 .../pkg/genericreconciler/doc.go                   |  17 +
 .../pkg/genericreconciler/genericreconciler.go     | 442 ++++++++++
 .../genericreconciler_suite_test.go                |  28 +
 .../genericreconciler/genericreconciler_test.go    | 109 +++
 .../pkg/genericreconciler/handler.go               | 115 +++
 .../pkg/genericreconciler/types.go                 |  37 +
 .../pkg/genericreconciler/v1alpha1/crfoo.go        | 148 ++++
 .../pkg/genericreconciler/v1alpha1/doc.go          |  18 +
 .../pkg/genericreconciler/v1alpha1/testutil.go     |  18 +
 .../v1alpha1/zz_generated.deepcopy.go              | 132 +++
 .../controller-reconciler/pkg/reconciler/doc.go    |  17 +
 .../pkg/reconciler/manager/gcp/disk/manager.go     | 257 ++++++
 .../pkg/reconciler/manager/gcp/gcs/manager.go      | 240 ++++++
 .../pkg/reconciler/manager/gcp/redis/manager.go    | 288 +++++++
 .../pkg/reconciler/manager/gcp/utils.go            | 139 ++++
 .../pkg/reconciler/manager/interface.go            |  28 +
 .../pkg/reconciler/manager/internal.go             |  38 +
 .../pkg/reconciler/manager/k8s/manager.go          | 647 +++++++++++++++
 .../pkg/reconciler/manager/types.go                |  19 +
 .../pkg/reconciler/resource.go                     |  79 ++
 .../pkg/reconciler/resource_suite_test.go          |  28 +
 .../pkg/reconciler/resource_test.go                | 176 ++++
 .../pkg/reconciler/testdata/sts.yaml               |  66 ++
 .../pkg/reconciler/testdata/unknown_rsrc.yaml      |  18 +
 .../controller-reconciler/pkg/reconciler/types.go  |  59 ++
 .../controller-reconciler/pkg/status/condition.go  | 169 ++++
 .../controller-reconciler/pkg/status/doc.go        |  17 +
 .../controller-reconciler/pkg/status/status.go     | 106 +++
 .../pkg/status/status_suite_test.go                |  28 +
 .../pkg/status/status_test.go                      | 240 ++++++
 .../controller-reconciler/pkg/status/types.go      | 139 ++++
 .../pkg/status/zz_generated.deepcopy.go            | 160 ++++
 .../controller-reconciler/pkg/storage/doc.go       |  18 +
 .../controller-reconciler/pkg/storage/storage.go   |  69 ++
 .../pkg/storage/storage_suite_test.go              |  28 +
 .../pkg/storage/storage_test.go                    |  96 +++
 .../controller-reconciler/pkg/storage/types.go     |  54 ++
 .../pkg/storage/zz_generated.deepcopy.go           |  92 +++
 .../controller-reconciler/pkg/test/framework.go    | 296 +++++++
 .../controller-reconciler/pkg/test/helper.go       |  66 ++
 144 files changed, 15223 insertions(+)

diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..3ab6ccd
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,19 @@
+# Build the manager binary
+FROM golang:1.10.3 as builder
+
+# Copy in the go src
+WORKDIR /go/src/k8s.io/airflow-operator
+COPY pkg/    pkg/
+COPY cmd/    cmd/
+COPY vendor/ vendor/
+
+# Build
+RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -o manager k8s.io/airflow-operator/cmd/manager
+
+# Copy the controller-manager into a thin image
+FROM ubuntu:latest
+WORKDIR /root/
+COPY --from=builder /go/src/k8s.io/airflow-operator/manager .
+COPY templates/ templates/
+COPY config/crds/ crds/
+ENTRYPOINT ["./manager"]
diff --git a/Gopkg.lock b/Gopkg.lock
new file mode 100644
index 0000000..2d94e0a
--- /dev/null
+++ b/Gopkg.lock
@@ -0,0 +1,728 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+  name = "cloud.google.com/go"
+  packages = ["compute/metadata"]
+  revision = "74b12019e2aa53ec27882158f59192d7cd6d1998"
+  version = "v0.33.1"
+
+[[projects]]
+  name = "github.com/davecgh/go-spew"
+  packages = ["spew"]
+  revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
+  version = "v1.1.1"
+
+[[projects]]
+  name = "github.com/emicklei/go-restful"
+  packages = [
+    ".",
+    "log"
+  ]
+  revision = "3eb9738c1697594ea6e71a7156a9bb32ed216cf0"
+  version = "v2.8.0"
+
+[[projects]]
+  name = "github.com/ghodss/yaml"
+  packages = ["."]
+  revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7"
+  version = "v1.0.0"
+
+[[projects]]
+  name = "github.com/go-logr/logr"
+  packages = ["."]
+  revision = "9fb12b3b21c5415d16ac18dc5cd42c1cfdd40c4e"
+  version = "v0.1.0"
+
+[[projects]]
+  name = "github.com/go-logr/zapr"
+  packages = ["."]
+  revision = "7536572e8d55209135cd5e7ccf7fce43dca217ab"
+  version = "v0.1.0"
+
+[[projects]]
+  name = "github.com/gobuffalo/envy"
+  packages = ["."]
+  revision = "b29bf6b8134f3398b9333ba1893c58620152edb0"
+  version = "v1.6.9"
+
+[[projects]]
+  name = "github.com/gogo/protobuf"
+  packages = [
+    "proto",
+    "sortkeys"
+  ]
+  revision = "636bf0302bc95575d69441b25a2603156ffdddf1"
+  version = "v1.1.1"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/golang/glog"
+  packages = ["."]
+  revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/golang/groupcache"
+  packages = ["lru"]
+  revision = "c65c006176ff7ff98bb916961c7abbc6b0afc0aa"
+
+[[projects]]
+  name = "github.com/golang/protobuf"
+  packages = [
+    "proto",
+    "ptypes",
+    "ptypes/any",
+    "ptypes/duration",
+    "ptypes/timestamp"
+  ]
+  revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5"
+  version = "v1.2.0"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/google/btree"
+  packages = ["."]
+  revision = "4030bb1f1f0c35b30ca7009e9ebd06849dd45306"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/google/gofuzz"
+  packages = ["."]
+  revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1"
+
+[[projects]]
+  name = "github.com/google/uuid"
+  packages = ["."]
+  revision = "9b3b1e0f5f99ae461456d768e7d301a7acdaa2d8"
+  version = "v1.1.0"
+
+[[projects]]
+  name = "github.com/googleapis/gnostic"
+  packages = [
+    "OpenAPIv2",
+    "compiler",
+    "extensions"
+  ]
+  revision = "7c663266750e7d82587642f65e60bc4083f1f84e"
+  version = "v0.2.0"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/gregjones/httpcache"
+  packages = [
+    ".",
+    "diskcache"
+  ]
+  revision = "c63ab54fda8f77302f8d414e19933f2b6026a089"
+
+[[projects]]
+  name = "github.com/hashicorp/golang-lru"
+  packages = [
+    ".",
+    "simplelru"
+  ]
+  revision = "20f1fb78b0740ba8c3cb143a61e86ba5c8669768"
+  version = "v0.5.0"
+
+[[projects]]
+  name = "github.com/hpcloud/tail"
+  packages = [
+    ".",
+    "ratelimiter",
+    "util",
+    "watch",
+    "winfile"
+  ]
+  revision = "a30252cb686a21eb2d0b98132633053ec2f7f1e5"
+  version = "v1.0.0"
+
+[[projects]]
+  name = "github.com/imdario/mergo"
+  packages = ["."]
+  revision = "9f23e2d6bd2a77f959b2bf6acdbefd708a83a4a4"
+  version = "v0.3.6"
+
+[[projects]]
+  name = "github.com/inconshreveable/mousetrap"
+  packages = ["."]
+  revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
+  version = "v1.0"
+
+[[projects]]
+  name = "github.com/joho/godotenv"
+  packages = ["."]
+  revision = "23d116af351c84513e1946b527c88823e476be13"
+  version = "v1.3.0"
+
+[[projects]]
+  name = "github.com/json-iterator/go"
+  packages = ["."]
+  revision = "1624edc4454b8682399def8740d46db5e4362ba4"
+  version = "v1.1.5"
+
+[[projects]]
+  name = "github.com/markbates/inflect"
+  packages = ["."]
+  revision = "24b83195037b3bc61fcda2d28b7b0518bce293b6"
+  version = "v1.0.4"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/mattbaird/jsonpatch"
+  packages = ["."]
+  revision = "81af80346b1a01caae0cbc27fd3c1ba5b11e189f"
+
+[[projects]]
+  name = "github.com/modern-go/concurrent"
+  packages = ["."]
+  revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94"
+  version = "1.0.3"
+
+[[projects]]
+  name = "github.com/modern-go/reflect2"
+  packages = ["."]
+  revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd"
+  version = "1.0.1"
+
+[[projects]]
+  name = "github.com/onsi/ginkgo"
+  packages = [
+    ".",
+    "config",
+    "internal/codelocation",
+    "internal/containernode",
+    "internal/failer",
+    "internal/leafnodes",
+    "internal/remote",
+    "internal/spec",
+    "internal/spec_iterator",
+    "internal/specrunner",
+    "internal/suite",
+    "internal/testingtproxy",
+    "internal/writer",
+    "reporters",
+    "reporters/stenographer",
+    "reporters/stenographer/support/go-colorable",
+    "reporters/stenographer/support/go-isatty",
+    "types"
+  ]
+  revision = "3774a09d95489ccaa16032e0770d08ea77ba6184"
+  version = "v1.6.0"
+
+[[projects]]
+  name = "github.com/onsi/gomega"
+  packages = [
+    ".",
+    "format",
+    "gbytes",
+    "gexec",
+    "internal/assertion",
+    "internal/asyncassertion",
+    "internal/oraclematcher",
+    "internal/testingtsupport",
+    "matchers",
+    "matchers/support/goraph/bipartitegraph",
+    "matchers/support/goraph/edge",
+    "matchers/support/goraph/node",
+    "matchers/support/goraph/util",
+    "types"
+  ]
+  revision = "7615b9433f86a8bdf29709bf288bc4fd0636a369"
+  version = "v1.4.2"
+
+[[projects]]
+  name = "github.com/pborman/uuid"
+  packages = ["."]
+  revision = "adf5a7427709b9deb95d29d3fa8a2bf9cfd388f1"
+  version = "v1.2"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/petar/GoLLRB"
+  packages = ["llrb"]
+  revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4"
+
+[[projects]]
+  name = "github.com/peterbourgon/diskv"
+  packages = ["."]
+  revision = "5f041e8faa004a95c88a202771f4cc3e991971e6"
+  version = "v2.0.1"
+
+[[projects]]
+  name = "github.com/pkg/errors"
+  packages = ["."]
+  revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
+  version = "v0.8.0"
+
+[[projects]]
+  name = "github.com/spf13/afero"
+  packages = [
+    ".",
+    "mem"
+  ]
+  revision = "d40851caa0d747393da1ffb28f7f9d8b4eeffebd"
+  version = "v1.1.2"
+
+[[projects]]
+  name = "github.com/spf13/cobra"
+  packages = ["."]
+  revision = "ef82de70bb3f60c65fb8eebacbb2d122ef517385"
+  version = "v0.0.3"
+
+[[projects]]
+  name = "github.com/spf13/pflag"
+  packages = ["."]
+  revision = "298182f68c66c05229eb03ac171abe6e309ee79a"
+  version = "v1.0.3"
+
+[[projects]]
+  name = "go.uber.org/atomic"
+  packages = ["."]
+  revision = "1ea20fb1cbb1cc08cbd0d913a96dead89aa18289"
+  version = "v1.3.2"
+
+[[projects]]
+  name = "go.uber.org/multierr"
+  packages = ["."]
+  revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a"
+  version = "v1.1.0"
+
+[[projects]]
+  name = "go.uber.org/zap"
+  packages = [
+    ".",
+    "buffer",
+    "internal/bufferpool",
+    "internal/color",
+    "internal/exit",
+    "zapcore"
+  ]
+  revision = "ff33455a0e382e8a81d14dd7c922020b6b5e7982"
+  version = "v1.9.1"
+
+[[projects]]
+  branch = "master"
+  name = "golang.org/x/crypto"
+  packages = ["ssh/terminal"]
+  revision = "3d3f9f413869b949e48070b5bc593aa22cc2b8f2"
+
+[[projects]]
+  branch = "master"
+  name = "golang.org/x/net"
+  packages = [
+    "context",
+    "context/ctxhttp",
+    "html",
+    "html/atom",
+    "html/charset",
+    "http/httpguts",
+    "http2",
+    "http2/hpack",
+    "idna"
+  ]
+  revision = "adae6a3d119ae4890b46832a2e88a95adc62b8e7"
+
+[[projects]]
+  branch = "master"
+  name = "golang.org/x/oauth2"
+  packages = [
+    ".",
+    "google",
+    "internal",
+    "jws",
+    "jwt"
+  ]
+  revision = "8f65e3013ebad444f13bc19536f7865efc793816"
+
+[[projects]]
+  branch = "master"
+  name = "golang.org/x/sys"
+  packages = [
+    "unix",
+    "windows"
+  ]
+  revision = "ec83556a53fe16b65c452a104ea9d1e86a671852"
+
+[[projects]]
+  name = "golang.org/x/text"
+  packages = [
+    "collate",
+    "collate/build",
+    "encoding",
+    "encoding/charmap",
+    "encoding/htmlindex",
+    "encoding/internal",
+    "encoding/internal/identifier",
+    "encoding/japanese",
+    "encoding/korean",
+    "encoding/simplifiedchinese",
+    "encoding/traditionalchinese",
+    "encoding/unicode",
+    "internal/colltab",
+    "internal/gen",
+    "internal/tag",
+    "internal/triegen",
+    "internal/ucd",
+    "internal/utf8internal",
+    "language",
+    "runes",
+    "secure/bidirule",
+    "transform",
+    "unicode/bidi",
+    "unicode/cldr",
+    "unicode/norm",
+    "unicode/rangetable"
+  ]
+  revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
+  version = "v0.3.0"
+
+[[projects]]
+  branch = "master"
+  name = "golang.org/x/time"
+  packages = ["rate"]
+  revision = "85acf8d2951cb2a3bde7632f9ff273ef0379bcbd"
+
+[[projects]]
+  branch = "master"
+  name = "golang.org/x/tools"
+  packages = [
+    "go/ast/astutil",
+    "imports",
+    "internal/fastwalk",
+    "internal/gopathwalk"
+  ]
+  revision = "9c8bd463e3aca40b6288cd14f7b7703206ff743b"
+
+[[projects]]
+  name = "google.golang.org/api"
+  packages = [
+    "compute/v1",
+    "gensupport",
+    "googleapi",
+    "googleapi/internal/uritemplates",
+    "redis/v1",
+    "storage/v1"
+  ]
+  revision = "e742f5a8defa1f9f5d723dfa04c962e680dc33f0"
+  version = "v0.2.0"
+
+[[projects]]
+  name = "google.golang.org/appengine"
+  packages = [
+    ".",
+    "internal",
+    "internal/app_identity",
+    "internal/base",
+    "internal/datastore",
+    "internal/log",
+    "internal/modules",
+    "internal/remote_api",
+    "internal/urlfetch",
+    "urlfetch"
+  ]
+  revision = "4a4468ece617fc8205e99368fa2200e9d1fad421"
+  version = "v1.3.0"
+
+[[projects]]
+  name = "gopkg.in/fsnotify.v1"
+  packages = ["."]
+  revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9"
+  source = "https://github.com/fsnotify/fsnotify.git"
+  version = "v1.4.7"
+
+[[projects]]
+  name = "gopkg.in/inf.v0"
+  packages = ["."]
+  revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf"
+  version = "v0.9.1"
+
+[[projects]]
+  branch = "v1"
+  name = "gopkg.in/tomb.v1"
+  packages = ["."]
+  revision = "dd632973f1e7218eb1089048e0798ec9ae7dceb8"
+
+[[projects]]
+  name = "gopkg.in/yaml.v2"
+  packages = ["."]
+  revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
+  version = "v2.2.1"
+
+[[projects]]
+  name = "k8s.io/api"
+  packages = [
+    "admission/v1beta1",
+    "admissionregistration/v1alpha1",
+    "admissionregistration/v1beta1",
+    "apps/v1",
+    "apps/v1beta1",
+    "apps/v1beta2",
+    "authentication/v1",
+    "authentication/v1beta1",
+    "authorization/v1",
+    "authorization/v1beta1",
+    "autoscaling/v1",
+    "autoscaling/v2beta1",
+    "batch/v1",
+    "batch/v1beta1",
+    "batch/v2alpha1",
+    "certificates/v1beta1",
+    "core/v1",
+    "events/v1beta1",
+    "extensions/v1beta1",
+    "networking/v1",
+    "policy/v1beta1",
+    "rbac/v1",
+    "rbac/v1alpha1",
+    "rbac/v1beta1",
+    "scheduling/v1alpha1",
+    "scheduling/v1beta1",
+    "settings/v1alpha1",
+    "storage/v1",
+    "storage/v1alpha1",
+    "storage/v1beta1"
+  ]
+  revision = "2d6f90ab1293a1fb871cf149423ebb72aa7423aa"
+  version = "kubernetes-1.11.1"
+
+[[projects]]
+  name = "k8s.io/apiextensions-apiserver"
+  packages = [
+    "pkg/apis/apiextensions",
+    "pkg/apis/apiextensions/v1beta1",
+    "pkg/client/clientset/clientset",
+    "pkg/client/clientset/clientset/scheme",
+    "pkg/client/clientset/clientset/typed/apiextensions/v1beta1"
+  ]
+  revision = "408db4a50408e2149acbd657bceb2480c13cb0a4"
+  version = "kubernetes-1.11.2"
+
+[[projects]]
+  name = "k8s.io/apimachinery"
+  packages = [
+    "pkg/api/errors",
+    "pkg/api/meta",
+    "pkg/api/resource",
+    "pkg/apis/meta/internalversion",
+    "pkg/apis/meta/v1",
+    "pkg/apis/meta/v1/unstructured",
+    "pkg/apis/meta/v1beta1",
+    "pkg/conversion",
+    "pkg/conversion/queryparams",
+    "pkg/fields",
+    "pkg/labels",
+    "pkg/runtime",
+    "pkg/runtime/schema",
+    "pkg/runtime/serializer",
+    "pkg/runtime/serializer/json",
+    "pkg/runtime/serializer/protobuf",
+    "pkg/runtime/serializer/recognizer",
+    "pkg/runtime/serializer/streaming",
+    "pkg/runtime/serializer/versioning",
+    "pkg/selection",
+    "pkg/types",
+    "pkg/util/cache",
+    "pkg/util/clock",
+    "pkg/util/diff",
+    "pkg/util/errors",
+    "pkg/util/framer",
+    "pkg/util/intstr",
+    "pkg/util/json",
+    "pkg/util/mergepatch",
+    "pkg/util/net",
+    "pkg/util/runtime",
+    "pkg/util/sets",
+    "pkg/util/strategicpatch",
+    "pkg/util/uuid",
+    "pkg/util/validation",
+    "pkg/util/validation/field",
+    "pkg/util/wait",
+    "pkg/util/yaml",
+    "pkg/version",
+    "pkg/watch",
+    "third_party/forked/golang/json",
+    "third_party/forked/golang/reflect"
+  ]
+  revision = "103fd098999dc9c0c88536f5c9ad2e5da39373ae"
+  version = "kubernetes-1.11.0"
+
+[[projects]]
+  name = "k8s.io/client-go"
+  packages = [
+    "discovery",
+    "dynamic",
+    "kubernetes",
+    "kubernetes/scheme",
+    "kubernetes/typed/admissionregistration/v1alpha1",
+    "kubernetes/typed/admissionregistration/v1beta1",
+    "kubernetes/typed/apps/v1",
+    "kubernetes/typed/apps/v1beta1",
+    "kubernetes/typed/apps/v1beta2",
+    "kubernetes/typed/authentication/v1",
+    "kubernetes/typed/authentication/v1beta1",
+    "kubernetes/typed/authorization/v1",
+    "kubernetes/typed/authorization/v1beta1",
+    "kubernetes/typed/autoscaling/v1",
+    "kubernetes/typed/autoscaling/v2beta1",
+    "kubernetes/typed/batch/v1",
+    "kubernetes/typed/batch/v1beta1",
+    "kubernetes/typed/batch/v2alpha1",
+    "kubernetes/typed/certificates/v1beta1",
+    "kubernetes/typed/core/v1",
+    "kubernetes/typed/events/v1beta1",
+    "kubernetes/typed/extensions/v1beta1",
+    "kubernetes/typed/networking/v1",
+    "kubernetes/typed/policy/v1beta1",
+    "kubernetes/typed/rbac/v1",
+    "kubernetes/typed/rbac/v1alpha1",
+    "kubernetes/typed/rbac/v1beta1",
+    "kubernetes/typed/scheduling/v1alpha1",
+    "kubernetes/typed/scheduling/v1beta1",
+    "kubernetes/typed/settings/v1alpha1",
+    "kubernetes/typed/storage/v1",
+    "kubernetes/typed/storage/v1alpha1",
+    "kubernetes/typed/storage/v1beta1",
+    "pkg/apis/clientauthentication",
+    "pkg/apis/clientauthentication/v1alpha1",
+    "pkg/apis/clientauthentication/v1beta1",
+    "pkg/version",
+    "plugin/pkg/client/auth/exec",
+    "plugin/pkg/client/auth/gcp",
+    "rest",
+    "rest/watch",
+    "restmapper",
+    "third_party/forked/golang/template",
+    "tools/auth",
+    "tools/cache",
+    "tools/clientcmd",
+    "tools/clientcmd/api",
+    "tools/clientcmd/api/latest",
+    "tools/clientcmd/api/v1",
+    "tools/leaderelection",
+    "tools/leaderelection/resourcelock",
+    "tools/metrics",
+    "tools/pager",
+    "tools/record",
+    "tools/reference",
+    "transport",
+    "util/buffer",
+    "util/cert",
+    "util/connrotation",
+    "util/flowcontrol",
+    "util/homedir",
+    "util/integer",
+    "util/jsonpath",
+    "util/retry",
+    "util/workqueue"
+  ]
+  revision = "1f13a808da65775f22cbf47862c4e5898d8f4ca1"
+  version = "kubernetes-1.11.2"
+
+[[projects]]
+  branch = "master"
+  name = "k8s.io/code-generator"
+  packages = [
+    "cmd/client-gen",
+    "cmd/client-gen/args",
+    "cmd/client-gen/generators",
+    "cmd/client-gen/generators/fake",
+    "cmd/client-gen/generators/scheme",
+    "cmd/client-gen/generators/util",
+    "cmd/client-gen/path",
+    "cmd/client-gen/types",
+    "cmd/deepcopy-gen",
+    "cmd/deepcopy-gen/args",
+    "pkg/util"
+  ]
+  revision = "405721ab9678fde04d78961eec9498820d80408d"
+
+[[projects]]
+  branch = "master"
+  name = "k8s.io/gengo"
+  packages = [
+    "args",
+    "examples/deepcopy-gen/generators",
+    "examples/set-gen/sets",
+    "generator",
+    "namer",
+    "parser",
+    "types"
+  ]
+  revision = "fd15ee9cc2f77baa4f31e59e6acbf21146455073"
+
+[[projects]]
+  name = "k8s.io/klog"
+  packages = ["."]
+  revision = "a5bc97fbc634d635061f3146511332c7e313a55a"
+  version = "v0.1.0"
+
+[[projects]]
+  branch = "master"
+  name = "k8s.io/kube-openapi"
+  packages = ["pkg/util/proto"]
+  revision = "0317810137be915b9cf888946c6e115c1bfac693"
+
+[[projects]]
+  name = "sigs.k8s.io/controller-runtime"
+  packages = [
+    "pkg/cache",
+    "pkg/cache/internal",
+    "pkg/client",
+    "pkg/client/apiutil",
+    "pkg/client/config",
+    "pkg/controller",
+    "pkg/envtest",
+    "pkg/envtest/printer",
+    "pkg/event",
+    "pkg/handler",
+    "pkg/internal/controller",
+    "pkg/internal/recorder",
+    "pkg/leaderelection",
+    "pkg/manager",
+    "pkg/patch",
+    "pkg/predicate",
+    "pkg/reconcile",
+    "pkg/recorder",
+    "pkg/runtime/inject",
+    "pkg/runtime/log",
+    "pkg/runtime/scheme",
+    "pkg/runtime/signals",
+    "pkg/source",
+    "pkg/source/internal",
+    "pkg/webhook/admission",
+    "pkg/webhook/admission/types",
+    "pkg/webhook/types"
+  ]
+  revision = "5fd1e9e9fac5261e9ad9d47c375afc014fc31d21"
+  version = "v0.1.7"
+
+[[projects]]
+  name = "sigs.k8s.io/controller-tools"
+  packages = [
+    "cmd/controller-gen",
+    "pkg/crd/generator",
+    "pkg/crd/util",
+    "pkg/generate/rbac",
+    "pkg/internal/codegen",
+    "pkg/internal/codegen/parse",
+    "pkg/util"
+  ]
+  revision = "38b2f3f497ed6b8ea5d2844ecf00c28ac4b5c2c4"
+  version = "v0.1.6"
+
+[[projects]]
+  branch = "master"
+  name = "sigs.k8s.io/testing_frameworks"
+  packages = [
+    "integration",
+    "integration/internal"
+  ]
+  revision = "5818a3a284a11812aaed11d5ca0bcadec2c50e83"
+
+[solve-meta]
+  analyzer-name = "dep"
+  analyzer-version = 1
+  inputs-digest = "110057b07c88ee606b2dae02760372c144c81a60489bc0e39179503d8cff8505"
+  solver-name = "gps-cdcl"
+  solver-version = 1
diff --git a/Gopkg.toml b/Gopkg.toml
new file mode 100644
index 0000000..815522b
--- /dev/null
+++ b/Gopkg.toml
@@ -0,0 +1,45 @@
+required = [
+    "github.com/emicklei/go-restful",
+    "github.com/onsi/ginkgo", # for test framework
+    "github.com/onsi/gomega", # for test matchers
+    "k8s.io/client-go/plugin/pkg/client/auth/gcp", # for development against gcp
+    "k8s.io/code-generator/cmd/client-gen", # for go generate
+    "k8s.io/code-generator/cmd/deepcopy-gen", # for go generate
+    "sigs.k8s.io/controller-tools/cmd/controller-gen", # for crd/rbac generation
+    "sigs.k8s.io/controller-runtime/pkg/client/config",
+    "sigs.k8s.io/controller-runtime/pkg/controller",
+    "sigs.k8s.io/controller-runtime/pkg/handler",
+    "sigs.k8s.io/controller-runtime/pkg/manager",
+    "sigs.k8s.io/controller-runtime/pkg/runtime/signals",
+    "sigs.k8s.io/controller-runtime/pkg/source",
+    "sigs.k8s.io/testing_frameworks/integration", # for integration testing
+    "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1",
+    "google.golang.org/api/compute/v1",
+    "google.golang.org/api/redis/v1",
+    "google.golang.org/api/storage/v1",
+    ]
+
+ignored=["sigs.k8s.io/controller-reconciler*", "github.com/kubernetes-sigs/controller-reconciler*"]
+
+[prune]
+  go-tests = true
+
+#[[constraint]]
+#  branch = "master"
+#  name = "github.com/kubernetes-sigs/application"
+
+# STANZAS BELOW ARE GENERATED AND MAY BE WRITTEN - DO NOT MODIFY BELOW THIS LINE.
+
+[[constraint]]
+  name="sigs.k8s.io/controller-runtime"
+  version="v0.1.1"
+
+[[constraint]]
+  name="sigs.k8s.io/controller-tools"
+  version="v0.1.1"
+
+# For dependency below: Refer to issue https://github.com/golang/dep/issues/1799
+[[override]]
+name = "gopkg.in/fsnotify.v1"
+source = "https://github.com/fsnotify/fsnotify.git"
+version="v1.4.7"
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..6639b55
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,169 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+   1. Definitions.
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+   END OF TERMS AND CONDITIONS
+   APPENDIX: How to apply the Apache License to your work.
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+   Copyright [yyyy] [name of copyright owner]
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+       http://www.apache.org/licenses/LICENSE-2.0
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..cf55221
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,81 @@
+ifndef NOTGCP
+  PROJECT_ID := $(shell gcloud config get-value project)
+  ZONE := $(shell gcloud config get-value compute/zone)
+  SHORT_SHA := $(shell git rev-parse --short HEAD)
+  IMG ?= gcr.io/${PROJECT_ID}/airflow-operator:${SHORT_SHA}
+endif
+
+# Image URL to use all building/pushing image targets
+
+all: test manager
+
+# Run tests
+test: generate fmt vet manifests
+	ln -s ../../../templates/ pkg/controller/airflowbase/ || true
+	ln -s ../../../templates/ pkg/controller/airflowcluster/ || true
+	go test ./pkg/... ./cmd/... -coverprofile cover.out
+
+# Build manager binary
+manager: generate fmt vet
+	go build -o bin/manager k8s.io/airflow-operator/cmd/manager
+
+# Run against the configured Kubernetes cluster in ~/.kube/config
+run: generate fmt vet
+	go run ./cmd/manager/main.go
+
+# Run against the configured Kubernetes cluster in ~/.kube/config
+debug: generate fmt vet
+	dlv debug cmd/manager/main.go
+
+# Install CRDs into a cluster
+install: manifests
+	kubectl apply -f config/crds
+	kubectl apply -f hack/appcrd.yaml
+
+# Deploy controller in the configured Kubernetes cluster in ~/.kube/config
+deploy: install
+	kustomize build config/default | kubectl apply -f -
+
+# Deploy controller in the configured Kubernetes cluster in ~/.kube/config
+undeploy: manifests
+	kustomize build config/default | kubectl delete -f -
+	kubectl delete -f config/crds || true
+	kubectl delete -f hack/appcrd.yaml || true
+
+# Generate manifests e.g. CRD, RBAC etc.
+manifests:
+	go run vendor/sigs.k8s.io/controller-tools/cmd/controller-gen/main.go all
+
+# Run go fmt against code
+fmt:
+	go fmt ./pkg/... ./cmd/...
+
+# Run go vet against code
+vet:
+	go vet ./pkg/... ./cmd/...
+
+# Generate code
+generate:
+	echo ${IMG}
+	go generate ./pkg/... ./cmd/...
+
+# Build the docker image
+docker-build: test
+	docker build . -t ${IMG}
+	@echo "updating kustomize image patch file for manager resource"
+	sed -i'' -e 's@image: .*@image: '"${IMG}"'@' ./config/default/manager_image_patch.yaml
+
+# Push the docker image
+docker-push: docker-build
+	docker push ${IMG}
+
+
+e2e-test:
+	kubectl get namespace airflowop-system || kubectl create namespace airflowop-system
+	go test -v -timeout 20m test/e2e/base_test.go --namespace airflowop-system
+	go test -v -timeout 20m test/e2e/cluster_test.go --namespace airflowop-system
+
+e2e-test-gcp:
+	kubectl get namespace airflowop-system || kubectl create namespace airflowop-system
+	kubectl apply -f hack/sample/cloudsql-celery/sqlproxy-secret.yaml -n airflowop-system
+	go test -v -timeout 20m test/e2e/gcp_test.go --namespace airflowop-system
diff --git a/README b/README
deleted file mode 100644
index e69de29..0000000
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..3b92eed
--- /dev/null
+++ b/README.md
@@ -0,0 +1,49 @@
+[![Go Report Card](https://goreportcard.com/badge/github.com/GoogleCloudPlatform/airflow-operator)](https://goreportcard.com/report/github.com/GoogleCloudPlatform/airflow-operator)
+
+**This is not an officially supported Google product.**
+
+## Community
+
+* Join our [Slack channel](https://kubernetes.slack.com/messages/CC1UAMYSV).
+
+## Project Status
+
+*Alpha*
+
+The Airflow Operator is still under active development and has not been extensively tested in a production environment. Backward compatibility of the APIs is not guaranteed for alpha releases.
+
+## Prerequisites
+* Version >= 1.9 of Kubernetes.
+* Uses 1.9 of Airflow (1.10.1+ for k8s executor)
+* Uses 4.0.x of Redis (for celery operator)
+* Uses 5.7 of MySQL
+
+## Get Started
+
+[One Click Deployment](https://console.cloud.google.com/marketplace/details/google/airflow-operator) from Google Cloud Marketplace to your [GKE cluster](https://cloud.google.com/kubernetes-engine/)
+
+Get started quickly with the Airflow Operator using the [Quick Start Guide](https://github.com/GoogleCloudPlatform/airflow-operator/blob/master/docs/quickstart.md)
+
+For more information check the [Design](https://github.com/GoogleCloudPlatform/airflow-operator/blob/master/docs/design.md) and detailed [User Guide](https://github.com/GoogleCloudPlatform/airflow-operator/blob/master/docs/userguide.md)
+
+## Airflow Operator Overview
+Airflow Operator is a custom [Kubernetes operator](https://coreos.com/blog/introducing-operators.html) that makes it easy to deploy and manage [Apache Airflow](https://airflow.apache.org/) on Kubernetes. Apache Airflow is a platform to programmatically author, schedule and monitor workflows. Using the Airflow Operator, an Airflow cluster is split into 2 parts represented by the `AirflowBase` and `AirflowCluster` custom resources.
+The Airflow Operator performs these jobs:
+* Creates and manages the necessary Kubernetes resources for an Airflow deployment.
+* Updates the corresponding Kubernetes resources when the `AirflowBase` or `AirflowCluster` specification changes.
+* Restores managed Kubernetes resources that are deleted.
+* Supports creation of Airflow schedulers with different Executors
+* Supports sharing of the `AirflowBase` across multiple `AirflowClusters`
+
+Check out the [Design](https://github.com/GoogleCloudPlatform/airflow-operator/blob/master/docs/design.md)
+
+![Airflow Cluster](docs/airflow-cluster.png)
+
+
+## Development
+
+Refer to the [Design](https://github.com/GoogleCloudPlatform/airflow-operator/blob/master/docs/design.md) and [Development Guide](https://github.com/GoogleCloudPlatform/airflow-operator/blob/master/docs/development.md).
+
+## Managed Airflow solution
+
+[Google Cloud Composer](https://cloud.google.com/composer/) is a fully managed workflow orchestration service targeting customers that need a workflow manager in the cloud.
diff --git a/cloudbuild.yaml b/cloudbuild.yaml
new file mode 100644
index 0000000..c17cd03
--- /dev/null
+++ b/cloudbuild.yaml
@@ -0,0 +1,25 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# Google Cloud Build script
+#
+
+steps:
+- name: 'gcr.io/cloud-builders/docker'
+  args: ['build', '.', '-t', 'gcr.io/airflow-operator/airflow-operator:v1alpha2', '-t', 'gcr.io/airflow-operator/airflow-operator:$REVISION_ID', '-f', 'Dockerfile' ]
+
+images: ['gcr.io/airflow-operator/airflow-operator:v1alpha2', 'gcr.io/airflow-operator/airflow-operator:$REVISION_ID']
diff --git a/cmd/manager/main.go b/cmd/manager/main.go
new file mode 100644
index 0000000..512b0cf
--- /dev/null
+++ b/cmd/manager/main.go
@@ -0,0 +1,81 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+	"os"
+
+	"k8s.io/airflow-operator/pkg/apis"
+	"k8s.io/airflow-operator/pkg/controller"
+	"k8s.io/airflow-operator/pkg/webhook"
+	_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
+	"sigs.k8s.io/controller-runtime/pkg/client/config"
+	"sigs.k8s.io/controller-runtime/pkg/manager"
+	logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
+	"sigs.k8s.io/controller-runtime/pkg/runtime/signals"
+	"time"
+)
+
+func main() {
+	logf.SetLogger(logf.ZapLogger(false))
+	log := logf.Log.WithName("entrypoint")
+
+	// Get a config to talk to the apiserver
+	log.Info("setting up client for manager")
+	cfg, err := config.GetConfig()
+	if err != nil {
+		log.Error(err, "unable to set up client config")
+		os.Exit(1)
+	}
+
+	// Create a new Cmd to provide shared dependencies and start components
+	log.Info("setting up manager")
+	syncperiod := time.Minute * 2
+	mgr, err := manager.New(cfg, manager.Options{SyncPeriod: &syncperiod})
+	if err != nil {
+		log.Error(err, "unable to set up overall controller manager")
+		os.Exit(1)
+	}
+
+	log.Info("Registering Components.")
+
+	// Setup Scheme for all resources
+	log.Info("setting up scheme")
+	if err := apis.AddToScheme(mgr.GetScheme()); err != nil {
+		log.Error(err, "unable add APIs to scheme")
+		os.Exit(1)
+	}
+
+	// Setup all Controllers
+	log.Info("Setting up controller")
+	if err := controller.AddToManager(mgr); err != nil {
+		log.Error(err, "unable to register controllers to the manager")
+		os.Exit(1)
+	}
+
+	log.Info("setting up webhooks")
+	if err := webhook.AddToManager(mgr); err != nil {
+		log.Error(err, "unable to register webhooks to the manager")
+		os.Exit(1)
+	}
+
+	// Start the Cmd
+	log.Info("Starting the Cmd.")
+	if err := mgr.Start(signals.SetupSignalHandler()); err != nil {
+		log.Error(err, "unable to run the manager")
+		os.Exit(1)
+	}
+}
diff --git a/config/crds/airflow_v1alpha1_airflowbase.yaml b/config/crds/airflow_v1alpha1_airflowbase.yaml
new file mode 100644
index 0000000..3b97406
--- /dev/null
+++ b/config/crds/airflow_v1alpha1_airflowbase.yaml
@@ -0,0 +1,217 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  creationTimestamp: null
+  labels:
+    controller-tools.k8s.io: "1.0"
+  name: airflowbases.airflow.k8s.io
+spec:
+  group: airflow.k8s.io
+  names:
+    kind: AirflowBase
+    plural: airflowbases
+  scope: Namespaced
+  validation:
+    openAPIV3Schema:
+      properties:
+        apiVersion:
+          type: string
+        kind:
+          type: string
+        metadata:
+          type: object
+        spec:
+          properties:
+            affinity:
+              type: object
+            annotations:
+              type: object
+            labels:
+              type: object
+            mysql:
+              properties:
+                backup:
+                  properties:
+                    schedule:
+                      type: string
+                    storage:
+                      properties:
+                        config:
+                          type: object
+                        secretRef:
+                          type: object
+                        storageprovider:
+                          type: string
+                      required:
+                      - storageprovider
+                      type: object
+                  required:
+                  - schedule
+                  - storage
+                  type: object
+                backupVolumeClaimTemplate:
+                  type: object
+                image:
+                  type: string
+                operator:
+                  type: boolean
+                replicas:
+                  format: int32
+                  type: integer
+                resources:
+                  type: object
+                version:
+                  type: string
+                volumeClaimTemplate:
+                  type: object
+              type: object
+            nodeSelector:
+              type: object
+            postgres:
+              properties:
+                image:
+                  type: string
+                operator:
+                  type: boolean
+                replicas:
+                  format: int32
+                  type: integer
+                resources:
+                  type: object
+                version:
+                  type: string
+                volumeClaimTemplate:
+                  type: object
+              type: object
+            sqlproxy:
+              properties:
+                image:
+                  type: string
+                instance:
+                  type: string
+                project:
+                  type: string
+                region:
+                  type: string
+                resources:
+                  type: object
+                type:
+                  type: string
+                version:
+                  type: string
+              required:
+              - project
+              - region
+              - instance
+              - type
+              type: object
+            storage:
+              properties:
+                image:
+                  type: string
+                resources:
+                  type: object
+                version:
+                  type: string
+                volumeClaimTemplate:
+                  type: object
+              type: object
+          type: object
+        status:
+          properties:
+            components:
+              items:
+                properties:
+                  group:
+                    type: string
+                  kind:
+                    type: string
+                  link:
+                    type: string
+                  name:
+                    type: string
+                  pdb:
+                    properties:
+                      currenthealthy:
+                        format: int32
+                        type: integer
+                      desiredhealthy:
+                        format: int32
+                        type: integer
+                    required:
+                    - currenthealthy
+                    - desiredhealthy
+                    type: object
+                  status:
+                    type: string
+                  sts:
+                    properties:
+                      currentcount:
+                        format: int32
+                        type: integer
+                      progress:
+                        format: int32
+                        type: integer
+                      readycount:
+                        format: int32
+                        type: integer
+                      replicas:
+                        format: int32
+                        type: integer
+                    required:
+                    - replicas
+                    - readycount
+                    - currentcount
+                    - progress
+                    type: object
+                type: object
+              type: array
+            conditions:
+              items:
+                properties:
+                  lastTransitionTime:
+                    format: date-time
+                    type: string
+                  lastUpdateTime:
+                    format: date-time
+                    type: string
+                  message:
+                    type: string
+                  reason:
+                    type: string
+                  status:
+                    type: string
+                  type:
+                    type: string
+                required:
+                - type
+                - status
+                type: object
+              type: array
+            observedGeneration:
+              format: int64
+              type: integer
+          type: object
+  version: v1alpha1
+status:
+  acceptedNames:
+    kind: ""
+    plural: ""
+  conditions: []
+  storedVersions: []
diff --git a/config/crds/airflow_v1alpha1_airflowcluster.yaml b/config/crds/airflow_v1alpha1_airflowcluster.yaml
new file mode 100644
index 0000000..ca3cbef
--- /dev/null
+++ b/config/crds/airflow_v1alpha1_airflowcluster.yaml
@@ -0,0 +1,373 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  creationTimestamp: null
+  labels:
+    controller-tools.k8s.io: "1.0"
+  name: airflowclusters.airflow.k8s.io
+spec:
+  group: airflow.k8s.io
+  names:
+    kind: AirflowCluster
+    plural: airflowclusters
+  scope: Namespaced
+  validation:
+    openAPIV3Schema:
+      properties:
+        apiVersion:
+          type: string
+        kind:
+          type: string
+        metadata:
+          type: object
+        spec:
+          properties:
+            affinity:
+              type: object
+            airflowbase:
+              type: object
+            annotations:
+              type: object
+            config:
+              properties:
+                airflow:
+                  type: object
+                airflowsecret:
+                  items:
+                    type: object
+                  type: array
+              type: object
+            dags:
+              properties:
+                gcs:
+                  properties:
+                    bucket:
+                      type: string
+                    once:
+                      type: boolean
+                  type: object
+                git:
+                  properties:
+                    branch:
+                      type: string
+                    cred:
+                      type: object
+                    once:
+                      type: boolean
+                    repo:
+                      type: string
+                    rev:
+                      type: string
+                    user:
+                      type: string
+                  required:
+                  - repo
+                  type: object
+                nfspv:
+                  type: object
+                storage:
+                  properties:
+                    config:
+                      type: object
+                    secretRef:
+                      type: object
+                    storageprovider:
+                      type: string
+                  required:
+                  - storageprovider
+                  type: object
+                subdir:
+                  type: string
+              type: object
+            executor:
+              type: string
+            flower:
+              properties:
+                image:
+                  type: string
+                replicas:
+                  format: int32
+                  type: integer
+                resources:
+                  type: object
+                version:
+                  type: string
+              type: object
+            labels:
+              type: object
+            memoryStore:
+              properties:
+                alternativeLocationId:
+                  type: string
+                authorizedNetwork:
+                  type: string
+                locationId:
+                  type: string
+                maxMemoryPolicy:
+                  type: string
+                memorySizeGb:
+                  format: int64
+                  type: integer
+                notifyKeyspaceEvents:
+                  type: string
+                project:
+                  type: string
+                redisConfigs:
+                  type: object
+                redisVersion:
+                  type: string
+                region:
+                  type: string
+                status:
+                  properties:
+                    components:
+                      items:
+                        properties:
+                          group:
+                            type: string
+                          kind:
+                            type: string
+                          link:
+                            type: string
+                          name:
+                            type: string
+                          pdb:
+                            properties:
+                              currenthealthy:
+                                format: int32
+                                type: integer
+                              desiredhealthy:
+                                format: int32
+                                type: integer
+                            required:
+                            - currenthealthy
+                            - desiredhealthy
+                            type: object
+                          status:
+                            type: string
+                          sts:
+                            properties:
+                              currentcount:
+                                format: int32
+                                type: integer
+                              progress:
+                                format: int32
+                                type: integer
+                              readycount:
+                                format: int32
+                                type: integer
+                              replicas:
+                                format: int32
+                                type: integer
+                            required:
+                            - replicas
+                            - readycount
+                            - currentcount
+                            - progress
+                            type: object
+                        type: object
+                      type: array
+                    conditions:
+                      items:
+                        properties:
+                          lastTransitionTime:
+                            format: date-time
+                            type: string
+                          lastUpdateTime:
+                            format: date-time
+                            type: string
+                          message:
+                            type: string
+                          reason:
+                            type: string
+                          status:
+                            type: string
+                          type:
+                            type: string
+                        required:
+                        - type
+                        - status
+                        type: object
+                      type: array
+                    createTime:
+                      type: string
+                    currentLocationId:
+                      type: string
+                    host:
+                      type: string
+                    observedGeneration:
+                      format: int64
+                      type: integer
+                    port:
+                      format: int64
+                      type: integer
+                    state:
+                      type: string
+                    statusMessage:
+                      type: string
+                  type: object
+                tier:
+                  type: string
+              required:
+              - project
+              - region
+              type: object
+            nodeSelector:
+              type: object
+            redis:
+              properties:
+                additionalargs:
+                  type: string
+                image:
+                  type: string
+                operator:
+                  type: boolean
+                redisHost:
+                  type: string
+                redisPassword:
+                  type: boolean
+                redisPort:
+                  type: string
+                resources:
+                  type: object
+                version:
+                  type: string
+                volumeClaimTemplate:
+                  type: object
+              type: object
+            scheduler:
+              properties:
+                database:
+                  type: string
+                dbuser:
+                  type: string
+                image:
+                  type: string
+                resources:
+                  type: object
+                version:
+                  type: string
+              type: object
+            ui:
+              properties:
+                image:
+                  type: string
+                replicas:
+                  format: int32
+                  type: integer
+                resources:
+                  type: object
+                version:
+                  type: string
+              type: object
+            worker:
+              properties:
+                image:
+                  type: string
+                replicas:
+                  format: int32
+                  type: integer
+                resources:
+                  type: object
+                version:
+                  type: string
+              type: object
+          type: object
+        status:
+          properties:
+            components:
+              items:
+                properties:
+                  group:
+                    type: string
+                  kind:
+                    type: string
+                  link:
+                    type: string
+                  name:
+                    type: string
+                  pdb:
+                    properties:
+                      currenthealthy:
+                        format: int32
+                        type: integer
+                      desiredhealthy:
+                        format: int32
+                        type: integer
+                    required:
+                    - currenthealthy
+                    - desiredhealthy
+                    type: object
+                  status:
+                    type: string
+                  sts:
+                    properties:
+                      currentcount:
+                        format: int32
+                        type: integer
+                      progress:
+                        format: int32
+                        type: integer
+                      readycount:
+                        format: int32
+                        type: integer
+                      replicas:
+                        format: int32
+                        type: integer
+                    required:
+                    - replicas
+                    - readycount
+                    - currentcount
+                    - progress
+                    type: object
+                type: object
+              type: array
+            conditions:
+              items:
+                properties:
+                  lastTransitionTime:
+                    format: date-time
+                    type: string
+                  lastUpdateTime:
+                    format: date-time
+                    type: string
+                  message:
+                    type: string
+                  reason:
+                    type: string
+                  status:
+                    type: string
+                  type:
+                    type: string
+                required:
+                - type
+                - status
+                type: object
+              type: array
+            observedGeneration:
+              format: int64
+              type: integer
+          type: object
+  version: v1alpha1
+status:
+  acceptedNames:
+    kind: ""
+    plural: ""
+  conditions: []
+  storedVersions: []
diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml
new file mode 100644
index 0000000..fd64183
--- /dev/null
+++ b/config/default/kustomization.yaml
@@ -0,0 +1,49 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# Adds namespace to all resources.
+namespace: airflowop-system
+
+# Value of this field is prepended to the
+# names of all resources, e.g. a deployment named
+# "wordpress" becomes "alices-wordpress".
+# Note that it should also match with the prefix (text before '-') of the namespace
+# field above.
+namePrefix: airflowop-
+
+# Labels to add to all resources and selectors.
+#commonLabels:
+#  someName: someValue
+
+# Each entry in this list must resolve to an existing
+# resource definition in YAML.  These are the resource
+# files that kustomize reads, modifies and emits as a
+# YAML string, with resources separated by document
+# markers ("---").
+resources:
+- ./rbac/rbac_role.yaml
+- ./rbac/rbac_role_binding.yaml
+- ./manager/manager.yaml
+
+patches:
+- manager_image_patch.yaml
+
+vars:
+- name: WEBHOOK_SECRET_NAME
+  objref:
+    kind: Secret
+    name: webhook-server-secret
+    apiVersion: v1
diff --git a/config/default/manager/manager.yaml b/config/default/manager/manager.yaml
new file mode 100644
index 0000000..9dd4c7f
--- /dev/null
+++ b/config/default/manager/manager.yaml
@@ -0,0 +1,98 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+apiVersion: v1
+kind: Namespace
+metadata:
+  labels:
+    controller-tools.k8s.io: "1.0"
+  name: system
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: controller-manager-service
+  namespace: system
+  labels:
+    control-plane: controller-manager
+    controller-tools.k8s.io: "1.0"
+spec:
+  selector:
+    control-plane: controller-manager
+    controller-tools.k8s.io: "1.0"
+  ports:
+  - port: 443
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: controller-manager
+  namespace: system
+  labels:
+    control-plane: controller-manager
+    controller-tools.k8s.io: "1.0"
+spec:
+  selector:
+    matchLabels:
+      control-plane: controller-manager
+      controller-tools.k8s.io: "1.0"
+  serviceName: controller-manager-service
+  template:
+    metadata:
+      labels:
+        control-plane: controller-manager
+        controller-tools.k8s.io: "1.0"
+    spec:
+      containers:
+      - command:
+        - /root/manager
+        image: controller:latest
+        imagePullPolicy: Always
+        name: manager
+        env:
+          - name: POD_NAMESPACE
+            valueFrom:
+              fieldRef:
+                fieldPath: metadata.namespace
+          - name: SECRET_NAME
+            value: $(WEBHOOK_SECRET_NAME)
+        resources:
+          limits:
+            cpu: 100m
+            memory: 30Mi
+          requests:
+            cpu: 100m
+            memory: 20Mi
+        ports:
+        - containerPort: 9876
+          name: webhook-server
+          protocol: TCP
+        volumeMounts:
+        - mountPath: /tmp/cert
+          name: cert
+          readOnly: true
+      terminationGracePeriodSeconds: 10
+      volumes:
+      - name: cert
+        secret:
+          defaultMode: 420
+          secretName: webhook-server-secret
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: webhook-server-secret
+  namespace: system
diff --git a/config/default/manager_image_patch.yaml b/config/default/manager_image_patch.yaml
new file mode 100644
index 0000000..3b35de1
--- /dev/null
+++ b/config/default/manager_image_patch.yaml
@@ -0,0 +1,37 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: controller-manager
+  namespace: system
+spec:
+  template:
+    spec:
+      containers:
+      # Change the value of image field below to your controller image URL
+      - image: gcr.io/kubeflow-193622/airflow-operator:db92567
+        name: manager
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: manager-rolebinding
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: cluster-admin
diff --git a/config/default/rbac/rbac_role.yaml b/config/default/rbac/rbac_role.yaml
new file mode 100644
index 0000000..bd3fb46
--- /dev/null
+++ b/config/default/rbac/rbac_role.yaml
@@ -0,0 +1,191 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  creationTimestamp: null
+  name: manager-role
+rules:
+- apiGroups:
+  - apps
+  resources:
+  - statefulsets
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - patch
+  - delete
+- apiGroups:
+  - ""
+  resources:
+  - services
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - patch
+  - delete
+- apiGroups:
+  - ""
+  resources:
+  - configmaps
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - patch
+  - delete
+- apiGroups:
+  - airflow.k8s.io
+  resources:
+  - airflowbases
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - patch
+  - delete
+- apiGroups:
+  - airflow.k8s.io
+  resources:
+  - airflowclusters
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - patch
+  - delete
+- apiGroups:
+  - app.k8s.io
+  resources:
+  - applications
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - patch
+  - delete
+- apiGroups:
+  - policy
+  resources:
+  - poddisruptionbudgets
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - patch
+  - delete
+- apiGroups:
+  - rbac.authorization.k8s.io
+  resources:
+  - rolebindings
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - patch
+  - delete
+- apiGroups:
+  - ""
+  resources:
+  - secrets
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - patch
+  - delete
+- apiGroups:
+  - ""
+  resources:
+  - serviceaccounts
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - patch
+  - delete
+- apiGroups:
+  - storage.k8s.io
+  resources:
+  - storageclasses
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - patch
+  - delete
+- apiGroups:
+  - admissionregistration.k8s.io
+  resources:
+  - mutatingwebhookconfigurations
+  - validatingwebhookconfigurations
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - patch
+  - delete
+- apiGroups:
+  - ""
+  resources:
+  - secrets
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - patch
+  - delete
+- apiGroups:
+  - ""
+  resources:
+  - services
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - patch
+  - delete
diff --git a/config/default/rbac/rbac_role_binding.yaml b/config/default/rbac/rbac_role_binding.yaml
new file mode 100644
index 0000000..80cd916
--- /dev/null
+++ b/config/default/rbac/rbac_role_binding.yaml
@@ -0,0 +1,29 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  creationTimestamp: null
+  name: manager-rolebinding
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: manager-role
+subjects:
+- kind: ServiceAccount
+  name: default
+  namespace: system
diff --git a/config/rbac/rbac_role.yaml b/config/rbac/rbac_role.yaml
new file mode 100644
index 0000000..bd3fb46
--- /dev/null
+++ b/config/rbac/rbac_role.yaml
@@ -0,0 +1,191 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  creationTimestamp: null
+  name: manager-role
+rules:
+- apiGroups:
+  - apps
+  resources:
+  - statefulsets
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - patch
+  - delete
+- apiGroups:
+  - ""
+  resources:
+  - services
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - patch
+  - delete
+- apiGroups:
+  - ""
+  resources:
+  - configmaps
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - patch
+  - delete
+- apiGroups:
+  - airflow.k8s.io
+  resources:
+  - airflowbases
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - patch
+  - delete
+- apiGroups:
+  - airflow.k8s.io
+  resources:
+  - airflowclusters
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - patch
+  - delete
+- apiGroups:
+  - app.k8s.io
+  resources:
+  - applications
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - patch
+  - delete
+- apiGroups:
+  - policy
+  resources:
+  - poddisruptionbudgets
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - patch
+  - delete
+- apiGroups:
+  - rbac.authorization.k8s.io
+  resources:
+  - rolebindings
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - patch
+  - delete
+- apiGroups:
+  - ""
+  resources:
+  - secrets
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - patch
+  - delete
+- apiGroups:
+  - ""
+  resources:
+  - serviceaccounts
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - patch
+  - delete
+- apiGroups:
+  - storage.k8s.io
+  resources:
+  - storageclasses
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - patch
+  - delete
+- apiGroups:
+  - admissionregistration.k8s.io
+  resources:
+  - mutatingwebhookconfigurations
+  - validatingwebhookconfigurations
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - patch
+  - delete
+- apiGroups:
+  - ""
+  resources:
+  - secrets
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - patch
+  - delete
+- apiGroups:
+  - ""
+  resources:
+  - services
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - patch
+  - delete
diff --git a/config/rbac/rbac_role_binding.yaml b/config/rbac/rbac_role_binding.yaml
new file mode 100644
index 0000000..80cd916
--- /dev/null
+++ b/config/rbac/rbac_role_binding.yaml
@@ -0,0 +1,29 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  creationTimestamp: null
+  name: manager-rolebinding
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: manager-role
+subjects:
+- kind: ServiceAccount
+  name: default
+  namespace: system
diff --git a/config/samples/airflow_v1alpha1_airflowbase.yaml b/config/samples/airflow_v1alpha1_airflowbase.yaml
new file mode 100644
index 0000000..5ac0be7
--- /dev/null
+++ b/config/samples/airflow_v1alpha1_airflowbase.yaml
@@ -0,0 +1,25 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+apiVersion: airflow.k8s.io/v1alpha1
+kind: AirflowBase
+metadata:
+  labels:
+    controller-tools.k8s.io: "1.0"
+  name: airflowbase-sample
+spec:
+  # Add fields here
+  foo: bar
diff --git a/config/samples/airflow_v1alpha1_airflowcluster.yaml b/config/samples/airflow_v1alpha1_airflowcluster.yaml
new file mode 100644
index 0000000..14b9145
--- /dev/null
+++ b/config/samples/airflow_v1alpha1_airflowcluster.yaml
@@ -0,0 +1,25 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+apiVersion: airflow.k8s.io/v1alpha1
+kind: AirflowCluster
+metadata:
+  labels:
+    controller-tools.k8s.io: "1.0"
+  name: airflowcluster-sample
+spec:
+  # Add fields here
+  foo: bar
diff --git a/docs/airflow-base.png b/docs/airflow-base.png
new file mode 100644
index 0000000..ceed123
Binary files /dev/null and b/docs/airflow-base.png differ
diff --git a/docs/airflow-cluster.png b/docs/airflow-cluster.png
new file mode 100644
index 0000000..b22784b
Binary files /dev/null and b/docs/airflow-cluster.png differ
diff --git a/docs/airflow-multi-node.png b/docs/airflow-multi-node.png
new file mode 100644
index 0000000..624f90d
Binary files /dev/null and b/docs/airflow-multi-node.png differ
diff --git a/docs/airflow-pod.png b/docs/airflow-pod.png
new file mode 100644
index 0000000..af4945c
Binary files /dev/null and b/docs/airflow-pod.png differ
diff --git a/docs/airflow-region-spread.png b/docs/airflow-region-spread.png
new file mode 100644
index 0000000..d38c249
Binary files /dev/null and b/docs/airflow-region-spread.png differ
diff --git a/docs/airflow-zone-spread.png b/docs/airflow-zone-spread.png
new file mode 100644
index 0000000..b81f0e3
Binary files /dev/null and b/docs/airflow-zone-spread.png differ
diff --git a/docs/api.md b/docs/api.md
new file mode 100644
index 0000000..172b327
--- /dev/null
+++ b/docs/api.md
@@ -0,0 +1,256 @@
+# Airflow Operator Custom Resource (API)
+The Airflow Operator uses these [CustomResourceDefinitions](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/):
+
+`AirflowBase` includes MySQL, UI, NFS(DagStore).  
+`AirflowCluster` includes Airflow Scheduler, Workers, Redis.  
+
+Multiple `AirflowCluster` could use the same `AirflowBase`. The way custom resources are defined allows multi-single-tenant (multiple single users) use cases, where users use different airflow plugins (operators, packages etc) in their set
+up. This improves cluster utilization and provides multiple users (in same trust domain) with some isolation.
+
+## AirflowBase API
+ 
+| **Field** | **json field**| **Type** | **Info** |
+| --- | --- | --- | --- |
+| Spec | `spec` | [AirflowBaseSpec](#AirflowBaseSpec) | The specification for Airflow Base custom resource |
+| Status | `status` | AirflowBaseStatus | The status for the custom resource |
+
+#### AirflowBaseSpec
+| **Field** | **Type** | **json field** | **Info** |
+| --- | --- | --- | --- |
+| NodeSelector | map[string]string | `nodeSelector` | [Selector](https://kubernetes.io/docs/concepts/configuration/assign-pod-node) for fitting pods to nodes whose labels match the selector |
+| Affinity | \*corev1.Affinity | `affinity` | Define scheduling constraints for pods |
+| Annotations | map[string]string | `annotations` | Custom annotations to be added to the pods |
+| Labels | map[string]string | `labels` | Custom labels to be added to the pods |
+| MySQL | \*MySQLSpec | `mysql` | Spec for MySQL component |
+| Storage | \*NFSStoreSpec | `storage` | Spec for NFS component |
+| UI | \*AirflowUISpec | `ui` | Spec for Airflow UI component |
+| SQLProxy | \*SQLProxySpec | `sqlproxy` | Spec for SQLProxy component. Ignored if SQL(MySQLSpec) is specified |
+
+
+#### MySQLSpec
+| **Field** | **Type** | **json field** | **Info** |
+| --- | --- | --- | --- |
+| Image | string | `image` | Image defines the MySQL Docker image name |
+| Version | string  | `version` | Version defines the MySQL Docker image version |
+| Replicas | int32 | `replicas` | Replicas defines the number of running MySQL instances in a cluster |
+| VolumeClaimTemplate | \*corev1.PersistentVolumeClaim | `volumeClaimTemplate` | VolumeClaimTemplate allows a user to specify volume claim for MySQL Server files |
+| BackupVolumeClaimTemplate | \*corev1.PersistentVolumeClaim | `backupVolumeClaimTemplate` | BackupVolumeClaimTemplate allows a user to specify a volume to temporarily store the data for a backup prior to it being shipped to object storage |
+| Operator | bool  | `operator` | Flag when True generates MySQLOperator CustomResource to be handled by MySQL Operator If False, a StatefulSet with 1 replica is created (not for production setups) |
+| Backup | \*MySQLBackup | `backup` | Spec defining the Backup Custom Resource to be handled by MySQLOperator Ignored when Operator is False |
+| Resources | corev1.ResourceRequirements | `resources` | Resources is the resource requests and limits for the pods |
+| Options | map[string]string | `options` | command line options for mysql |
+
+
+#### MySQLBackup 
+| **Field** | **Type** | **json field** | **Info** |
+| --- | --- | --- | --- |
+| Schedule | string | `schedule` | Schedule is the cron string used to schedule backup|
+| Storage | StorageSpec | `storage` | Storage has the s3 compatible storage spec|
+
+
+##### StorageSpec 
+| **Field** | **Type** | **json field** | **Info** |
+| --- | --- | --- | --- |
+| StorageProvider | string | `storageprovider` | Provider is the storage type used for backup and restore e.g. s3, oci-s3-compat, aws-s3, gce-s3, etc |
+| SecretRef | \*corev1.LocalObjectReference | `secretRef` | SecretRef is a reference to the Kubernetes secret containing the configuration for uploading the backup to authenticated storage |
+| Config | map[string]string | `config` | Config is generic string based key-value map that defines non-secret configuration values for uploading the backup to storage w.r.t the configured storage provider |
+
+#### NFSStoreSpec
+| **Field** | **Type** | **json field** | **Info** |
+| --- | --- | --- | --- |
+| Image | string | `image` | Image defines the NFS Docker image.|
+| Version | string | `version` | Version defines the NFS Server Docker image version.|
+| Resources | corev1.ResourceRequirements | `resources` | Resources is the resource requests and limits for the pods.|
+| Volume | \*corev1.PersistentVolumeClaim | `volumeClaimTemplate` | Volume allows a user to specify volume claim template to be used for fileserver|
+
+#### SQLProxySpec
+| **Field** | **Type** | **json field** | **Info** |
+| --- | --- | --- | --- |
+| Image | string | `image` | Image defines the SQLProxy Docker image name|
+| Version | string | `version` | Version defines the SQL Proxy docker image version.  example: myProject:us-central1:myInstance=tcp:3306|
+| Project | string | `project` | Project defines the SQL instance project|
+| Region | string | `region` | Region defines the SQL instance region|
+| Instance | string | `instance` | Instance defines the SQL instance name|
+| Resources | corev1.ResourceRequirements | `resources` | Resources is the resource requests and limits for the pods.|
+
+
+#### AirflowBaseStatus
+| **Field** | **Type** | **json field** | **Info** |
+| --- | --- | --- | --- |
+| ObservedGeneration | int64 | `observedGeneration` |	ObservedGeneration is the last generation of the AirflowBase as observed by the controller |
+| MySQL | ComponentStatus | `mysql` | MySQL is the status of the MySQL component |
+| UI | ComponentStatus | `ui` | UI is the status of the Airflow UI component |
+| Storage | ComponentStatus | `storage` | Storage is the status of the NFS component |
+| SQLProxy | ComponentStatus | `sqlproxy` | SQLProxy is the status of the SQLProxy component |
+| LastError | string | `lasterror` | LastError |
+| Status | string | `status` | Ready or Pending |
+
+##  AirflowCluster
+
+| **Field** | **Type** | **json field** | **Info** |
+| --- | --- | --- | --- |
+| Spec  | AirflowClusterSpec | `spec` | |
+| Status | AirflowClusterStatus | `status` | |
+
+#### AirflowClusterSpec
+| **Field** | **Type** | **json field** | **Info** |
+| --- | --- | --- | --- |
+| NodeSelector | map[string]string | `nodeSelector` | [Selector](https://kubernetes.io/docs/concepts/configuration/assign-pod-node) for fitting pods to nodes whose labels match the selector |
+| Affinity | \*corev1.Affinity | `affinity` | Define scheduling constraints for pods. |
+| Annotations | map[string]string | `annotations` | Custom annotations to be added to the pods. |
+| Labels | map[string]string | `labels` | Custom labels to be added to the pods. |
+| Executor | string | `executor` | Airflow Executor desired: local,celery,kubernetes |
+| Redis | \*RedisSpec | `redis` | Spec for Redis component. |
+| Scheduler | \*SchedulerSpec | `scheduler` | Spec for Airflow Scheduler component. |
+| Worker | \*WorkerSpec | `worker` | Spec for Airflow Workers |
+| UI | \*AirflowUISpec | `ui` | Spec for Airflow UI component. |
+| Flower | \*FlowerSpec | `flower` | Spec for Flower component. |
+| DAGs | \*DagSpec | `dags` | Spec for DAG source and location |
+| AirflowBaseRef | \*corev1.LocalObjectReference | `airflowbase` | AirflowBaseRef is a reference to the AirflowBase CR |
+
+#### AirflowClusterStatus
+| **Field** | **Type** | **json field** | **Info** |
+| --- | --- | --- | --- |
+| ObservedGeneration | int64 | `observedGeneration` | ObservedGeneration is the last generation of the AirflowCluster as observed by the controller. |
+| Redis | ComponentStatus | `redis` | Redis is the status of the Redis component |
+| Scheduler | SchedulerStatus | `scheduler` | Scheduler is the status of the Airflow Scheduler component |
+| Worker | ComponentStatus | `worker` | Worker is the status of the Workers |
+| UI | ComponentStatus | `ui` | UI is the status of the Airflow UI component |
+| Flower | ComponentStatus | `flower` | Flower is the status of the Airflow UI component |
+| LastError | string | `lasterror` | LastError |
+| Status | string | `status` | Status |
+
+#### RedisSpec
+| **Field** | **Type** | **json field** | **Info** |
+| --- | --- | --- | --- |
+| Image | string | `image` | Image defines the Redis Docker image name |
+| Version | string | `version` | Version defines the Redis Docker image version. |
+| Operator | bool | `operator` | Flag when True generates RedisReplica CustomResource to be handled by Redis Operator If False, a StatefulSet with 1 replica is created |
+| Resources | corev1.ResourceRequirements | `resources` | Resources is the resource requests and limits for the pods. |
+| VolumeClaimTemplate | \*corev1.PersistentVolumeClaim | `volumeClaimTemplate` | VolumeClaimTemplate allows a user to specify volume claim for MySQL Server files |
+| AdditionalArgs | string | `additionalargs` | AdditionalArgs for redis-server |
+
+#### RedisSpec
+| **Field** | **Type** | **json field** | **Info** |
+| --- | --- | --- | --- |
+| Image | string | `image` | Image defines the Redis Docker image name |
+| Version | string | `version` | Version defines the Redis Docker image version. |
+| Operator | bool | `operator` | Flag when True generates RedisReplica CustomResource to be handled by Redis Operator If False, a StatefulSet with 1 replica is created |
+| Resources | corev1.ResourceRequirements | `resources` | Resources is the resource requests and limits for the pods. |
+| VolumeClaimTemplate | \*corev1.PersistentVolumeClaim | `volumeClaimTemplate` | VolumeClaimTemplate allows a user to specify volume claim for MySQL Server files |
+| AdditionalArgs | string | `additionalargs` | AdditionalArgs for redis-server |
+
+#### FlowerSpec
+| **Field** | **Type** | **json field** | **Info** |
+| --- | --- | --- | --- |
+| Image | string | `image` | Image defines the Flower Docker image. |
+| Version | string | `version` | Version defines the Flower Docker image version. |
+| Replicas | int32 | `replicas` | Replicas defines the number of running Flower instances in a cluster |
+| Resources | corev1.ResourceRequirements | `resources` | Resources is the resource requests and limits for the pods. |
+
+#### SchedulerSpec
+| **Field** | **Type** | **json field** | **Info** |
+| --- | --- | --- | --- |
+| Image | string | `image` | Image defines the Airflow custom server Docker image. |
+| Version | string | `version` | Version defines the Airflow Docker image version |
+| DBName | string | `database` | DBName defines the Airflow Database to be used |
+| DBUser | string | `dbuser` | DBUser defines the Airflow Database user to be used |
+| Resources | corev1.ResourceRequirements | `resources` | Resources is the resource requests and limits for the pods. |
+
+#### WorkerSpec
+| **Field** | **Type** | **json field** | **Info** |
+| --- | --- | --- | --- |
+| Image | string | `image` | Image defines the Airflow worker Docker image. |
+| Version | string | `version` | Version defines the Airflow worker Docker image version |
+| Replicas | int32 | `replicas` | Replicas is the count of number of workers |
+| Resources | corev1.ResourceRequirements | `resources` | Resources is the resource requests and limits for the pods. |
+
+#### AirflowUISpec
+| **Field** | **Type** | **json field** | **Info** |
+| --- | --- | --- | --- |
+| Image | string | `image` | Image defines the AirflowUI Docker image.|
+| Version | string | `version` | Version defines the AirflowUI Docker image version.|
+| Replicas | int32 | `replicas` | Replicas defines the number of running Airflow UI instances in a cluster|
+| Resources | corev1.ResourceRequirements | `resources` | Resources is the resource requests and limits for the pods.|
+
+#### GCSSpec
+| **Field** | **Type** | **json field** | **Info** |
+| --- | --- | --- | --- |
+| Bucket | string | `bucket` | Bucket describes the GCS bucket |
+| Once | bool | `once` | Once syncs initially and quits (use init container instead of sidecar) |
+
+#### GitSpec
+| **Field** | **Type** | **json field** | **Info** |
+| --- | --- | --- | --- |
+| Repo | string | `repo` | Repo describes the http/ssh uri for git repo |
+| Branch | string | `branch` | Branch describes the branch name to be synced |
+| Rev | string | `rev` | Rev is the git hash to be used for syncing |
+| User | string | `user` | User for git access |
+| Once | bool | `once` | Once syncs initially and quits (use init container instead of sidecar) |
+| CredSecretRef | \*corev1.LocalObjectReference | `cred` | Reference to a Secret that has git credentials in field `password`. It is injected as env `GIT_SYNC_PASSWORD` in [git-sync](https://github.com/kubernetes/git-sync) container.Refer to how the `password` is [used in git-sync](https://github.com/kubernetes/git-sync/blob/40e188fb26ecad2d8174e486fc104939c6b1271d/cmd/git-sync/main.go#L477:6) |
+
+#### DagSpec
+| **Field** | **Type** | **json field** | **Info** |
+| --- | --- | --- | --- |
+| DagSubdir | string | `subdir` | DagSubdir is the directory under source where the dags are present |
+| Git | \*GitSpec | `git` | GitSpec defines details to pull DAGs from a git repo using github.com/kubernetes/git-sync sidecar |
+| NfsPV | \*corev1.PersistentVolumeClaim | `nfspv` | NfsPVSpec |
+| Storage | \*StorageSpec | `storage` | Storage has s3 compatible storage spec for copying files from |
+| GCS | \*GCSSpec | `gcs` | Gcs config which uses storage spec |
+
+#### SchedulerStatus
+| **Field** | **Type** | **json field** | **Info** |
+| --- | --- | --- | --- |
+| Resources | ComponentStatus | `resources` | Status is a string describing Scheduler status |
+| DagCount | int32 | `dagcount` | DagCount is a count of number of Dags observed |
+| RunCount | int32 | `runcount` | RunCount is a count of number of Dag Runs observed |
+
+## Common
+
+#### ComponentStatus
+| **Field** | **Type** | **json field** | **Info** |
+| --- | --- | --- | --- |
+| STS | []StsStatus | `sts` | StatefulSet status|
+| SVC | []SvcStatus | `svc` | Service status|
+| PDB | []PdbStatus | `pdb` | PDB status|
+| LastError | string | `lasterror` | LastError|
+| Status | string | `status` | Status|
+
+#### StsStatus 
+| **Field** | **Type** | **json field** | **Info** |
+| --- | --- | --- | --- |
+| Link | string | `link` | Link to sts|
+| Name | string | `name` | Name of sts|
+| Status | string | `status` | Status to rsrc|
+| Replicas | int32 | `replicas` | Replicas defines the no of MySQL instances desired|
+| ReadyReplicas | int32 | `readycount` | ReadyReplicas defines the no of MySQL instances that are ready|
+| CurrentReplicas | int32 | `currentcount` | CurrentReplicas defines the no of MySQL instances that are created|
+
+#### SvcStatus
+| **Field** | **Type** | **json field** | **Info** |
+| --- | --- | --- | --- |
+| Link | string | `link` | Link to rsrc|
+| Name | string | `name` | service name|
+| Status | string | `status` | Status to rsrc|
+
+#### PdbStatus
+| **Field** | **Type** | **json field** | **Info** |
+| --- | --- | --- | --- |
+| Link | string | `link` | Link to rsrc|
+| Name | string | `name` | Name of pdb|
+| Status | string | `status` | Status to rsrc|
+| CurrentHealthy | int32 | `currenthealthy` | currentHealthy|
+| DesiredHealthy | int32 | `desiredhealthy` | desiredHealthy|
+
+#### ResourceRequests
+| **Field** | **Type** | **json field** | **Info** |
+| --- | --- | --- | --- |
+| CPU | string | `cpu` | Cpu is the amount of CPU requested for a pod.|
+| Memory | string | `memory` | Memory is the amount of RAM requested for a Pod.|
+| Disk | string | `disk` | Disk is the amount of Disk requested for a pod.|
+| DiskStorageClass | string | `diskStorageClass` | DiskStorageClass is the storage class for Disk.  Disk must be present or this field is invalid.|
+
+#### ResourceLimits
+| **Field** | **Type** | **json field** | **Info** |
+| --- | --- | --- | --- |
+| CPU | string | `cpu` | Cpu is the CPU limit for a pod.|
+| Memory | string | `memory` | Memory is the RAM limit for a pod.|
diff --git a/docs/design.md b/docs/design.md
new file mode 100644
index 0000000..4f19d8c
--- /dev/null
+++ b/docs/design.md
@@ -0,0 +1,127 @@
+# Introduction
+
+### Airflow
+Apache Airflow is a platform to programmatically author, schedule and monitor workflows. Airflow is an Apache incubator project that is mature and has a good community momentum.
+
+![Airflow Multi Node](airflow-multi-node.png)
+
+### Kubernetes (k8s)
+Kubernetes(k8s) is a distributed workload orchestration system similar to Borg or Mesos. K8s can orchestrate containerized workloads on bare metal or on VMs and provides primitives modelled in a declarative way to manage the compute, memory, storage, networking, isolation, and life cycle. K8s can be accessed via kubectl(cli) or the APIs that consume the declarative specs. In the backend, the intent in the declarative specs are fulfilled by k8s core controllers that take actions (like cre [...]
+
+### What is an Operator in Kubernetes
+In case of some stateful applications, the declarative models provided by kubernetes are not sufficient to handle fault remediation, scaling with data integrity and availability. This is where an K8s Operator pattern excels. An operator encapsulates an application's operational domain knowledge and enable common day-N operations. An operator is an application-specific controller that extends the Kubernetes API to create, configure and manage stateful applications. An operator API is impl [...]
+
+`K8s Operator = K8s API +  CRD(declarative spec) + Custom Controller`
+
+The goal is to ensure that Kubernetes works well as a substrate for deploying Airflow. 
+# Airflow Operator Custom Resource (API)
+The Airflow operator API is implemented by extending the k8s API with Custom Resources Definitions (CRDs) that declaratively describes the intent. Custom resources are serializable as json and are stored in the API Server. The Airflow controller watches these Custom Resources and take actions to move the Airflow cluster to the desired state. All CRDs inherit the contents of ObjectMeta and TypeMeta that are common to k8s resources. 
+To improve cluster utilization and provide multiple users (in same trust domain) with some isolation, we are splitting the Airflow components into `AirflowBase` (common) and `AirflowCluster` (per user). `AirflowBase` includes MySQL, UI, NFS(DagStore). `AirflowCluster` includes Airflow Scheduler, Workers, Redis. This allows use cases where different users use different airflow plugins (operators, packages etc) in their setup.
+
+The [API Design](https://github.com/GoogleCloudPlatform/airflow-operator/blob/master/docs/api.md) details the `AirflowBase` and `AirflowCluster` Custom Resource fields.
+
+# Airflow Operator Custom Controllers
+
+## AirflowBase Controller
+AirflowBase controller watches for AirflowBase CR and fulfils the intent. The intent translates to creation, update or deletion of Statefulsets, Services, PVCs for MySQL, NFS.
+
+![Airflow Base](airflow-base.png)
+
+#### MySQL
+If MySQLSpec is specified and .operator is False a simple statefulset with a single instance of MySQL is deployed. This is not recommended for production deployments. For production cases, use CloudSQL connected using SQLProxy.
+
+TODO
+If .operator is True, a MySQLCluster and MySQLBackupSchedule CRs are created to deploy a MySQL cluster. MySQL Operator needs to be installed for handling the Custom Resources. 
+
+#### StatefulSet
+MySQL, SQLProxy, NFS cluster are all deployed as stateful sets. StatefulSet creates the desired number of pods and ensures pod-hostname to volume mapping. This is useful when a pod dies and a new pod takes its place. The network identity of the new pod is set to that of the pod being replaced. Similarly the volume mounted on the old pod is moved to the new pod. 
+
+#### EndPoint (Service)
+The AirflowUI and NFS cluster are exposed via a service for use by the users and AirflowClusters.
+
+#### Persistent Volumes and claims
+NFS cluster and MySQL use PVCs for data durability in the face of temporary compute degradation. Persistent Volume(PV) matching the Persistent Volume Claim(PVC) is used when pods are created. If a matching PV is not found, dynamic provisioning is used to provision a PV and attached to the PVC created by the StatefulSets. For an elastic scalable service, dynamic provisioning is preferred.
+
+#### Pods
+StatefulSet creates Pods. For NFS and MySQL, Persistent Volumes are attached to the Pod based on the PVC in the StatefulSet spec.  The AirflowUI and SQL-Proxy pods are simple single purpose pods and do not typically need data persistence. In case of MySQL controller, the details of MySQL pods can be found here. 
+
+## AirflowCluster Controller
+
+AirflowCluster Controller watches for AirflowCluster CR and fulfils the intent. The intent translates to creation, updation or deletion of Statefulsets, PVCs for Airflow UI, Scheduler, Workers and Redis. This Custom Resource allows users to spin-up their own Airflow clusters for providing DAG level isolation between users. This enables multi-user (same trust domain, tenant) Airflow deployment. It also allows users to iterate faster with one-time use schedulers. Isolation in SQL is achiev [...]
+
+TODO
+It could drain celery workers nodes and k8s executor pods to prepare for upgrade.
+Restarting airflow UI and Scheduler on detecting new DAGs in the DAG folder.
+
+![Airflow Cluster](airflow-cluster.png)
+
+#### Redis
+RedisSpec is required if .Spec.executor is celery. 
+If .operator is False a simple statefulset with a single instance of Redis is deployed. Since redis is used as a non persistent cache, either can be used for production.
+
+TODO
+If .operator is True, a  RedisReplicas custom resource is created to deploy Redis. Redis Operator needs to be installed for handling the Custom Resources. 
+
+#### StatefulSet
+Airflow Scheduler and workers are deployed as stateful sets. StatefulSet creates the desired number of pods and ensures pod-name to volume mapping. The network identity of the new pod is set to that of the pod being replaced. Similarly the volume mounted on the old pod is moved to the new pod. 
+
+#### EndPoint (service)
+Since none of the services are exposed outside, Service definition is not needed. If in future we support clustered redis, we may use Service to front that cluster.
+
+#### Persistent Volumes and claims
+Airflow scheduler and workers could use PVCs for mounting data volumes that contain the DAGs. The PVCs need to be [RWX(read write many) or ROX(read only many)](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes).
+
+### Airflow Pods
+Airflow Pods have the airflow container (scheduler,worker) and DAG Sidecar container. The DAG sidecar gets DAGs from the configured DAG source. In case of PVC as DAG source, the backing PV needs to be  mounted as RWX (RW many) or ROX (RO many). 
+
+![Airflow Pods](airflow-pod.png)
+
+
+# Telemetry
+## Logging
+The airflow operator logs to the standard output and error file streams which is recommended for logging in containerized workloads. The K8s cluster level logging captures these logs in a central place. For airflow components it is recommended to log to the standard file streams as well.
+
+## Metrics
+The controller uses `pbweb/airflow-prometheus-exporter:latest` as a side-car to expose prometheus metrics for Airflow on port `9112`.
+
+# Failure Modes and Remediation
+Failures modes of the AirflowCluster, AirflowBase and Airflow controller are considered in addition to general failure modes.
+
+## Process
+If any of the pod processes dies or crashes, kubelet detects container exit and restarts it. Also Pod readiness and liveness checks are used to detect and restart Pods that have functionally degraded.
+
+## Node
+One could use  cluster.Spec.Affinity.*.topologyKey as “kubernetes.io/hostname” to spread Pods across Nodes within a Zone. This would limit the effect of node failures within a zone. K8s would reschedule the pods from failed node to another available node. Non-local PVs are moved to the new node and attached to the new pod. This means the user would experience reduced capacity during the time a new pod is created and added back to the cluster. For statefulsets with just 1 replica, we rely [...]
+
+![Airflow Zone](airflow-zone-spread.png)
+
+## Zonal
+One could use  cluster.Spec.Affinity.*.topologyKey as “failure-domain.beta.kubernetes.io/zone” to spread Pods across Zones within a Region in a cluster that is deployed across zones. This would limit the effect of Zonal and node failures within a region.
+
+![Airflow Region](airflow-region-spread.png)
+
+
+Since PVs are zonal resources on most Cloud Providers, Pods may not be rescheduled into another available zone. This means the user may experience permanent reduced capacity unless the PV is unlinked manually and added back.
+
+## Control plane
+Deployed Airflow components do not require the operator control plane for normal operation. However if the cluster health degrades due to a failure, control plane is needed to recover and restore it to full health. Operator control plane health is monitored by kubernetes and recovered. Once recovered the operator would resync with the Airflow CRs and heal them if needed.
+
+## Storage
+Single instance storage failures would manifest as degraded Pod health and would be rescheduled. But since the PVC association is binding, rescheduled pods may face the same issue.
+
+TODO: on PV failure, detach PV from PVC and get a new PV ?
+
+# Security
+## Authn
+The k8s API server uses authentication tokens in order to authorize requests. This model is a standard and is integrated with the IAM providers on most public clouds. MySQL operator managed MySQL instances are protected by a password that is either user provided in a Secret or auto generated and saved in a Secret. K8s IAM and RBAC protects this Secret. Similarly Redis operator managed Redis cluster provides data protection. 
+TODO: Security for airflow components and NFS DAGs
+
+## Authz
+The Kubernetes API server provides for configurable RBAC (Role Based Access Control). The Airflow operator operates with a particular service account that would need appropriate RBAC to create and modify StatefulSet, Service, ConfigMap, Secrets and Pods. Same applies to MySQL operator and Redis operators if used.
+
+## Data in Flight
+MySQL supports encrypted connections. Clients should use ssl to connect to MySQL server. Similarly Redis offers secure connection to clients. Airflow UI enables SSL for secure HTTP access. 
+TODO: Confirm Airflow uses secured connections to MySQL and Redis
+
+## Data at Rest
+MySQL operator and Redis operator ensures secure data at rest. K8s API data is encrypted at the application layer in the API server. For  DAG volumes, one could use TDE (Transparent Disk Encryption) for both the PVs and Object Storage.
diff --git a/docs/development.md b/docs/development.md
new file mode 100644
index 0000000..76355d2
--- /dev/null
+++ b/docs/development.md
@@ -0,0 +1,71 @@
+# Development
+You should have kubeconfig setup to point to your cluster.
+In case you want to build the Airflow Operator from the source code, e.g., to test a fix or a feature you write, you can do so following the instructions below.
+
+## Cloning the repo:
+```bash
+$ mkdir -p $GOPATH/src/k8s.io
+$ cd $GOPATH/src/k8s.io
+$ git clone git@github.com:GoogleCloudPlatform/airflow-operator.git
+```
+
+## Building and running locally:
+```bash
+# build
+make build
+
+# run locally
+make run
+```
+## Building docker image 
+#### GCP
+When working with GCP ensure that gcloud is setup and gcr(container registry) is enabled for the current project.
+If not set IMG env to point to the desired registry image.
+```bash
+# building docker image
+make docker-build
+
+# push docker image
+make docker-push
+```
+
+
+### Non GCP
+Set IMG env to point to the desired registry image.
+```bash
+# building docker image
+make docker-build NOTGCP=true
+
+# push docker image
+make docker-push NOTGCP=true
+```
+## Running in cluster
+```bash
+# assumes kubeconfig is setup correctly
+make deploy
+```
+
+
+## Tests
+
+### Running local tests
+Runs unit-tests locally
+
+```bash
+make test
+```
+
+### Running e2e tests
+Before running e2e tests ensure that the desired version is running on the cluster or locally.
+
+```bash
+# Start controller in cluster:
+#   make docker-push
+#   make deploy
+# OR locally:
+#   make install
+#   make run
+# and then run the tests
+
+make e2e-test
+```
diff --git a/docs/quickstart.md b/docs/quickstart.md
new file mode 100644
index 0000000..bdb9ca4
--- /dev/null
+++ b/docs/quickstart.md
@@ -0,0 +1,123 @@
+# Quick Start
+
+## Deploy from GCP Marketplace
+[One Click Deployment](https://console.cloud.google.com/marketplace/details/google/airflow-operator) from Google Cloud Marketplace to your [GKE cluster](https://cloud.google.com/kubernetes-engine/). The marketplace may not have the latest version of the operator. If you need to deploy from latest master continue reading.
+
+## Running from source
+Refer to the [Development Guide](https://github.com/GoogleCloudPlatform/airflow-operator/blob/master/docs/development.md).
+
+## Installing on any cluster
+Ensure kubeconfig points to your cluster.
+Due to a [known issue](https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control#defining_permissions_in_a_role) in GKE, you will need to first grant yourself cluster-admin privileges before you can create custom roles and role bindings on a GKE cluster versioned 1.6 and up.
+```bash
+# grant admin 
+$ kubectl create clusterrolebinding cluster-admin-binding --clusterrole cluster-admin --user your-account-email
+```
+
+### Install CRDs
+Install the AirflowBase and AirflowCluster CRDs.
+The AirflowBase and AirflowCluster CRs result in [Application CRs](https://github.com/kubernetes-sigs/application) being created. Install Application CRD to see the Applications in the [GCP console](https://console.cloud.google.com/kubernetes/application)
+```bash
+# install Application and Airflow CRD
+$ make install
+```
+
+### Build operator docker image
+
+```bash
+# First we need to build the docker image for the controller
+# Set this to the name of the docker registry and image you want to use
+$ export IMG=gcr.io/myproject/airflow-controller:latest 
+
+# Build and push
+$ make docker-push
+```
+
+### Deploying Airflow Operator using manifests
+
+Installing the airflow operator creates the 'airflowop-system' namespace and creates a stateful set in that namespace for the operator.
+
+```bash
+# deploy the airflow operator
+$ make deploy
+
+# follow airflow controller logs in a terminal session
+$ kubectl logs -f airflowop-controller-manager-0 -n airflowop-system
+
+# to undeploy
+$ #make undeploy
+```
+
+## Create Airflow clusters using samples
+
+The `hack/sample/` directory contains sample Airflow CRs
+
+#### Deploy MySQL based samples
+
+```bash
+# deploy base components first
+$ kubectl apply -f hack/sample/mysql-celery/base.yaml
+# after 30-60s deploy cluster components 
+# using celery + git as DAG source
+$ kubectl apply -f hack/sample/mysql-celery/cluster.yaml
+# port forward to access the UI
+$ kubectl port-forward mc-cluster-airflowui-0 8080:8080
+# port forward to access the Flower
+$ kubectl port-forward mc-cluster-flower-0 5555:5555
+# get status of the CRs
+$ kubectl get airflowbase/mc-base -o yaml 
+$ kubectl get airflowcluster/mc-cluster -o yaml 
+
+# Against the same mc-base, we could deploy another cluster.
+# celery + gcs as DAG source (you need to update to point to your gcs bucket)
+$ kubectl apply -f hack/sample/mysql-celery-gcs/cluster.yaml
+$ kubectl port-forward mcg-cluster-airflowui-0 8081:8080
+$ kubectl get airflowcluster/mcg-cluster -o yaml 
+```
+
+#### Deploy Postgres based samples
+
+```bash
+# deploy base components first
+$ kubectl apply -f hack/sample/postgres-celery/base.yaml
+# after 30-60s deploy cluster components
+# using celery + git as DAG source
+$ kubectl apply -f hack/sample/postgres-celery/cluster.yaml
+# port forward to access the UI
+$ kubectl port-forward pc-cluster-airflowui-0 8080:8080
+# port forward to access the Flower
+$ kubectl port-forward pc-cluster-flower-0 5555:5555
+# get status of the CRs
+$ kubectl get airflowbase/pc-base -o yaml
+$ kubectl get airflowcluster/pc-cluster -o yaml
+
+# Against the same mc-base, we could deploy another cluster.
+# celery + gcs as DAG source (you need to update to point to your gcs bucket)
+$ kubectl apply -f hack/sample/mysql-celery-gcs/cluster.yaml
+$ kubectl port-forward mcg-cluster-airflowui-0 8081:8080
+$ kubectl get airflowcluster/mcg-cluster -o yaml
+```
+
+#### Running CloudSQL based samples
+CloudSQL(mysql)  needs to be setup on your project.
+A root password needs to be created for the CloudSQL.
+The information about the project, region, instance needs to be updated in hack/samples/cloudsql-celery/base.yaml.
+A secret containing the root password as "rootpassword" needs to be created with the name "cc-base-sql" (base.name + "-sql"). Update the hack/sample/cloudsql-celery/sqlproxy-secret.yaml
+
+```bash
+# create secret
+$ kubectl apply -f hack/sample/cloudsql-celery/sqlproxy-secret.yaml
+# deploy base components first
+$ kubectl apply -f hack/sample/cloudsql-celery/base.yaml
+# after 30-60s deploy cluster components
+$ kubectl apply -f hack/sample/cloudsql-celery/cluster.yaml
+# port forward to access the UI (port 8082)
+$ kubectl port-forward cc-cluster-airflowui-0 8082:8080
+# get status of the CRs
+$ kubectl get airflowbase/cc-base -o yaml 
+$ kubectl get airflowcluster/cc-cluster -o yaml 
+```
+
+## Next steps
+
+For more information check the [Design](https://github.com/GoogleCloudPlatform/airflow-operator/blob/master/docs/design.md) and detailed [User Guide](https://github.com/GoogleCloudPlatform/airflow-operator/blob/master/docs/userguide.md) to create your own cluster specs.
diff --git a/docs/userguide.md b/docs/userguide.md
new file mode 100644
index 0000000..6aac99e
--- /dev/null
+++ b/docs/userguide.md
@@ -0,0 +1,37 @@
+# User Guide
+
+TODO
+
+# FAQs
+
+1. How do we refresh DAGs ?
+The canonical way Airflow supports refreshing DAGs is via the `dag_dir_list_interval` config.
+https://cwiki.apache.org/confluence/display/AIRFLOW/Scheduler+Basics#Configuration
+You can set that config using `cluster.spec.config.airflow`
+Set the env `AIRFLOW__SCHEDULER__DAG_DIR_LIST_INTERVAL`
+By default dags are refreshed every 5 minutes.
+To enable continuous sync, use git or gcs dag source with once disabled.
+
+```yaml
+apiVersion: airflow.k8s.io/v1alpha1
+kind: AirflowCluster
+...
+spec:
+  ...
+  config:
+    airflow:
+      AIRFLOW__SCHEDULER__DAG_DIR_LIST_INTERVAL: "100" # default is 300 (seconds)
+  ...
+  dags:
+    subdir: ""
+    gcs:
+      bucket: "mydags"
+  # OR
+  dags:
+    subdir: "airflow/example_dags/"
+    git:
+      repo: "https://github.com/apache/incubator-airflow/"
+      once: false
+```
+
+
diff --git a/hack/appcrd.yaml b/hack/appcrd.yaml
new file mode 100644
index 0000000..da1e5bf
--- /dev/null
+++ b/hack/appcrd.yaml
@@ -0,0 +1,142 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  creationTimestamp: null
+  labels:
+    api: default
+    kubebuilder.k8s.io: 0.1.10
+  name: applications.app.k8s.io
+spec:
+  group: app.k8s.io
+  names:
+    kind: Application
+    plural: applications
+  scope: Namespaced
+  validation:
+    openAPIV3Schema:
+      properties:
+        apiVersion:
+          type: string
+        kind:
+          type: string
+        metadata:
+          type: object
+        spec:
+          type: object
+          properties:
+            selector:
+              type: object
+            assemblyPhase:
+              type: string
+            componentKinds:
+              items:
+                type: object
+              type: array
+            description:
+              type: string
+            info:
+              items:
+                properties:
+                  name:
+                    type: string
+                  type:
+                    type: string
+                  value:
+                    type: string
+                  valueFrom:
+                    properties:
+                      configMapKeyRef:
+                        properties:
+                          key:
+                            type: string
+                        type: object
+                      ingressRef:
+                        properties:
+                          host:
+                            type: string
+                          path:
+                            type: string
+                        type: object
+                      secretKeyRef:
+                        properties:
+                          key:
+                            type: string
+                        type: object
+                      serviceRef:
+                        properties:
+                          path:
+                            type: string
+                          port:
+                            type: integer
+                        type: object
+                      type:
+                        type: string
+                    type: object
+                type: object
+              type: array
+            descriptor:
+              type: object
+              properties:
+                keywords:
+                  items:
+                    type: string
+                  type: array
+                links:
+                  items:
+                    properties:
+                      description:
+                        type: string
+                      url:
+                        type: string
+                    type: object
+                  type: array
+                maintainers:
+                  items:
+                    properties:
+                      email:
+                        type: string
+                      name:
+                        type: string
+                      url:
+                        type: string
+                    type: object
+                  type: array
+                notes:
+                  type: string
+                owners:
+                  items:
+                    type: string
+                  type: array
+                type:
+                  type: string
+                version:
+                  type: string
+        status:
+          properties:
+            observedGeneration:
+              type: integer
+          type: object
+      type: object
+  version: v1beta1
+status:
+  acceptedNames:
+    kind: ""
+    plural: ""
+  conditions: []
+  storedVersions: []
diff --git a/hack/boilerplate.go.txt b/hack/boilerplate.go.txt
new file mode 100644
index 0000000..ea7414f
--- /dev/null
+++ b/hack/boilerplate.go.txt
@@ -0,0 +1,15 @@
+/*
+Copyright 2018 Google LLC.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
\ No newline at end of file
diff --git a/hack/sample/cloudsql-celery/base.yaml b/hack/sample/cloudsql-celery/base.yaml
new file mode 100644
index 0000000..592b720
--- /dev/null
+++ b/hack/sample/cloudsql-celery/base.yaml
@@ -0,0 +1,28 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+apiVersion: airflow.k8s.io/v1alpha1
+kind: AirflowBase
+metadata:
+  name: cc-base
+spec:
+  sqlproxy:
+    project: kubeflow-193622
+    region: us-central1
+    instance: airflow-test1
+    type: postgres
+  storage:
+    version: ""
diff --git a/hack/sample/cloudsql-celery/cluster.yaml b/hack/sample/cloudsql-celery/cluster.yaml
new file mode 100644
index 0000000..a4ec6dc
--- /dev/null
+++ b/hack/sample/cloudsql-celery/cluster.yaml
@@ -0,0 +1,43 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+apiVersion: airflow.k8s.io/v1alpha1
+kind: AirflowCluster
+metadata:
+  name: cc-cluster
+spec:
+  executor: Celery
+  redis:
+    operator: false
+  scheduler:
+    version: "1.10.2"
+  ui:
+    replicas: 1
+    version: "1.10.2"
+  worker:
+    replicas: 2
+    version: "1.10.2"
+  flower:
+    replicas: 1
+    version: "1.10.2"
+  dags:
+    subdir: "airflow/example_dags/"
+    git:
+      repo: "https://github.com/apache/incubator-airflow/"
+      once: true
+  airflowbase:
+    name: cc-base
diff --git a/hack/sample/cloudsql-celery/sqlproxy-secret.yaml b/hack/sample/cloudsql-celery/sqlproxy-secret.yaml
new file mode 100644
index 0000000..c4b2a38
--- /dev/null
+++ b/hack/sample/cloudsql-celery/sqlproxy-secret.yaml
@@ -0,0 +1,24 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+apiVersion: v1
+kind: Secret
+metadata:
+  name: cc-base-sql
+type: Opaque
+data:
+  rootpassword: cm9vdDEyMw==
diff --git a/hack/sample/cloudsql-k8s/cluster.yaml b/hack/sample/cloudsql-k8s/cluster.yaml
new file mode 100644
index 0000000..aeaab4c
--- /dev/null
+++ b/hack/sample/cloudsql-k8s/cluster.yaml
@@ -0,0 +1,38 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+apiVersion: airflow.k8s.io/v1alpha1
+kind: AirflowCluster
+metadata:
+  name: ck-cluster
+spec:
+  executor: Kubernetes
+  ui:
+    replicas: 1
+    version: "1.10.2"
+  scheduler:
+    version: "1.10.2"
+  worker:
+    version: "1.10.2"
+  dags:
+    subdir: "airflow/example_dags/"
+    git:
+      repo: "https://github.com/apache/incubator-airflow/"
+      once: true
+      branch: master
+  airflowbase:
+    name: cc-base
diff --git a/hack/sample/cloudsql-local/cluster.yaml b/hack/sample/cloudsql-local/cluster.yaml
new file mode 100644
index 0000000..0c4434e
--- /dev/null
+++ b/hack/sample/cloudsql-local/cluster.yaml
@@ -0,0 +1,29 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+apiVersion: airflow.k8s.io/v1alpha1
+kind: AirflowCluster
+metadata:
+  name: cl-cluster
+spec:
+  executor: Local
+  scheduler:
+    version: "1.10.2"
+  ui:
+    replicas: 1
+    version: "1.10.2"
+  airflowbase:
+    name: cc-base
diff --git a/hack/sample/mysql-celery-gcs/cluster.yaml b/hack/sample/mysql-celery-gcs/cluster.yaml
new file mode 100644
index 0000000..bd2d597
--- /dev/null
+++ b/hack/sample/mysql-celery-gcs/cluster.yaml
@@ -0,0 +1,42 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+apiVersion: airflow.k8s.io/v1alpha1
+kind: AirflowCluster
+metadata:
+  name: mcg-cluster
+spec:
+  executor: Celery
+  redis:
+    operator: false
+  scheduler:
+    version: "1.10.2"
+  ui:
+    replicas: 1
+    version: "1.10.2"
+  flower:
+    replicas: 1
+    version: "1.10.2"
+  worker:
+    replicas: 2
+    version: "1.10.2"
+  dags:
+    subdir: ""
+    gcs:
+      bucket: "mydags"
+  airflowbase:
+    name: mc-base
diff --git a/hack/sample/mysql-celery/base.yaml b/hack/sample/mysql-celery/base.yaml
new file mode 100644
index 0000000..11c2bc3
--- /dev/null
+++ b/hack/sample/mysql-celery/base.yaml
@@ -0,0 +1,26 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+apiVersion: airflow.k8s.io/v1alpha1
+kind: AirflowBase
+metadata:
+  name: mc-base
+spec:
+  mysql:
+    operator: false
+  storage:
+    version: ""
diff --git a/hack/sample/mysql-celery/cluster.yaml b/hack/sample/mysql-celery/cluster.yaml
new file mode 100644
index 0000000..71c4a2e
--- /dev/null
+++ b/hack/sample/mysql-celery/cluster.yaml
@@ -0,0 +1,46 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+apiVersion: airflow.k8s.io/v1alpha1
+kind: AirflowCluster
+metadata:
+  name: mc-cluster
+spec:
+  executor: Celery
+  config:
+    airflow:
+      AIRFLOW_SOME_CONFIG: SomeValue
+  redis:
+    operator: false
+  scheduler:
+    version: "1.10.2"
+  ui:
+    replicas: 1
+    version: "1.10.2"
+  worker:
+    replicas: 2
+    version: "1.10.2"
+  flower:
+    replicas: 1
+    version: "1.10.2"
+  dags:
+    subdir: "airflow/example_dags/"
+    git:
+      repo: "https://github.com/apache/incubator-airflow/"
+      once: true
+  airflowbase:
+    name: mc-base
diff --git a/hack/sample/mysql-k8s/cluster.yaml b/hack/sample/mysql-k8s/cluster.yaml
new file mode 100644
index 0000000..8817c4c
--- /dev/null
+++ b/hack/sample/mysql-k8s/cluster.yaml
@@ -0,0 +1,40 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+apiVersion: airflow.k8s.io/v1alpha1
+kind: AirflowCluster
+metadata:
+  name: mk-cluster
+spec:
+  executor: Kubernetes
+  ui:
+    replicas: 1
+    version: "1.10.2"
+  scheduler:
+    version: "1.10.2"
+  worker:
+    #image: "gcr.io/airflow-development-225219/airflow"
+    #version: "demo-nr4"
+    version: "1.10.2"
+  dags:
+    subdir: "airflow/example_dags/"
+    git:
+      repo: "https://github.com/apache/incubator-airflow/"
+      once: true
+      branch: master
+  airflowbase:
+    name: mc-base
diff --git a/hack/sample/mysql-local/cluster.yaml b/hack/sample/mysql-local/cluster.yaml
new file mode 100644
index 0000000..ebf7b8c
--- /dev/null
+++ b/hack/sample/mysql-local/cluster.yaml
@@ -0,0 +1,35 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+apiVersion: airflow.k8s.io/v1alpha1
+kind: AirflowCluster
+metadata:
+  name: ml-cluster
+spec:
+  executor: Local
+  scheduler:
+    version: "1.10.2"
+  ui:
+    replicas: 1
+    version: "1.10.2"
+  airflowbase:
+    name: mc-base
+    #  config:
+    #airflow:
+    #  AIRFLOW__SCHEDULER__DAG_DIR_LIST_INTERVAL: "100"
+    #  AIRFLOW__WEBSERVER__AUTHENTICATE: "True"
+    #  AIRFLOW__WEBSERVER__AUTH_BACKEND: "airflow.contrib.auth.backends.password_auth"
diff --git a/hack/sample/postgres-celery-memorystore/cluster.yaml b/hack/sample/postgres-celery-memorystore/cluster.yaml
new file mode 100644
index 0000000..5c72b2e
--- /dev/null
+++ b/hack/sample/postgres-celery-memorystore/cluster.yaml
@@ -0,0 +1,47 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+apiVersion: airflow.k8s.io/v1alpha1
+kind: AirflowCluster
+metadata:
+  name: pc-cluster
+spec:
+  executor: Celery
+  memoryStore:
+    project: "project-id"
+    region: "region-id"
+    memorySizeGb: 1
+    tier: "basic"
+    maxMemoryPolicy: "allkeys-lru"
+  scheduler:
+    version: "1.10.2"
+  ui:
+    replicas: 1
+    version: "1.10.2"
+  worker:
+    replicas: 2
+    version: "1.10.2"
+  flower:
+    replicas: 1
+    version: "1.10.2"
+  dags:
+    subdir: "airflow/example_dags/"
+    git:
+      repo: "https://github.com/apache/incubator-airflow/"
+      once: true
+  airflowbase:
+    name: pc-base
diff --git a/hack/sample/postgres-celery-redis/cluster.yaml b/hack/sample/postgres-celery-redis/cluster.yaml
new file mode 100644
index 0000000..2d0ca7d
--- /dev/null
+++ b/hack/sample/postgres-celery-redis/cluster.yaml
@@ -0,0 +1,45 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+apiVersion: airflow.k8s.io/v1alpha1
+kind: AirflowCluster
+metadata:
+  name: pc-cluster
+spec:
+  executor: Celery
+  redis:
+    operator: false
+    redisHost: "redis"
+    redisPassword: true
+  scheduler:
+    version: "1.10.2"
+  ui:
+    replicas: 1
+    version: "1.10.2"
+  worker:
+    replicas: 2
+    version: "1.10.2"
+  flower:
+    replicas: 1
+    version: "1.10.2"
+  dags:
+    subdir: "airflow/example_dags/"
+    git:
+      repo: "https://github.com/apache/incubator-airflow/"
+      once: true
+  airflowbase:
+    name: pc-base
diff --git a/hack/sample/postgres-celery-redis/redis-secret.yaml b/hack/sample/postgres-celery-redis/redis-secret.yaml
new file mode 100644
index 0000000..e891e1c
--- /dev/null
+++ b/hack/sample/postgres-celery-redis/redis-secret.yaml
@@ -0,0 +1,24 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+apiVersion: v1
+kind: Secret
+metadata:
+  name: pc-cluster-redis
+type: Opaque
+data:
+  password: aGVsbG93b3JsZA==
diff --git a/hack/sample/postgres-celery-redis/redis.yaml b/hack/sample/postgres-celery-redis/redis.yaml
new file mode 100644
index 0000000..dc6b712
--- /dev/null
+++ b/hack/sample/postgres-celery-redis/redis.yaml
@@ -0,0 +1,59 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+apiVersion: v1
+kind: Service
+metadata:
+  name: redis
+spec:
+  ports:
+    - port: 6379
+      name: redis
+  selector:
+    app: redis
+---
+apiVersion: apps/v1beta2
+kind: StatefulSet
+metadata:
+  name: redis
+spec:
+  selector:
+    matchLabels:
+      app: redis  # has to match .spec.template.metadata.labels
+  serviceName: redis
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: redis  # has to match .spec.selector.matchLabels
+    spec:
+      containers:
+        - name: redis
+          image: redis:4.0
+          imagePullPolicy: Always
+          args:
+          - --requirepass
+          - $(REDIS_PASSWORD)
+          env:
+          - name: REDIS_PASSWORD
+            valueFrom:
+              secretKeyRef:
+                key: password
+                name: pc-cluster-redis
+          ports:
+            - containerPort: 6379
+              name: redis
diff --git a/hack/sample/postgres-celery/base.yaml b/hack/sample/postgres-celery/base.yaml
new file mode 100644
index 0000000..e728093
--- /dev/null
+++ b/hack/sample/postgres-celery/base.yaml
@@ -0,0 +1,26 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+apiVersion: airflow.k8s.io/v1alpha1
+kind: AirflowBase
+metadata:
+  name: pc-base
+spec:
+  postgres:
+    operator: false
+  storage:
+    version: ""
diff --git a/hack/sample/postgres-celery/cluster.yaml b/hack/sample/postgres-celery/cluster.yaml
new file mode 100644
index 0000000..bfea43a
--- /dev/null
+++ b/hack/sample/postgres-celery/cluster.yaml
@@ -0,0 +1,43 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+apiVersion: airflow.k8s.io/v1alpha1
+kind: AirflowCluster
+metadata:
+  name: pc-cluster
+spec:
+  executor: Celery
+  redis:
+    operator: false
+  scheduler:
+    version: "1.10.2"
+  ui:
+    replicas: 1
+    version: "1.10.2"
+  worker:
+    replicas: 2
+    version: "1.10.2"
+  flower:
+    replicas: 1
+    version: "1.10.2"
+  dags:
+    subdir: "airflow/example_dags/"
+    git:
+      repo: "https://github.com/apache/incubator-airflow/"
+      once: true
+  airflowbase:
+    name: pc-base
diff --git a/hack/sample/postgres-k8s/cluster.yaml b/hack/sample/postgres-k8s/cluster.yaml
new file mode 100644
index 0000000..e5da34a
--- /dev/null
+++ b/hack/sample/postgres-k8s/cluster.yaml
@@ -0,0 +1,38 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+apiVersion: airflow.k8s.io/v1alpha1
+kind: AirflowCluster
+metadata:
+  name: pk-cluster
+spec:
+  executor: Kubernetes
+  ui:
+    replicas: 1
+    version: "1.10.2"
+  scheduler:
+    version: "1.10.2"
+  worker:
+    version: "1.10.2"
+  dags:
+    subdir: "airflow/example_dags/"
+    git:
+      repo: "https://github.com/apache/incubator-airflow/"
+      once: true
+      branch: master
+  airflowbase:
+    name: pc-base
diff --git a/hack/sample/postgres-local/cluster.yaml b/hack/sample/postgres-local/cluster.yaml
new file mode 100644
index 0000000..f3eeecc
--- /dev/null
+++ b/hack/sample/postgres-local/cluster.yaml
@@ -0,0 +1,30 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+apiVersion: airflow.k8s.io/v1alpha1
+kind: AirflowCluster
+metadata:
+  name: pl-cluster
+spec:
+  executor: Local
+  ui:
+    replicas: 1
+    version: "1.10.2"
+  scheduler:
+    version: "1.10.2"
+  airflowbase:
+    name: pc-base
diff --git a/pkg/apis/addtoscheme_airflow_v1alpha1.go b/pkg/apis/addtoscheme_airflow_v1alpha1.go
new file mode 100644
index 0000000..c581952
--- /dev/null
+++ b/pkg/apis/addtoscheme_airflow_v1alpha1.go
@@ -0,0 +1,25 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package apis
+
+import (
+	"k8s.io/airflow-operator/pkg/apis/airflow/v1alpha1"
+)
+
+func init() {
+	// Register the types with the Scheme so the components can map objects to GroupVersionKinds and back
+	AddToSchemes = append(AddToSchemes, v1alpha1.SchemeBuilder.AddToScheme)
+}
diff --git a/pkg/apis/airflow/group.go b/pkg/apis/airflow/group.go
new file mode 100644
index 0000000..b476ef4
--- /dev/null
+++ b/pkg/apis/airflow/group.go
@@ -0,0 +1,17 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package airflow contains airflow API versions
+package airflow
diff --git a/pkg/apis/airflow/v1alpha1/airflowbase_types.go b/pkg/apis/airflow/v1alpha1/airflowbase_types.go
new file mode 100644
index 0000000..600fcdf
--- /dev/null
+++ b/pkg/apis/airflow/v1alpha1/airflowbase_types.go
@@ -0,0 +1,560 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/util/validation/field"
+	"sigs.k8s.io/controller-reconciler/pkg/finalizer"
+	"sigs.k8s.io/controller-reconciler/pkg/status"
+)
+
+// Defaults and constant strings used by the AirflowBase types.
+const (
+	DefaultMySQLImage      = "mysql"
+	DefaultMySQLVersion    = "5.7"
+	DefaultPostgresImage   = "postgres"
+	DefaultPostgresVersion = "9.5"
+	defaultUIImage         = "gcr.io/airflow-operator/airflow"
+	defaultUIVersion       = "1.10.2"
+	defaultFlowerVersion   = "1.10.2"
+	defaultNFSVersion      = "0.8"
+	defaultNFSImage        = "k8s.gcr.io/volume-nfs"
+	defaultSQLProxyImage   = "gcr.io/cloud-airflow-public/airflow-sqlproxy"
+	defaultSQLProxyVersion = "1.8.0"
+	// daily@midnight, Quartz-style cron expression. A stray trailing
+	// backtick inside the string literal was removed; it would have been
+	// passed verbatim to the backup scheduler.
+	defaultSchedule        = "0 0 0 ? * * *"
+	defaultDBReplicas      = 1
+	defaultOperator        = false
+	defaultStorageProvider = "s3"
+	providerS3             = "s3"
+	StatusReady            = "Ready"
+	StatusInProgress       = "InProgress"
+	StatusDisabled         = "Disabled"
+	DatabaseMySQL          = "MySQL"
+	DatabasePostgres       = "Postgres"
+	DatabaseSQLProxy       = "SQLProxy"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// AirflowBase represents the components required for an Airflow scheduler and worker to
+// function. At a minimum they need a SQL service (MySQL or SQLProxy) and Airflow UI.
+// In addition for an installation with minimal external dependencies, NFS and Airflow UI
+// are also added.
+// +k8s:openapi-gen=true
+// +kubebuilder:resource:path=airflowbases
+type AirflowBase struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   AirflowBaseSpec   `json:"spec,omitempty"`
+	Status AirflowBaseStatus `json:"status,omitempty"`
+}
+
+// AirflowBaseStatus defines the observed state of AirflowBase
+type AirflowBaseStatus struct {
+	status.Meta          `json:",inline"`
+	status.ComponentMeta `json:",inline"`
+}
+
+// AirflowBaseSpec defines the desired state of AirflowBase
+type AirflowBaseSpec struct {
+	// Selector for fitting pods to nodes whose labels match the selector.
+	// https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+	// +optional
+	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+	// Define scheduling constraints for pods.
+	// +optional
+	Affinity *corev1.Affinity `json:"affinity,omitempty"`
+	// Custom annotations to be added to the pods.
+	// +optional
+	Annotations map[string]string `json:"annotations,omitempty"`
+	// Custom labels to be added to the pods.
+	// +optional
+	Labels map[string]string `json:"labels,omitempty"`
+	// Spec for MySQL component.
+	// +optional
+	MySQL    *MySQLSpec    `json:"mysql,omitempty"`
+	// Spec for the SQLProxy component.
+	// +optional
+	SQLProxy *SQLProxySpec `json:"sqlproxy,omitempty"`
+	// Spec for the Postgres component.
+	// +optional
+	Postgres *PostgresSpec `json:"postgres,omitempty"`
+	// Spec for NFS component.
+	// +optional
+	Storage *NFSStoreSpec `json:"storage,omitempty"`
+}
+
+// validate checks that at least one database backend (MySQL, Postgres or
+// SQLProxy) is configured. Safe to call on a nil receiver.
+func (s *AirflowBaseSpec) validate(fp *field.Path) field.ErrorList {
+	errs := field.ErrorList{}
+	if s == nil {
+		return errs
+	}
+	if s.MySQL == nil && s.SQLProxy == nil && s.Postgres == nil {
+		// Bug fix: the message previously omitted Postgres even though a
+		// Postgres spec satisfies this requirement.
+		errs = append(errs, field.Required(fp.Child("database"), "Either MySQL, Postgres or SQLProxy is required"))
+	}
+	return errs
+}
+
+// PostgresSpec defines the attributes and desired state of Postgres Component
+// TODO - minimum spec needed .. for now it is version: ""
+// need to consider empty mysql
+type PostgresSpec struct {
+	// Image defines the Postgres Docker image name
+	// +optional
+	Image string `json:"image,omitempty"`
+	// Version defines the Postgres Docker image version
+	// +optional
+	Version string `json:"version,omitempty"`
+	// Replicas defines the number of running Postgres instances in a cluster
+	// +optional
+	Replicas int32 `json:"replicas,omitempty"`
+	// VolumeClaimTemplate allows a user to specify volume claim for Postgres Server files
+	// +optional
+	VolumeClaimTemplate *corev1.PersistentVolumeClaim `json:"volumeClaimTemplate,omitempty"`
+	// Flag when True generates PostgresOperator CustomResource to be handled by Postgres Operator
+	// If False, a StatefulSet with 1 replica is created (not for production setups)
+	// +optional
+	Operator bool `json:"operator,omitempty"`
+	// Resources is the resource requests and limits for the pods.
+	Resources corev1.ResourceRequirements `json:"resources,omitempty"`
+	// Options command line options for postgres.
+	// NOTE(review): this field has no json tag, so it serializes as
+	// "Options" (capitalized), unlike every other field in this struct —
+	// confirm whether `json:"options,omitempty"` was intended.
+	Options map[string]string
+}
+
+func (s *PostgresSpec) validate(fp *field.Path) field.ErrorList {
+	errs := field.ErrorList{}
+	if s == nil {
+		return errs
+	}
+	if s.Operator == true {
+		errs = append(errs, field.Invalid(fp.Child("operator"), "", "Operator is not supported in this version"))
+	}
+
+	return errs
+}
+
+// MySQLSpec defines the attributes and desired state of MySQL Component
+// TODO - minimum spec needed .. for now it is version: ""
+// need to consider empty mysql
+type MySQLSpec struct {
+	// Image defines the MySQL Docker image name
+	// +optional
+	Image string `json:"image,omitempty"`
+	// Version defines the MySQL Docker image version
+	// +optional
+	Version string `json:"version,omitempty"`
+	// Replicas defines the number of running MySQL instances in a cluster
+	// +optional
+	Replicas int32 `json:"replicas,omitempty"`
+	// VolumeClaimTemplate allows a user to specify volume claim for MySQL Server files
+	// +optional
+	VolumeClaimTemplate *corev1.PersistentVolumeClaim `json:"volumeClaimTemplate,omitempty"`
+	// BackupVolumeClaimTemplate allows a user to specify a volume to temporarily store the
+	// data for a backup prior to it being shipped to object storage.
+	// +optional
+	BackupVolumeClaimTemplate *corev1.PersistentVolumeClaim `json:"backupVolumeClaimTemplate,omitempty"`
+	// Flag when True generates MySQLOperator CustomResource to be handled by MySQL Operator
+	// If False, a StatefulSet with 1 replica is created (not for production setups)
+	// +optional
+	Operator bool `json:"operator,omitempty"`
+	// Spec defining the Backup Custom Resource to be handled by MySQLOperator
+	// Ignored when Operator is False
+	// +optional
+	Backup *MySQLBackup `json:"backup,omitempty"`
+	// Resources is the resource requests and limits for the pods.
+	Resources corev1.ResourceRequirements `json:"resources,omitempty"`
+	// Options command line options for mysql
+	// NOTE(review): this field has no json tag, so it serializes as
+	// "Options" (capitalized), unlike every other field in this struct —
+	// confirm whether `json:"options,omitempty"` was intended.
+	Options map[string]string
+}
+
+func (s *MySQLSpec) validate(fp *field.Path) field.ErrorList {
+	errs := field.ErrorList{}
+	if s == nil {
+		return errs
+	}
+	if s.Operator == true {
+		errs = append(errs, field.Invalid(fp.Child("operator"), "", "Operator is not supported in this version"))
+	}
+	if s.Backup != nil {
+		errs = append(errs, field.Invalid(fp.Child("backup"), "", "Backup is not supported in this version"))
+	}
+
+	errs = append(errs, s.Backup.validate(fp.Child("backup"))...)
+	return errs
+}
+
+// MySQLBackup defines the Backup Custom Resource which is handled by MySQLOperator
+type MySQLBackup struct {
+	// Schedule is the cron string used to schedule backup
+	Schedule string `json:"schedule"`
+	// Storage has the s3 compatible storage spec
+	Storage StorageSpec `json:"storage"`
+}
+
+func (s *MySQLBackup) validate(fp *field.Path) field.ErrorList {
+	errs := field.ErrorList{}
+	if s == nil {
+		return errs
+	}
+	if !validCronString(s.Schedule) {
+		errs = append(errs,
+			field.Invalid(fp.Child("schedule"),
+				s.Schedule,
+				"Invalid Schedule cron string"))
+	}
+
+	errs = append(errs, s.Storage.validate(fp.Child("storage"))...)
+
+	return errs
+}
+
+// validCronString reports whether cron is a valid cron schedule string.
+// NOTE(review): currently a stub that accepts every input.
+func validCronString(cron string) bool {
+	// TODO : Check cron string
+	return true
+}
+
+// StorageSpec describes the s3 compatible storage
+type StorageSpec struct {
+	// Provider is the storage type used for backup and restore
+	// e.g. s3, oci-s3-compat, aws-s3, gce-s3, etc.
+	StorageProvider string `json:"storageprovider"`
+	// SecretRef is a reference to the Kubernetes secret containing the configuration for uploading
+	// the backup to authenticated storage.
+	SecretRef *corev1.LocalObjectReference `json:"secretRef,omitempty"`
+	// Config is generic string based key-value map that defines non-secret configuration values for
+	// uploading the backup to storage w.r.t the configured storage provider.
+	Config map[string]string `json:"config,omitempty"`
+}
+
+// validate checks that the s3-compatible storage spec names a supported
+// provider, references a credentials secret, and supplies the required
+// config keys (endpoint, region, bucket).
+func (s *StorageSpec) validate(fp *field.Path) field.ErrorList {
+	errs := field.ErrorList{}
+	// Nil guard added for consistency with every other validator in this
+	// file; without it a nil receiver panics on the field accesses below.
+	if s == nil {
+		return errs
+	}
+	if !validStorageProvider(s.StorageProvider) {
+		errs = append(errs,
+			field.Invalid(fp.Child("storageprovider"),
+				s.StorageProvider,
+				"Invalid Storage Provider"))
+	}
+	if s.SecretRef == nil {
+		errs = append(errs, field.Required(fp.Child("secretRef"), ""))
+	} else if s.SecretRef.Name == "" {
+		errs = append(errs, field.Required(fp.Child("secretRef", "name"), ""))
+	}
+
+	config := fp.Child("config")
+	if s.Config == nil {
+		errs = append(errs, field.Required(config, ""))
+		return errs
+	}
+
+	// Required provider-specific connection settings.
+	if s.Config["endpoint"] == "" {
+		errs = append(errs, field.Required(config.Key("endpoint"), "no storage config 'endpoint'"))
+	}
+
+	if s.Config["region"] == "" {
+		errs = append(errs, field.Required(config.Key("region"), "no storage config 'region'"))
+	}
+
+	if s.Config["bucket"] == "" {
+		errs = append(errs, field.Required(config.Key("bucket"), "no storage config 'bucket'"))
+	}
+
+	return errs
+}
+
+// validStorageProvider reports whether provider is a supported backup
+// storage provider (currently only s3).
+func validStorageProvider(provider string) bool {
+	return provider == providerS3
+}
+
+// AirflowUISpec defines the attributes to deploy Airflow UI component
+type AirflowUISpec struct {
+	// Image defines the AirflowUI Docker image.
+	// +optional
+	Image string `json:"image,omitempty"`
+	// Version defines the AirflowUI Docker image version.
+	// +optional
+	Version string `json:"version,omitempty"`
+	// Replicas defines the number of running Airflow UI instances in a cluster
+	// +optional
+	Replicas int32 `json:"replicas,omitempty"`
+	// Resources is the resource requests and limits for the pods.
+	// +optional
+	Resources corev1.ResourceRequirements `json:"resources,omitempty"`
+}
+
+// validate performs (currently no) checks on the UI spec.
+func (s *AirflowUISpec) validate(fp *field.Path) field.ErrorList {
+	//errs = append(errs, s.Resources.validate(fp.Child("resources"))...)
+	return field.ErrorList{}
+}
+
+// NFSStoreSpec defines the attributes to deploy Airflow Storage component
+type NFSStoreSpec struct {
+	// Image defines the NFS Docker image.
+	// +optional
+	Image string `json:"image,omitempty"`
+	// Version defines the NFS Server Docker image version.
+	// +optional
+	Version string `json:"version,omitempty"`
+	// Resources is the resource requests and limits for the pods.
+	// +optional
+	Resources corev1.ResourceRequirements `json:"resources,omitempty"`
+	// Volume allows a user to specify volume claim template to be used for fileserver
+	// +optional
+	Volume *corev1.PersistentVolumeClaim `json:"volumeClaimTemplate,omitempty"`
+}
+
+// validate performs (currently no) checks on the NFS storage spec.
+func (s *NFSStoreSpec) validate(fp *field.Path) field.ErrorList {
+	errs := field.ErrorList{}
+	// TODO Volume check
+	//errs = append(errs, s.Resources.validate(fp.Child("resources"))...)
+	return errs
+}
+
+// SQLProxySpec defines the attributes to deploy SQL Proxy component
+type SQLProxySpec struct {
+	// Image defines the SQLProxy Docker image name
+	// +optional
+	Image string `json:"image,omitempty"`
+	// Version defines the SQL Proxy docker image version.
+	// +optional
+	Version string `json:"version,omitempty"`
+	// example: myProject:us-central1:myInstance=tcp:3306
+	// Project defines the SQL instance project
+	Project string `json:"project"`
+	// Region defines the SQL instance region
+	Region string `json:"region"`
+	// Instance defines the SQL instance name
+	Instance string `json:"instance"`
+	// Type defines the SQL instance type
+	Type string `json:"type"`
+	// Resources is the resource requests and limits for the pods.
+	// +optional
+	Resources corev1.ResourceRequirements `json:"resources,omitempty"`
+}
+
+func (s *SQLProxySpec) validate(fp *field.Path) field.ErrorList {
+	errs := field.ErrorList{}
+	if s == nil {
+		return errs
+	}
+	if s.Project == "" {
+		errs = append(errs, field.Required(fp.Child("project"), "Missing cloudSQL Project"))
+	}
+	if s.Region == "" {
+		errs = append(errs, field.Required(fp.Child("region"), "Missing cloudSQL Region"))
+	}
+	if s.Instance == "" {
+		errs = append(errs, field.Required(fp.Child("instance"), "Missing cloudSQL Instance"))
+	}
+	return errs
+}
+
+// Resources aggregates resource requests and limits. Note that requests, if specified, must be less
+// than or equal to limits.
+type Resources struct {
+	// The amount of CPU, Memory, and Disk requested for pods.
+	// +optional
+	Requests ResourceRequests `json:"requests,omitempty"`
+	// The limit of CPU and Memory that pods may use.
+	// +optional
+	Limits ResourceLimits `json:"limits,omitempty"`
+}
+
+// validate performs (currently no) checks on the resource spec.
+func (s *Resources) validate(fp *field.Path) field.ErrorList {
+	return field.ErrorList{}
+}
+
+// ResourceRequests is used to describe the resource requests for a Redis pod.
+type ResourceRequests struct {
+	// Cpu is the amount of CPU requested for a pod.
+	// +optional
+	CPU string `json:"cpu,omitempty"`
+	// Memory is the amount of RAM requested for a Pod.
+	// +optional
+	Memory string `json:"memory,omitempty"`
+	// Disk is the amount of Disk requested for a pod.
+	// +optional
+	Disk string `json:"disk,omitempty"`
+	// DiskStorageClass is the storage class for Disk.
+	// Disk must be present or this field is invalid.
+	// +optional
+	DiskStorageClass string `json:"diskStorageClass,omitempty"`
+}
+
+// ResourceLimits is used to describe the resources limits for a Redis pod.
+// When limits are exceeded, the Pod will be terminated.
+type ResourceLimits struct {
+	// Cpu is the CPU limit for a pod.
+	// +optional
+	CPU string `json:"cpu,omitempty"`
+	// Memory is the RAM limit for a pod.
+	// +optional
+	Memory string `json:"memory,omitempty"`
+}
+
+// Helper functions for the resources
+
+// ApplyDefaults fills unset fields of the AirflowBase spec with their
+// default image/version/replica values, resets the component status
+// list, and ensures the standard finalizer is present.
+func (b *AirflowBase) ApplyDefaults() {
+	if b.Spec.MySQL != nil {
+		if b.Spec.MySQL.Replicas == 0 {
+			b.Spec.MySQL.Replicas = defaultDBReplicas
+		}
+		if b.Spec.MySQL.Image == "" {
+			b.Spec.MySQL.Image = DefaultMySQLImage
+		}
+		if b.Spec.MySQL.Version == "" {
+			b.Spec.MySQL.Version = DefaultMySQLVersion
+		}
+		if b.Spec.MySQL.Backup != nil {
+			// Bug fix: the StorageProvider default was previously applied
+			// twice by two identical if-blocks; the duplicate was removed.
+			if b.Spec.MySQL.Backup.Storage.StorageProvider == "" {
+				b.Spec.MySQL.Backup.Storage.StorageProvider = defaultStorageProvider
+			}
+			if b.Spec.MySQL.Backup.Schedule == "" {
+				b.Spec.MySQL.Backup.Schedule = defaultSchedule
+			}
+		}
+	}
+	if b.Spec.Postgres != nil {
+		if b.Spec.Postgres.Replicas == 0 {
+			b.Spec.Postgres.Replicas = defaultDBReplicas
+		}
+		if b.Spec.Postgres.Image == "" {
+			b.Spec.Postgres.Image = DefaultPostgresImage
+		}
+		if b.Spec.Postgres.Version == "" {
+			b.Spec.Postgres.Version = DefaultPostgresVersion
+		}
+	}
+	if b.Spec.Storage != nil {
+		if b.Spec.Storage.Image == "" {
+			b.Spec.Storage.Image = defaultNFSImage
+		}
+		if b.Spec.Storage.Version == "" {
+			b.Spec.Storage.Version = defaultNFSVersion
+		}
+	}
+	if b.Spec.SQLProxy != nil {
+		if b.Spec.SQLProxy.Image == "" {
+			b.Spec.SQLProxy.Image = defaultSQLProxyImage
+		}
+		if b.Spec.SQLProxy.Version == "" {
+			b.Spec.SQLProxy.Version = defaultSQLProxyVersion
+		}
+	}
+	b.Status.ComponentList = status.ComponentList{}
+	finalizer.EnsureStandard(b)
+}
+
+// HandleError records err in the status block, or clears any previously
+// recorded error when err is nil.
+func (b *AirflowBase) HandleError(err error) {
+	if err == nil {
+		b.Status.ClearError()
+		return
+	}
+	b.Status.SetError("ErrorSeen", err.Error())
+}
+
+// Validate checks the AirflowBase for errors: each component spec is
+// validated, and exactly one database backend (MySQL, Postgres or
+// SQLProxy) must be declared. Returns nil when the spec is valid.
+func (b *AirflowBase) Validate() error {
+	errs := field.ErrorList{}
+	spec := field.NewPath("spec")
+
+	errs = append(errs, b.Spec.validate(spec)...)
+	errs = append(errs, b.Spec.MySQL.validate(spec.Child("mysql"))...)
+	// Bug fix: the Postgres spec was previously never validated although
+	// every other component spec was.
+	errs = append(errs, b.Spec.Postgres.validate(spec.Child("postgres"))...)
+	errs = append(errs, b.Spec.Storage.validate(spec.Child("storage"))...)
+	errs = append(errs, b.Spec.SQLProxy.validate(spec.Child("sqlproxy"))...)
+
+	if b.Spec.MySQL == nil && b.Spec.Postgres == nil && b.Spec.SQLProxy == nil {
+		errs = append(errs, field.Required(spec, "Either MySQL or Postgres or SQLProxy is required"))
+	}
+
+	// Exactly one database backend may be configured.
+	count := 0
+	if b.Spec.Postgres != nil {
+		count++
+	}
+	if b.Spec.MySQL != nil {
+		count++
+	}
+	if b.Spec.SQLProxy != nil {
+		count++
+	}
+	if count != 1 {
+		errs = append(errs, field.Invalid(spec, "", "Only One of MySQL,Postgres,SQLProxy can be declared"))
+	}
+
+	return errs.ToAggregate()
+}
+
+// OwnerRef returns an owner reference naming this AirflowBase as the
+// controlling owner, for attachment to child resources.
+func (b *AirflowBase) OwnerRef() *metav1.OwnerReference {
+	gvk := schema.GroupVersionKind{
+		Group:   SchemeGroupVersion.Group,
+		Version: SchemeGroupVersion.Version,
+		Kind:    "AirflowBase",
+	}
+	return metav1.NewControllerRef(b, gvk)
+}
+
+// NewAirflowBase returns a defaults-filled AirflowBase named name in
+// namespace, configured with the requested database backend (MySQL for
+// DatabaseMySQL and any unrecognized value) and, optionally, NFS storage.
+func NewAirflowBase(name, namespace string, database string, storage bool) *AirflowBase {
+	b := &AirflowBase{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: namespace,
+			Labels:    map[string]string{"test": name},
+		},
+		Spec: AirflowBaseSpec{},
+	}
+	switch database {
+	case DatabasePostgres:
+		b.Spec.Postgres = &PostgresSpec{}
+	case DatabaseSQLProxy:
+		b.Spec.SQLProxy = &SQLProxySpec{}
+	default: // DatabaseMySQL and anything unrecognized
+		b.Spec.MySQL = &MySQLSpec{}
+	}
+	if storage {
+		b.Spec.Storage = &NFSStoreSpec{}
+	}
+	b.ApplyDefaults()
+	return b
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// AirflowBaseList contains a list of AirflowBase
+type AirflowBaseList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []AirflowBase `json:"items"`
+}
+
+func init() {
+	SchemeBuilder.Register(&AirflowBase{}, &AirflowBaseList{})
+}
diff --git a/pkg/apis/airflow/v1alpha1/airflowbase_types_test.go b/pkg/apis/airflow/v1alpha1/airflowbase_types_test.go
new file mode 100644
index 0000000..bb70b33
--- /dev/null
+++ b/pkg/apis/airflow/v1alpha1/airflowbase_types_test.go
@@ -0,0 +1,57 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha1
+
+import (
+	"testing"
+
+	"github.com/onsi/gomega"
+	"golang.org/x/net/context"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+)
+
+// TestStorageAirflowBase exercises create/get/update/delete round-trips
+// of the AirflowBase resource through the package-level client c
+// (presumably set up in a suite/TestMain elsewhere — not visible here).
+func TestStorageAirflowBase(t *testing.T) {
+	key := types.NamespacedName{
+		Name:      "foo",
+		Namespace: "default",
+	}
+	created := &AirflowBase{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "foo",
+			Namespace: "default",
+		}}
+	g := gomega.NewGomegaWithT(t)
+
+	// Test Create
+	fetched := &AirflowBase{}
+	g.Expect(c.Create(context.TODO(), created)).NotTo(gomega.HaveOccurred())
+
+	g.Expect(c.Get(context.TODO(), key, fetched)).NotTo(gomega.HaveOccurred())
+	g.Expect(fetched).To(gomega.Equal(created))
+
+	// Test Updating the Labels
+	updated := fetched.DeepCopy()
+	updated.Labels = map[string]string{"hello": "world"}
+	g.Expect(c.Update(context.TODO(), updated)).NotTo(gomega.HaveOccurred())
+
+	g.Expect(c.Get(context.TODO(), key, fetched)).NotTo(gomega.HaveOccurred())
+	g.Expect(fetched).To(gomega.Equal(updated))
+
+	// Test Delete — the subsequent Get must fail once the object is gone.
+	g.Expect(c.Delete(context.TODO(), fetched)).NotTo(gomega.HaveOccurred())
+	g.Expect(c.Get(context.TODO(), key, fetched)).To(gomega.HaveOccurred())
+}
diff --git a/pkg/apis/airflow/v1alpha1/airflowcluster_types.go b/pkg/apis/airflow/v1alpha1/airflowcluster_types.go
new file mode 100644
index 0000000..109ad1b
--- /dev/null
+++ b/pkg/apis/airflow/v1alpha1/airflowcluster_types.go
@@ -0,0 +1,616 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/validation/field"
+	"math/rand"
+	"sigs.k8s.io/controller-reconciler/pkg/finalizer"
+	"sigs.k8s.io/controller-reconciler/pkg/status"
+	"time"
+)
+
+// defaults and constant strings
+const (
+	PasswordCharNumSpace    = "abcdefghijklmnopqrstuvwxyz0123456789"
+	PasswordCharSpace       = "abcdefghijklmnopqrstuvwxyz"
+	defaultRedisImage       = "redis"
+	defaultRedisVersion     = "4.0"
+	defaultRedisPort        = "6379"
+	defaultWorkerImage      = "gcr.io/airflow-operator/airflow"
+	defaultSchedulerImage   = "gcr.io/airflow-operator/airflow"
+	defaultFlowerImage      = "gcr.io/airflow-operator/airflow"
+	GitsyncImage            = "gcr.io/google_containers/git-sync"
+	GitsyncVersion          = "v3.0.1"
+	GCSsyncImage            = "gcr.io/cloud-airflow-releaser/gcs-syncd"
+	GCSsyncVersion          = "cloud_composer_service_2018-05-23-RC0"
+	ExecutorLocal           = "Local"
+	ExecutorCelery          = "Celery"
+	ExecutorSequential      = "Sequential"
+	ExecutorK8s             = "Kubernetes"
+	defaultExecutor         = ExecutorLocal
+	defaultBranch           = "master"
+	defaultWorkerVersion    = "1.10.2"
+	defaultSchedulerVersion = "1.10.2"
+)
+
+var (
+	random = rand.New(rand.NewSource(time.Now().UnixNano()))
+)
+
+// RandomAlphanumericString generates a random lowercase alphanumeric
+// byte slice of length strlen whose first byte is always a letter.
+func RandomAlphanumericString(strlen int) []byte {
+	out := make([]byte, strlen)
+	for i := 0; i < strlen; i++ {
+		out[i] = PasswordCharNumSpace[random.Intn(len(PasswordCharNumSpace))]
+	}
+	// Force a leading letter so the value is never digit-first.
+	out[0] = PasswordCharSpace[random.Intn(len(PasswordCharSpace))]
+	return out
+}
+
+var allowedExecutors = []string{ExecutorLocal, ExecutorSequential, ExecutorCelery, ExecutorK8s}
+
+// MemoryStoreSpec defines the attributes and desired state of MemoryStore component
+type MemoryStoreSpec struct {
+	// Project defines the SQL instance project
+	Project string `json:"project"`
+	// Region defines the SQL instance region
+	Region string `json:"region"`
+	// AlternativeLocationID - alt
+	// +optional.
+	AlternativeLocationID string `json:"alternativeLocationId,omitempty"`
+	// AuthorizedNetwork
+	// +optional.
+	AuthorizedNetwork string `json:"authorizedNetwork,omitempty"`
+	// LocationID The zone where the instance will be provisioned.
+	// +optional
+	LocationID string `json:"locationId,omitempty"`
+	// MemorySizeGb: Required. Redis memory size in GiB.
+	MemorySizeGb int64 `json:"memorySizeGb,omitempty"`
+	// RedisConfigs: Optional. Redis configuration parameters
+	RedisConfigs map[string]string `json:"redisConfigs,omitempty"`
+	// RedisVersion: Optional. The version of Redis software.
+	RedisVersion string `json:"redisVersion,omitempty"`
+	// Tier: Required. The service tier of the instance.
+	Tier string `json:"tier,omitempty"`
+	// Specifies the behavior Redis follows when the memory size limit is reached.
+	MaxMemoryPolicy string `json:"maxMemoryPolicy,omitempty"`
+	// Allows clients to subscribe to notifications on certain keyspace events
+	NotifyKeyspaceEvents string `json:"notifyKeyspaceEvents,omitempty"`
+	// Status
+	Status MemoryStoreStatus `json:"status,omitempty"`
+}
+
+// validate checks the MemoryStore (managed Redis) spec: project and
+// region are required, and maxMemoryPolicy / notifyKeyspaceEvents must
+// be drawn from the supported Redis values. A nil receiver yields no
+// errors.
+func (s *MemoryStoreSpec) validate(fp *field.Path) field.ErrorList {
+	errs := field.ErrorList{}
+	if s == nil {
+		return errs
+	}
+
+	if s.Project == "" {
+		errs = append(errs, field.Required(fp.Child("project"), "Missing memoryStore Project"))
+	}
+	if s.Region == "" {
+		errs = append(errs, field.Required(fp.Child("region"), "Missing memoryStore Region"))
+	}
+
+	// Bug fixes: "" is now accepted so that leaving the optional field
+	// unset does not fail validation (mirroring the keyspace-events list
+	// below), and the previously mis-capitalized "Volatile-ttl" entry is
+	// corrected to the actual Redis policy name "volatile-ttl".
+	var allowedMaxMemoryPolicy = []string{"", "noeviction", "allkeys-lru", "volatile-lru", "allkeys-random", "volatile-random", "volatile-ttl"}
+	allowedMMP := false
+	for _, policy := range allowedMaxMemoryPolicy {
+		if policy == s.MaxMemoryPolicy {
+			allowedMMP = true
+		}
+	}
+
+	if !allowedMMP {
+		errs = append(errs, field.Invalid(fp.Child("maxMemoryPolicy"), "", "Configuration is not allowed"))
+	}
+
+	var allowedNotifyKeyspaceEvents = []string{"", "K", "E", "g", "$", "l", "s", "h", "z", "x", "e", "A"}
+	allowedNKE := false
+	for _, event := range allowedNotifyKeyspaceEvents {
+		if event == s.NotifyKeyspaceEvents {
+			allowedNKE = true
+		}
+	}
+	if !allowedNKE {
+		errs = append(errs, field.Invalid(fp.Child("notifyKeyspaceEvent"), "", "Configuration is not allowed"))
+	}
+
+	return errs
+}
+
+// RedisSpec defines the attributes and desired state of Redis component
+type RedisSpec struct {
+	// Image defines the Redis Docker image name
+	// +optional
+	Image string `json:"image,omitempty"`
+	// Version defines the Redis Docker image version.
+	// +optional
+	Version string `json:"version,omitempty"`
+	// Flag when True generates RedisReplica CustomResource to be handled by Redis Operator
+	// If False, a StatefulSet with 1 replica is created
+	// +optional
+	Operator bool `json:"operator,omitempty"`
+	// Hostname or IP of existing Redis instance
+	RedisHost string `json:"redisHost,omitempty"`
+	// Port of existing Redis instance
+	RedisPort string `json:"redisPort,omitempty"`
+	// If the existing Redis instance uses password or not, as MemoryStore doesn't support password yet
+	RedisPassword bool `json:"redisPassword,omitempty"`
+	// Resources is the resource requests and limits for the pods.
+	Resources corev1.ResourceRequirements `json:"resources,omitempty"`
+	// VolumeClaimTemplate allows a user to specify volume claim for MySQL Server files
+	// +optional
+	VolumeClaimTemplate *corev1.PersistentVolumeClaim `json:"volumeClaimTemplate,omitempty"`
+	// AdditionalArgs for redis-server
+	// +optional
+	AdditionalArgs string `json:"additionalargs,omitempty"`
+}
+
+func (s *RedisSpec) validate(fp *field.Path) field.ErrorList {
+	errs := field.ErrorList{}
+	if s == nil {
+		return errs
+	}
+	if s.Operator == true {
+		errs = append(errs, field.Invalid(fp.Child("operator"), "", "Operator is not supported in this version"))
+	}
+	return errs
+}
+
+// FlowerSpec defines the attributes to deploy Flower component
+type FlowerSpec struct {
+	// Image defines the Flower Docker image.
+	// +optional
+	Image string `json:"image,omitempty"`
+	// Version defines the Flower Docker image version.
+	// +optional
+	Version string `json:"version,omitempty"`
+	// Replicas defines the number of running Flower instances in a cluster
+	Replicas int32 `json:"replicas,omitempty"`
+	// Resources is the resource requests and limits for the pods.
+	// +optional
+	Resources corev1.ResourceRequirements `json:"resources,omitempty"`
+}
+
+// validate performs (currently no) checks on the Flower spec.
+func (s *FlowerSpec) validate(fp *field.Path) field.ErrorList {
+	return field.ErrorList{}
+}
+
+// SchedulerSpec defines the attributes and desired state of Airflow Scheduler.
+type SchedulerSpec struct {
+	// Image defines the Airflow custom server Docker image.
+	// +optional
+	Image string `json:"image,omitempty"`
+	// Version defines the Airflow Docker image version (tag).
+	// +optional
+	Version string `json:"version,omitempty"`
+	// DBName defines the Airflow Database to be used.
+	// Serialized as "database"; randomly generated by ApplyDefaults when empty.
+	// +optional
+	DBName string `json:"database,omitempty"`
+	// DBUser defines the Airflow Database user to be used.
+	// Randomly generated by ApplyDefaults when empty.
+	// +optional
+	DBUser string `json:"dbuser,omitempty"`
+	// Resources is the resource requests and limits for the pods.
+	Resources corev1.ResourceRequirements `json:"resources,omitempty"`
+}
+
+// validate performs static validation of the SchedulerSpec.
+// No constraints are currently enforced.
+func (s *SchedulerSpec) validate(fp *field.Path) field.ErrorList {
+	errs := field.ErrorList{}
+	return errs
+}
+
+// WorkerSpec defines the attributes and desired state of Airflow workers.
+type WorkerSpec struct {
+	// Image defines the Airflow worker Docker image.
+	// +optional
+	Image string `json:"image,omitempty"`
+	// Version defines the Airflow worker Docker image version (tag).
+	// +optional
+	Version string `json:"version,omitempty"`
+	// Replicas is the count of number of workers.
+	// Defaulted to 1 by ApplyDefaults (forced to 0 for the k8s executor).
+	Replicas int32 `json:"replicas,omitempty"`
+	// Resources is the resource requests and limits for the pods.
+	Resources corev1.ResourceRequirements `json:"resources,omitempty"`
+}
+
+// validate performs static validation of the WorkerSpec.
+// No constraints are currently enforced.
+func (s *WorkerSpec) validate(fp *field.Path) field.ErrorList {
+	errs := field.ErrorList{}
+	return errs
+}
+
+// GCSSpec defines the attributes needed to sync DAGs from a GCS bucket.
+// (Original comment said "git repo" — it applies to GCS, not git.)
+type GCSSpec struct {
+	// Bucket describes the GCS bucket
+	Bucket string `json:"bucket,omitempty"`
+	// Once syncs initially and quits (use init container instead of sidecar)
+	Once bool `json:"once,omitempty"`
+}
+
+// validate checks the GCSSpec and returns any field errors found.
+// A nil receiver is treated as "not configured" and yields no errors.
+func (s *GCSSpec) validate(fp *field.Path) field.ErrorList {
+	errs := field.ErrorList{}
+	if s == nil {
+		return errs
+	}
+	if s.Bucket == "" {
+		errs = append(errs, field.Required(fp.Child("bucket"), "bucket required"))
+	}
+	// One-shot (init container) sync is rejected as unsupported for GCS.
+	if s.Once {
+		errs = append(errs, field.NotSupported(fp.Child("once"), "true", []string{}))
+	}
+	return errs
+}
+
+// GitSpec defines the attributes needed to sync DAGs from a git repo
+// (via a git-sync sidecar, or an init container when Once is set).
+type GitSpec struct {
+	// Repo describes the http/ssh uri for the git repo.
+	// Note: tag was `json:"repo,"` — the stray trailing comma produced an
+	// empty tag option and is removed; the serialized name is unchanged.
+	Repo string `json:"repo"`
+	// Branch describes the branch name to be synced
+	Branch string `json:"branch,omitempty"`
+	// Rev is the git hash to be used for syncing
+	Rev string `json:"rev,omitempty"`
+	// User for git access
+	User string `json:"user,omitempty"`
+	// Once syncs initially and quits (use init container instead of sidecar)
+	Once bool `json:"once,omitempty"`
+	// CredSecretRef is a reference to git credentials (user, password, ssh etc)
+	CredSecretRef *corev1.LocalObjectReference `json:"cred,omitempty"`
+}
+
+// validate checks the GitSpec and returns any field errors found.
+// A nil receiver is treated as "not configured" and yields no errors.
+func (s *GitSpec) validate(fp *field.Path) field.ErrorList {
+	errs := field.ErrorList{}
+	if s == nil {
+		return errs
+	}
+	if len(s.Repo) == 0 {
+		errs = append(errs, field.Required(fp.Child("repo"), "repo required"))
+	}
+	// A credential reference, when present, must carry a secret name.
+	if ref := s.CredSecretRef; ref != nil && ref.Name == "" {
+		errs = append(errs, field.Required(fp.Child("cred", "name"), "name missing"))
+	}
+	return errs
+}
+
+// DagSpec defines where the DAGs are located and how to access them.
+// validate() currently rejects the NfsPV and Storage sources as unsupported.
+type DagSpec struct {
+	// DagSubdir is the directory under source where the dags are present
+	DagSubdir string `json:"subdir,omitempty"`
+	// GitSpec defines details to pull DAGs from a git repo using
+	// github.com/kubernetes/git-sync sidecar
+	Git *GitSpec `json:"git,omitempty"`
+	// NfsPVSpec — PVC for an NFS-backed DAG folder (not supported yet).
+	NfsPV *corev1.PersistentVolumeClaim `json:"nfspv,omitempty"`
+	// Storage has s3 compatible storage spec for copying files from
+	// (not supported yet; see validate()).
+	Storage *StorageSpec `json:"storage,omitempty"`
+	// Gcs config which uses storage spec
+	GCS *GCSSpec `json:"gcs,omitempty"`
+}
+
+// validate checks the DagSpec and returns any field errors found.
+// A nil receiver is treated as "not configured" and yields no errors.
+func (s *DagSpec) validate(fp *field.Path) field.ErrorList {
+	errs := field.ErrorList{}
+	if s == nil {
+		return errs
+	}
+	// NFS and s3-compatible storage sources are not supported yet.
+	if s.NfsPV != nil {
+		errs = append(errs, field.NotSupported(fp.Child("nfspv"), "", []string{}))
+	}
+	if s.Storage != nil {
+		errs = append(errs, field.NotSupported(fp.Child("storage"), "", []string{}))
+	}
+	errs = append(errs, s.Git.validate(fp.Child("git"))...)
+	// Fixed: GCS errors were previously reported under the "git" path.
+	errs = append(errs, s.GCS.validate(fp.Child("gcs"))...)
+	return errs
+}
+
+// SecretEnv describes one environment variable whose value is sourced
+// from a Kubernetes Secret (consumed via ClusterConfig.AirflowSecretEnv).
+// NOTE(review): fields carry no json tags, so they serialize under the
+// exported Go field names — confirm this is intended.
+type SecretEnv struct {
+	// Env is the name of the environment variable to inject.
+	Env    string
+	// Secret is the name of the Secret to read.
+	Secret string
+	// Field is presumably the key within the Secret — verify against consumer.
+	Field  string
+}
+
+// ClusterConfig is used to capture the config for Airflow.
+type ClusterConfig struct {
+	// Airflow defines a list of kv pairs that describe env variables injected into the nodes.
+	// +optional
+	AirflowEnv map[string]string `json:"airflow,omitempty"`
+	// AirflowSecret defines a list of secret envs (see SecretEnv).
+	// +optional
+	AirflowSecretEnv []SecretEnv `json:"airflowsecret,omitempty"`
+}
+
+// AirflowClusterSpec defines the desired state of AirflowCluster.
+// Unset fields are populated by ApplyDefaults and checked by Validate.
+type AirflowClusterSpec struct {
+	// Selector for fitting pods to nodes whose labels match the selector.
+	// https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+	// +optional
+	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+	// Define scheduling constraints for pods.
+	// +optional
+	Affinity *corev1.Affinity `json:"affinity,omitempty"`
+	// Custom annotations to be added to the pods.
+	// +optional
+	Annotations map[string]string `json:"annotations,omitempty"`
+	// Custom labels to be added to the pods.
+	// +optional
+	Labels map[string]string `json:"labels,omitempty"`
+	// Airflow Executor desired: local,celery,kubernetes
+	// (defaulted by ApplyDefaults; checked against allowedExecutors).
+	// +optional
+	Executor string `json:"executor,omitempty"`
+	// Airflow config as env list
+	// +optional
+	Config ClusterConfig `json:"config,omitempty"`
+	// Spec for MemoryStore component; with the Celery executor either this
+	// or Redis must be set (see Validate).
+	// +optional
+	MemoryStore *MemoryStoreSpec `json:"memoryStore,omitempty"`
+	// Spec for Redis component.
+	// +optional
+	Redis *RedisSpec `json:"redis,omitempty"`
+	// Spec for Airflow Scheduler component (required by Validate).
+	// +optional
+	Scheduler *SchedulerSpec `json:"scheduler,omitempty"`
+	// Spec for Airflow Workers
+	// +optional
+	Worker *WorkerSpec `json:"worker,omitempty"`
+	// Spec for Airflow UI component.
+	// +optional
+	UI *AirflowUISpec `json:"ui,omitempty"`
+	// Spec for Flower component (only valid with the Celery executor).
+	// +optional
+	Flower *FlowerSpec `json:"flower,omitempty"`
+	// Spec for DAG source and location
+	// +optional
+	DAGs *DagSpec `json:"dags,omitempty"`
+	// AirflowBaseRef is a reference to the AirflowBase CR (required by Validate).
+	AirflowBaseRef *corev1.LocalObjectReference `json:"airflowbase,omitempty"`
+}
+
+// SchedulerStatus defines the observed state of Airflow Scheduler.
+type SchedulerStatus struct {
+	// DagCount is a count of number of Dags observed.
+	DagCount int32 `json:"dagcount,omitempty"`
+	// RunCount is a count of number of Dag Runs observed.
+	RunCount int32 `json:"runcount,omitempty"`
+}
+
+// MemoryStoreStatus defines the observed state of MemoryStore.
+// NOTE(review): the "Output only" field comments read like they mirror a
+// managed Redis (GCP Memorystore) API — confirm against the controller.
+type MemoryStoreStatus struct {
+	// CreateTime: Output only. The time the instance was created.
+	CreateTime string `json:"createTime,omitempty"`
+	// CurrentLocationID: Output only. The current zone where the Redis
+	// endpoint is placed.
+	CurrentLocationID string `json:"currentLocationId,omitempty"`
+	// StatusMessage: Output only. Additional information about the current
+	// status of this instance, if available.
+	StatusMessage string `json:"statusMessage,omitempty"`
+	// Host: Output only. Hostname or IP address of the exposed Redis endpoint used by
+	// clients to connect to the service.
+	Host string `json:"host,omitempty"`
+	// Port: Output only. The port number of the exposed Redis endpoint.
+	Port int64 `json:"port,omitempty"`
+	// State: Output only. The current state of this instance.
+	State                string `json:"state,omitempty"`
+	// Embedded common status fields shared by the operator's resources.
+	status.Meta          `json:",inline"`
+	status.ComponentMeta `json:",inline"`
+}
+
+// AirflowClusterStatus defines the observed state of AirflowCluster.
+// It is composed entirely of the embedded common status fields.
+type AirflowClusterStatus struct {
+	status.Meta          `json:",inline"`
+	status.ComponentMeta `json:",inline"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// AirflowCluster represents the Airflow Scheduler and workers for a single DAG folder
+// function. At a minimum they need a SQL service (MySQL or SQLProxy) and Airflow UI.
+// In addition for an installation with minimal external dependencies, NFS and Airflow UI
+// are also added.
+// NOTE(review): the paragraph above appears partly copied from AirflowBase — confirm.
+// +k8s:openapi-gen=true
+// +kubebuilder:resource:path=airflowclusters
+type AirflowCluster struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	// Spec is the desired state; see ApplyDefaults and Validate.
+	Spec   AirflowClusterSpec   `json:"spec,omitempty"`
+	// Status holds the most recently observed state.
+	Status AirflowClusterStatus `json:"status,omitempty"`
+}
+
+// Helper functions for the resources
+
+// ApplyDefaults fills in unset fields of the AirflowCluster spec in place:
+// component images/versions, replica counts, generated DB credentials for
+// the scheduler, the executor type and the DAG git branch. It also resets
+// Status.ComponentList and ensures the standard finalizer is present.
+func (b *AirflowCluster) ApplyDefaults() {
+	if b.Spec.Redis != nil {
+		if b.Spec.Redis.Image == "" {
+			b.Spec.Redis.Image = defaultRedisImage
+		}
+		if b.Spec.Redis.Version == "" {
+			b.Spec.Redis.Version = defaultRedisVersion
+		}
+		// Original code re-applied the image/version defaults inside a
+		// `RedisHost == ""` branch — a no-op duplicate, removed. The port
+		// is defaulted only when an external Redis host is configured,
+		// matching the original `else if` behavior.
+		if b.Spec.Redis.RedisHost != "" && b.Spec.Redis.RedisPort == "" {
+			b.Spec.Redis.RedisPort = defaultRedisPort
+		}
+	}
+	if b.Spec.Scheduler != nil {
+		if b.Spec.Scheduler.Image == "" {
+			b.Spec.Scheduler.Image = defaultSchedulerImage
+		}
+		if b.Spec.Scheduler.Version == "" {
+			b.Spec.Scheduler.Version = defaultSchedulerVersion
+		}
+		// Generate random DB credentials when the user supplied none.
+		if b.Spec.Scheduler.DBName == "" {
+			b.Spec.Scheduler.DBName = string(RandomAlphanumericString(16))
+		}
+		if b.Spec.Scheduler.DBUser == "" {
+			b.Spec.Scheduler.DBUser = string(RandomAlphanumericString(16))
+		}
+	}
+	if b.Spec.UI != nil {
+		if b.Spec.UI.Image == "" {
+			b.Spec.UI.Image = defaultUIImage
+		}
+		if b.Spec.UI.Version == "" {
+			b.Spec.UI.Version = defaultUIVersion
+		}
+		if b.Spec.UI.Replicas == 0 {
+			b.Spec.UI.Replicas = 1
+		}
+	}
+	if b.Spec.Flower != nil {
+		if b.Spec.Flower.Image == "" {
+			b.Spec.Flower.Image = defaultFlowerImage
+		}
+		if b.Spec.Flower.Version == "" {
+			b.Spec.Flower.Version = defaultFlowerVersion
+		}
+		if b.Spec.Flower.Replicas == 0 {
+			b.Spec.Flower.Replicas = 1
+		}
+	}
+	// Must run before the worker defaults below, which inspect Executor.
+	if b.Spec.Executor == "" {
+		b.Spec.Executor = defaultExecutor
+	}
+	if b.Spec.Worker != nil {
+		if b.Spec.Worker.Image == "" {
+			b.Spec.Worker.Image = defaultWorkerImage
+		}
+		if b.Spec.Worker.Version == "" {
+			b.Spec.Worker.Version = defaultWorkerVersion
+		}
+		if b.Spec.Worker.Replicas == 0 {
+			b.Spec.Worker.Replicas = 1
+		}
+		// No standing worker replicas are run for the k8s executor.
+		if b.Spec.Executor == ExecutorK8s {
+			b.Spec.Worker.Replicas = 0
+		}
+	}
+	if b.Spec.DAGs != nil && b.Spec.DAGs.Git != nil && b.Spec.DAGs.Git.Branch == "" {
+		b.Spec.DAGs.Git.Branch = defaultBranch
+	}
+	b.Status.ComponentList = status.ComponentList{}
+	finalizer.EnsureStandard(b)
+}
+
+// Validate performs static validation of the AirflowCluster spec and
+// returns an aggregated error, or nil when the spec is valid.
+func (b *AirflowCluster) Validate() error {
+	errs := field.ErrorList{}
+	spec := field.NewPath("spec")
+
+	// Per-component validation (each validate tolerates a nil receiver).
+	// "memoryStore" casing fixed to match the field's json tag.
+	errs = append(errs, b.Spec.MemoryStore.validate(spec.Child("memoryStore"))...)
+	errs = append(errs, b.Spec.Redis.validate(spec.Child("redis"))...)
+	errs = append(errs, b.Spec.Scheduler.validate(spec.Child("scheduler"))...)
+	errs = append(errs, b.Spec.Worker.validate(spec.Child("worker"))...)
+	errs = append(errs, b.Spec.DAGs.validate(spec.Child("dags"))...)
+	errs = append(errs, b.Spec.UI.validate(spec.Child("ui"))...)
+	errs = append(errs, b.Spec.Flower.validate(spec.Child("flower"))...)
+
+	// The executor must be one of the supported values.
+	allowed := false
+	for _, executor := range allowedExecutors {
+		if executor == b.Spec.Executor {
+			allowed = true
+		}
+	}
+	if !allowed {
+		errs = append(errs, field.NotSupported(spec.Child("executor"), b.Spec.Executor, allowedExecutors))
+	}
+
+	if b.Spec.Scheduler == nil {
+		errs = append(errs, field.Required(spec.Child("scheduler"), "scheduler required"))
+	}
+
+	if b.Spec.Executor == ExecutorCelery {
+		// Celery needs a broker (Redis or MemoryStore) and a worker spec.
+		if b.Spec.Redis == nil && b.Spec.MemoryStore == nil {
+			errs = append(errs, field.Required(spec.Child("redis"), "redis/memoryStore required for Celery executor"))
+		}
+		if b.Spec.Worker == nil {
+			errs = append(errs, field.Required(spec.Child("worker"), "worker required for Celery executor"))
+		}
+	}
+	if b.Spec.Executor == ExecutorK8s {
+		if b.Spec.Worker == nil {
+			// Fixed copy-paste: the message previously said "Celery".
+			errs = append(errs, field.Required(spec.Child("worker"), "worker required for Kubernetes executor"))
+		}
+	}
+
+	// Flower is only valid alongside the Celery executor.
+	if b.Spec.Flower != nil && b.Spec.Executor != ExecutorCelery {
+		errs = append(errs, field.Required(spec.Child("executor"), "celery executor required for Flower"))
+	}
+
+	if b.Spec.AirflowBaseRef == nil {
+		errs = append(errs, field.Required(spec.Child("airflowbase"), "airflowbase reference missing"))
+	} else if b.Spec.AirflowBaseRef.Name == "" {
+		errs = append(errs, field.Required(spec.Child("airflowbase", "name"), "name missing"))
+	}
+
+	return errs.ToAggregate()
+}
+
+// NewAirflowCluster returns a defaults-filled AirflowCluster object with
+// the given name/namespace, executor type, AirflowBase reference and DAG
+// source. Celery-specific components are only populated for that executor.
+func NewAirflowCluster(name, namespace, executor, base string, dags *DagSpec) *AirflowCluster {
+	c := AirflowCluster{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Labels:    map[string]string{},
+			Namespace: namespace,
+		},
+	}
+	c.Spec = AirflowClusterSpec{}
+	c.Spec.Executor = executor
+	c.Spec.Scheduler = &SchedulerSpec{}
+	c.Spec.UI = &AirflowUISpec{}
+	if executor == ExecutorCelery {
+		// Fixed: Redis was previously assigned twice in this branch.
+		c.Spec.Redis = &RedisSpec{}
+		c.Spec.MemoryStore = &MemoryStoreSpec{}
+		c.Spec.Worker = &WorkerSpec{}
+		c.Spec.Flower = &FlowerSpec{}
+	}
+	c.Spec.DAGs = dags
+	c.Spec.AirflowBaseRef = &corev1.LocalObjectReference{Name: base}
+	c.ApplyDefaults()
+	return &c
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// AirflowClusterList contains a list of AirflowCluster resources,
+// as returned by LIST operations against the API server.
+type AirflowClusterList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []AirflowCluster `json:"items"`
+}
+
+// init registers the AirflowCluster kinds with the package scheme builder
+// so clients and controllers can encode/decode these types.
+func init() {
+	SchemeBuilder.Register(&AirflowCluster{}, &AirflowClusterList{})
+}
diff --git a/pkg/apis/airflow/v1alpha1/airflowcluster_types_test.go b/pkg/apis/airflow/v1alpha1/airflowcluster_types_test.go
new file mode 100644
index 0000000..282eb63
--- /dev/null
+++ b/pkg/apis/airflow/v1alpha1/airflowcluster_types_test.go
@@ -0,0 +1,57 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha1
+
+import (
+	"testing"
+
+	"github.com/onsi/gomega"
+	"golang.org/x/net/context"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+)
+
+// TestStorageAirflowCluster round-trips an AirflowCluster through the
+// envtest API server started by TestMain (client `c` is package-level):
+// create, get, update labels, then delete.
+func TestStorageAirflowCluster(t *testing.T) {
+	key := types.NamespacedName{
+		Name:      "foo",
+		Namespace: "default",
+	}
+	created := &AirflowCluster{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "foo",
+			Namespace: "default",
+		}}
+	g := gomega.NewGomegaWithT(t)
+
+	// Test Create
+	fetched := &AirflowCluster{}
+	g.Expect(c.Create(context.TODO(), created)).NotTo(gomega.HaveOccurred())
+
+	g.Expect(c.Get(context.TODO(), key, fetched)).NotTo(gomega.HaveOccurred())
+	g.Expect(fetched).To(gomega.Equal(created))
+
+	// Test Updating the Labels
+	updated := fetched.DeepCopy()
+	updated.Labels = map[string]string{"hello": "world"}
+	g.Expect(c.Update(context.TODO(), updated)).NotTo(gomega.HaveOccurred())
+
+	g.Expect(c.Get(context.TODO(), key, fetched)).NotTo(gomega.HaveOccurred())
+	g.Expect(fetched).To(gomega.Equal(updated))
+
+	// Test Delete — a subsequent Get must fail once the object is gone.
+	g.Expect(c.Delete(context.TODO(), fetched)).NotTo(gomega.HaveOccurred())
+	g.Expect(c.Get(context.TODO(), key, fetched)).To(gomega.HaveOccurred())
+}
diff --git a/pkg/apis/airflow/v1alpha1/doc.go b/pkg/apis/airflow/v1alpha1/doc.go
new file mode 100644
index 0000000..fde5ee9
--- /dev/null
+++ b/pkg/apis/airflow/v1alpha1/doc.go
@@ -0,0 +1,22 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package v1alpha1 contains API Schema definitions for the airflow v1alpha1 API group
+// +k8s:openapi-gen=true
+// +k8s:deepcopy-gen=package,register
+// +k8s:conversion-gen=k8s.io/airflow-operator/pkg/apis/airflow
+// +k8s:defaulter-gen=TypeMeta
+// +groupName=airflow.k8s.io
+package v1alpha1
diff --git a/pkg/apis/airflow/v1alpha1/register.go b/pkg/apis/airflow/v1alpha1/register.go
new file mode 100644
index 0000000..3cd80a5
--- /dev/null
+++ b/pkg/apis/airflow/v1alpha1/register.go
@@ -0,0 +1,45 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// NOTE: Boilerplate only.  Ignore this file.
+
+// Package v1alpha1 contains API Schema definitions for the airflow v1alpha1 API group
+// +k8s:openapi-gen=true
+// +k8s:deepcopy-gen=package,register
+// +k8s:conversion-gen=k8s.io/airflow-operator/pkg/apis/airflow
+// +k8s:defaulter-gen=TypeMeta
+// +groupName=airflow.k8s.io
+package v1alpha1
+
+import (
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"sigs.k8s.io/controller-runtime/pkg/runtime/scheme"
+)
+
+var (
+	// SchemeGroupVersion is the group version used to register these objects.
+	SchemeGroupVersion = schema.GroupVersion{Group: "airflow.k8s.io", Version: "v1alpha1"}
+
+	// SchemeBuilder is used to add go types to the GroupVersionKind scheme.
+	SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}
+
+	// AddToScheme is required by pkg/client/... to install these types
+	// into a runtime.Scheme.
+	AddToScheme = SchemeBuilder.AddToScheme
+)
+
+// Resource is required by pkg/client/listers/...; it maps a resource name
+// into a GroupResource qualified by this package's API group.
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
diff --git a/pkg/apis/airflow/v1alpha1/v1alpha1_suite_test.go b/pkg/apis/airflow/v1alpha1/v1alpha1_suite_test.go
new file mode 100644
index 0000000..1cc96e5
--- /dev/null
+++ b/pkg/apis/airflow/v1alpha1/v1alpha1_suite_test.go
@@ -0,0 +1,54 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha1
+
+import (
+	"log"
+	"os"
+	"path/filepath"
+	"testing"
+
+	"k8s.io/client-go/kubernetes/scheme"
+	"k8s.io/client-go/rest"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/envtest"
+)
+
+var cfg *rest.Config
+var c client.Client
+
+// TestMain starts an envtest environment serving the CRDs under
+// config/crds, registers this package's types, builds the package-level
+// client `c`, runs the tests, and stops the environment.
+func TestMain(m *testing.M) {
+	t := &envtest.Environment{
+		CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "..", "config", "crds")},
+	}
+
+	err := SchemeBuilder.AddToScheme(scheme.Scheme)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// NOTE(review): log.Fatal exits without t.Stop(), potentially leaking
+	// the started control plane — consider deferred cleanup.
+	if cfg, err = t.Start(); err != nil {
+		log.Fatal(err)
+	}
+
+	if c, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}); err != nil {
+		log.Fatal(err)
+	}
+
+	code := m.Run()
+	// NOTE(review): t.Stop()'s error is ignored here — confirm intended.
+	t.Stop()
+	os.Exit(code)
+}
diff --git a/pkg/apis/airflow/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/airflow/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..aaf1d73
--- /dev/null
+++ b/pkg/apis/airflow/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,794 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// +build !ignore_autogenerated
+
+// Code generated by main. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	v1 "k8s.io/api/core/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AirflowBase) DeepCopyInto(out *AirflowBase) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AirflowBase.
+func (in *AirflowBase) DeepCopy() *AirflowBase {
+	if in == nil {
+		return nil
+	}
+	out := new(AirflowBase)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AirflowBase) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AirflowBaseList) DeepCopyInto(out *AirflowBaseList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]AirflowBase, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AirflowBaseList.
+func (in *AirflowBaseList) DeepCopy() *AirflowBaseList {
+	if in == nil {
+		return nil
+	}
+	out := new(AirflowBaseList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AirflowBaseList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AirflowBaseSpec) DeepCopyInto(out *AirflowBaseSpec) {
+	*out = *in
+	if in.NodeSelector != nil {
+		in, out := &in.NodeSelector, &out.NodeSelector
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.Affinity != nil {
+		in, out := &in.Affinity, &out.Affinity
+		*out = new(v1.Affinity)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Annotations != nil {
+		in, out := &in.Annotations, &out.Annotations
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.MySQL != nil {
+		in, out := &in.MySQL, &out.MySQL
+		*out = new(MySQLSpec)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SQLProxy != nil {
+		in, out := &in.SQLProxy, &out.SQLProxy
+		*out = new(SQLProxySpec)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Postgres != nil {
+		in, out := &in.Postgres, &out.Postgres
+		*out = new(PostgresSpec)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Storage != nil {
+		in, out := &in.Storage, &out.Storage
+		*out = new(NFSStoreSpec)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AirflowBaseSpec.
+func (in *AirflowBaseSpec) DeepCopy() *AirflowBaseSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(AirflowBaseSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AirflowBaseStatus) DeepCopyInto(out *AirflowBaseStatus) {
+	*out = *in
+	in.Meta.DeepCopyInto(&out.Meta)
+	in.ComponentMeta.DeepCopyInto(&out.ComponentMeta)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AirflowBaseStatus.
+func (in *AirflowBaseStatus) DeepCopy() *AirflowBaseStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(AirflowBaseStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AirflowCluster) DeepCopyInto(out *AirflowCluster) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AirflowCluster.
+func (in *AirflowCluster) DeepCopy() *AirflowCluster {
+	if in == nil {
+		return nil
+	}
+	out := new(AirflowCluster)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AirflowCluster) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AirflowClusterList) DeepCopyInto(out *AirflowClusterList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]AirflowCluster, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AirflowClusterList.
+func (in *AirflowClusterList) DeepCopy() *AirflowClusterList {
+	if in == nil {
+		return nil
+	}
+	out := new(AirflowClusterList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AirflowClusterList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AirflowClusterSpec) DeepCopyInto(out *AirflowClusterSpec) {
+	*out = *in
+	if in.NodeSelector != nil {
+		in, out := &in.NodeSelector, &out.NodeSelector
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.Affinity != nil {
+		in, out := &in.Affinity, &out.Affinity
+		*out = new(v1.Affinity)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Annotations != nil {
+		in, out := &in.Annotations, &out.Annotations
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	in.Config.DeepCopyInto(&out.Config)
+	if in.MemoryStore != nil {
+		in, out := &in.MemoryStore, &out.MemoryStore
+		*out = new(MemoryStoreSpec)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Redis != nil {
+		in, out := &in.Redis, &out.Redis
+		*out = new(RedisSpec)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Scheduler != nil {
+		in, out := &in.Scheduler, &out.Scheduler
+		*out = new(SchedulerSpec)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Worker != nil {
+		in, out := &in.Worker, &out.Worker
+		*out = new(WorkerSpec)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.UI != nil {
+		in, out := &in.UI, &out.UI
+		*out = new(AirflowUISpec)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Flower != nil {
+		in, out := &in.Flower, &out.Flower
+		*out = new(FlowerSpec)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.DAGs != nil {
+		in, out := &in.DAGs, &out.DAGs
+		*out = new(DagSpec)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.AirflowBaseRef != nil {
+		in, out := &in.AirflowBaseRef, &out.AirflowBaseRef
+		*out = new(v1.LocalObjectReference)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AirflowClusterSpec.
+func (in *AirflowClusterSpec) DeepCopy() *AirflowClusterSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(AirflowClusterSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AirflowClusterStatus) DeepCopyInto(out *AirflowClusterStatus) {
+	*out = *in
+	in.Meta.DeepCopyInto(&out.Meta)
+	in.ComponentMeta.DeepCopyInto(&out.ComponentMeta)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AirflowClusterStatus.
+func (in *AirflowClusterStatus) DeepCopy() *AirflowClusterStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(AirflowClusterStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AirflowUISpec) DeepCopyInto(out *AirflowUISpec) {
+	*out = *in
+	in.Resources.DeepCopyInto(&out.Resources)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AirflowUISpec.
+func (in *AirflowUISpec) DeepCopy() *AirflowUISpec {
+	if in == nil {
+		return nil
+	}
+	out := new(AirflowUISpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterConfig) DeepCopyInto(out *ClusterConfig) {
+	*out = *in
+	if in.AirflowEnv != nil {
+		in, out := &in.AirflowEnv, &out.AirflowEnv
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.AirflowSecretEnv != nil {
+		in, out := &in.AirflowSecretEnv, &out.AirflowSecretEnv
+		*out = make([]SecretEnv, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfig.
+func (in *ClusterConfig) DeepCopy() *ClusterConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DagSpec) DeepCopyInto(out *DagSpec) {
+	*out = *in
+	if in.Git != nil {
+		in, out := &in.Git, &out.Git
+		*out = new(GitSpec)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.NfsPV != nil {
+		in, out := &in.NfsPV, &out.NfsPV
+		*out = new(v1.PersistentVolumeClaim)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Storage != nil {
+		in, out := &in.Storage, &out.Storage
+		*out = new(StorageSpec)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.GCS != nil {
+		in, out := &in.GCS, &out.GCS
+		*out = new(GCSSpec)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DagSpec.
+func (in *DagSpec) DeepCopy() *DagSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(DagSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FlowerSpec) DeepCopyInto(out *FlowerSpec) {
+	*out = *in
+	in.Resources.DeepCopyInto(&out.Resources)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowerSpec.
+func (in *FlowerSpec) DeepCopy() *FlowerSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(FlowerSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GCSSpec) DeepCopyInto(out *GCSSpec) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCSSpec.
+func (in *GCSSpec) DeepCopy() *GCSSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(GCSSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GitSpec) DeepCopyInto(out *GitSpec) {
+	*out = *in
+	if in.CredSecretRef != nil {
+		in, out := &in.CredSecretRef, &out.CredSecretRef
+		*out = new(v1.LocalObjectReference)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitSpec.
+func (in *GitSpec) DeepCopy() *GitSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(GitSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MemoryStoreSpec) DeepCopyInto(out *MemoryStoreSpec) {
+	*out = *in
+	if in.RedisConfigs != nil {
+		in, out := &in.RedisConfigs, &out.RedisConfigs
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemoryStoreSpec.
+func (in *MemoryStoreSpec) DeepCopy() *MemoryStoreSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(MemoryStoreSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MemoryStoreStatus) DeepCopyInto(out *MemoryStoreStatus) {
+	*out = *in
+	in.Meta.DeepCopyInto(&out.Meta)
+	in.ComponentMeta.DeepCopyInto(&out.ComponentMeta)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemoryStoreStatus.
+func (in *MemoryStoreStatus) DeepCopy() *MemoryStoreStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(MemoryStoreStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLBackup) DeepCopyInto(out *MySQLBackup) {
+	*out = *in
+	in.Storage.DeepCopyInto(&out.Storage)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLBackup.
+func (in *MySQLBackup) DeepCopy() *MySQLBackup {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLBackup)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MySQLSpec) DeepCopyInto(out *MySQLSpec) {
+	*out = *in
+	if in.VolumeClaimTemplate != nil {
+		in, out := &in.VolumeClaimTemplate, &out.VolumeClaimTemplate
+		*out = new(v1.PersistentVolumeClaim)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.BackupVolumeClaimTemplate != nil {
+		in, out := &in.BackupVolumeClaimTemplate, &out.BackupVolumeClaimTemplate
+		*out = new(v1.PersistentVolumeClaim)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Backup != nil {
+		in, out := &in.Backup, &out.Backup
+		*out = new(MySQLBackup)
+		(*in).DeepCopyInto(*out)
+	}
+	in.Resources.DeepCopyInto(&out.Resources)
+	if in.Options != nil {
+		in, out := &in.Options, &out.Options
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLSpec.
+func (in *MySQLSpec) DeepCopy() *MySQLSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(MySQLSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NFSStoreSpec) DeepCopyInto(out *NFSStoreSpec) {
+	*out = *in
+	in.Resources.DeepCopyInto(&out.Resources)
+	if in.Volume != nil {
+		in, out := &in.Volume, &out.Volume
+		*out = new(v1.PersistentVolumeClaim)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NFSStoreSpec.
+func (in *NFSStoreSpec) DeepCopy() *NFSStoreSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(NFSStoreSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) {
+	*out = *in
+	if in.VolumeClaimTemplate != nil {
+		in, out := &in.VolumeClaimTemplate, &out.VolumeClaimTemplate
+		*out = new(v1.PersistentVolumeClaim)
+		(*in).DeepCopyInto(*out)
+	}
+	in.Resources.DeepCopyInto(&out.Resources)
+	if in.Options != nil {
+		in, out := &in.Options, &out.Options
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresSpec.
+func (in *PostgresSpec) DeepCopy() *PostgresSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RedisSpec) DeepCopyInto(out *RedisSpec) {
+	*out = *in
+	in.Resources.DeepCopyInto(&out.Resources)
+	if in.VolumeClaimTemplate != nil {
+		in, out := &in.VolumeClaimTemplate, &out.VolumeClaimTemplate
+		*out = new(v1.PersistentVolumeClaim)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisSpec.
+func (in *RedisSpec) DeepCopy() *RedisSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(RedisSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceLimits) DeepCopyInto(out *ResourceLimits) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceLimits.
+func (in *ResourceLimits) DeepCopy() *ResourceLimits {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourceLimits)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceRequests) DeepCopyInto(out *ResourceRequests) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRequests.
+func (in *ResourceRequests) DeepCopy() *ResourceRequests {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourceRequests)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Resources) DeepCopyInto(out *Resources) {
+	*out = *in
+	out.Requests = in.Requests
+	out.Limits = in.Limits
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Resources.
+func (in *Resources) DeepCopy() *Resources {
+	if in == nil {
+		return nil
+	}
+	out := new(Resources)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SQLProxySpec) DeepCopyInto(out *SQLProxySpec) {
+	*out = *in
+	in.Resources.DeepCopyInto(&out.Resources)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLProxySpec.
+func (in *SQLProxySpec) DeepCopy() *SQLProxySpec {
+	if in == nil {
+		return nil
+	}
+	out := new(SQLProxySpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SchedulerSpec) DeepCopyInto(out *SchedulerSpec) {
+	*out = *in
+	in.Resources.DeepCopyInto(&out.Resources)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerSpec.
+func (in *SchedulerSpec) DeepCopy() *SchedulerSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(SchedulerSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SchedulerStatus) DeepCopyInto(out *SchedulerStatus) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerStatus.
+func (in *SchedulerStatus) DeepCopy() *SchedulerStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(SchedulerStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecretEnv) DeepCopyInto(out *SecretEnv) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretEnv.
+func (in *SecretEnv) DeepCopy() *SecretEnv {
+	if in == nil {
+		return nil
+	}
+	out := new(SecretEnv)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StorageSpec) DeepCopyInto(out *StorageSpec) {
+	*out = *in
+	if in.SecretRef != nil {
+		in, out := &in.SecretRef, &out.SecretRef
+		*out = new(v1.LocalObjectReference)
+		**out = **in
+	}
+	if in.Config != nil {
+		in, out := &in.Config, &out.Config
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageSpec.
+func (in *StorageSpec) DeepCopy() *StorageSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(StorageSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WorkerSpec) DeepCopyInto(out *WorkerSpec) {
+	*out = *in
+	in.Resources.DeepCopyInto(&out.Resources)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerSpec.
+func (in *WorkerSpec) DeepCopy() *WorkerSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(WorkerSpec)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/pkg/apis/apis.go b/pkg/apis/apis.go
new file mode 100644
index 0000000..62bc462
--- /dev/null
+++ b/pkg/apis/apis.go
@@ -0,0 +1,32 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Generate deepcopy for apis
+//go:generate go run ../../vendor/k8s.io/code-generator/cmd/deepcopy-gen/main.go -O zz_generated.deepcopy -i ./... -h ../../hack/boilerplate.go.txt
+
+// Package apis contains Kubernetes API groups.
+package apis
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// AddToSchemes may be used to add all resources defined in the project to a Scheme
+var AddToSchemes runtime.SchemeBuilder
+
+// AddToScheme adds all Resources to the Scheme
+func AddToScheme(s *runtime.Scheme) error {
+	return AddToSchemes.AddToScheme(s)
+}
diff --git a/pkg/controller/add_airflowbase.go b/pkg/controller/add_airflowbase.go
new file mode 100644
index 0000000..52a36ef
--- /dev/null
+++ b/pkg/controller/add_airflowbase.go
@@ -0,0 +1,25 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package controller
+
+import (
+	"k8s.io/airflow-operator/pkg/controller/airflowbase"
+)
+
+func init() {
+	// AddToManagerFuncs is a list of functions to create controllers and add them to a manager.
+	AddToManagerFuncs = append(AddToManagerFuncs, airflowbase.Add)
+}
diff --git a/pkg/controller/add_airflowcluster.go b/pkg/controller/add_airflowcluster.go
new file mode 100644
index 0000000..b8384df
--- /dev/null
+++ b/pkg/controller/add_airflowcluster.go
@@ -0,0 +1,25 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package controller
+
+import (
+	"k8s.io/airflow-operator/pkg/controller/airflowcluster"
+)
+
+func init() {
+	// AddToManagerFuncs is a list of functions to create controllers and add them to a manager.
+	AddToManagerFuncs = append(AddToManagerFuncs, airflowcluster.Add)
+}
diff --git a/pkg/controller/airflowbase/airflowbase_controller.go b/pkg/controller/airflowbase/airflowbase_controller.go
new file mode 100644
index 0000000..bc16f81
--- /dev/null
+++ b/pkg/controller/airflowbase/airflowbase_controller.go
@@ -0,0 +1,352 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package airflowbase
+
+// Major:
+// TODO retry.Retry
+//
+// Minor:
+// TODO reconcile based on hash(spec)
+// TODO validation: assume resources and volume claims are validated by api server ?
+// TODO parameterize controller using config maps for default images, versions, resources etc
+// TODO documentation for CRD spec
+
+import (
+	"encoding/base64"
+	app "github.com/kubernetes-sigs/application/pkg/apis/app/v1beta1"
+	alpha1 "k8s.io/airflow-operator/pkg/apis/airflow/v1alpha1"
+	"k8s.io/airflow-operator/pkg/controller/application"
+	"k8s.io/airflow-operator/pkg/controller/common"
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	policyv1 "k8s.io/api/policy/v1beta1"
+	gr "sigs.k8s.io/controller-reconciler/pkg/genericreconciler"
+	"sigs.k8s.io/controller-reconciler/pkg/reconciler"
+	"sigs.k8s.io/controller-reconciler/pkg/reconciler/manager/k8s"
+	"sigs.k8s.io/controller-runtime/pkg/manager"
+	"time"
+)
+
+// Add creates a new AirflowBase Controller and adds it to the Manager with default RBAC. The Manager will set fields on the Controller
+// and Start it when the Manager is Started.
+func Add(mgr manager.Manager) error {
+	r := newReconciler(mgr)
+	return r.Controller(nil)
+}
+
+func newReconciler(mgr manager.Manager) *gr.Reconciler {
+	return gr.
+		WithManager(mgr).
+		For(&alpha1.AirflowBase{}, alpha1.SchemeGroupVersion).
+		Using(&MySQL{}).
+		Using(&Postgres{}).
+		Using(&SQLProxy{}).
+		Using(&NFS{}).
+		Using(&AirflowBase{}).
+		WithErrorHandler(handleError).
+		WithValidator(validate).
+		WithDefaulter(applyDefaults).
+		RegisterSchemeBuilder(app.SchemeBuilder).
+		Build()
+}
+
+func handleError(resource interface{}, err error, kind string) {
+	ab := resource.(*alpha1.AirflowBase)
+	if err != nil {
+		ab.Status.SetError("ErrorSeen", err.Error())
+	} else {
+		ab.Status.ClearError()
+	}
+}
+
+func validate(resource interface{}) error {
+	ab := resource.(*alpha1.AirflowBase)
+	return ab.Validate()
+}
+
+func applyDefaults(resource interface{}) {
+	ab := resource.(*alpha1.AirflowBase)
+	ab.ApplyDefaults()
+}
+
+// AirflowBase - interface to handle airflowbase
+type AirflowBase struct{}
+
+// MySQL - interface to handle the MySQL component
+type MySQL struct{}
+
+// Postgres - interface to handle the Postgres component
+type Postgres struct{}
+
+// SQLProxy - interface to handle the SQL proxy component
+type SQLProxy struct{}
+
+// NFS - interface to handle the NFS storage component
+type NFS struct{}
+
+// =-------------------------- common ------------------------------------
+
+func templateValue(r *alpha1.AirflowBase, component, altcomponent string, label, selector, ports map[string]string) *common.TemplateValue {
+	if altcomponent == "" {
+		altcomponent = component
+	}
+	return &common.TemplateValue{
+		Name:       common.RsrcName(r.Name, component, ""),
+		Namespace:  r.Namespace,
+		SecretName: common.RsrcName(r.Name, altcomponent, ""),
+		SvcName:    common.RsrcName(r.Name, altcomponent, ""),
+		Base:       r,
+		Labels:     label,
+		Selector:   selector,
+		Ports:      ports,
+	}
+}
+
+// updateStatus use reconciled objects to update component status
+func updateStatus(rsrc interface{}, reconciled []reconciler.Object, err error) time.Duration {
+	var period time.Duration
+	stts := &rsrc.(*alpha1.AirflowBase).Status
+	ready := stts.ComponentMeta.UpdateStatus(reconciler.ObjectsByType(reconciled, k8s.Type))
+	stts.Meta.UpdateStatus(&ready, err)
+	return period
+}
+
+// ------------------------------ MYSQL  ---------------------------------------
+
+func (s *MySQL) sts(o *reconciler.Object, v interface{}) {
+	r := v.(*common.TemplateValue)
+	sts := o.Obj.(*k8s.Object).Obj.(*appsv1.StatefulSet)
+	sts.Spec.Template.Spec.Containers[0].Resources = r.Base.Spec.MySQL.Resources
+	if r.Base.Spec.MySQL.VolumeClaimTemplate != nil {
+		sts.Spec.VolumeClaimTemplates = []corev1.PersistentVolumeClaim{*r.Base.Spec.MySQL.VolumeClaimTemplate}
+	}
+}
+
+// Observables returns the list of resource types observed for the MySQL component.
+func (s *MySQL) Observables(rsrc interface{}, labels map[string]string, dependent []reconciler.Object) []reconciler.Observable {
+	return k8s.NewObservables().
+		WithLabels(labels).
+		For(&appsv1.StatefulSetList{}).
+		For(&corev1.SecretList{}).
+		For(&policyv1.PodDisruptionBudgetList{}).
+		For(&corev1.ServiceList{}).
+		Get()
+}
+
+// Objects returns the list of resource/name for those resources created by
+// the operator for this spec and those resources referenced by this operator.
+// Mark resources as owned, referred
+func (s *MySQL) Objects(rsrc interface{}, rsrclabels map[string]string, observed, dependent, aggregated []reconciler.Object) ([]reconciler.Object, error) {
+	r := rsrc.(*alpha1.AirflowBase)
+	if r.Spec.MySQL == nil {
+		return []reconciler.Object{}, nil
+	}
+	ngdata := templateValue(r, common.ValueAirflowComponentMySQL, common.ValueAirflowComponentSQL, rsrclabels, rsrclabels, map[string]string{"mysql": "3306"})
+	ngdata.Secret = map[string]string{
+		"password":     base64.StdEncoding.EncodeToString(common.RandomAlphanumericString(16)),
+		"rootpassword": base64.StdEncoding.EncodeToString(common.RandomAlphanumericString(16)),
+	}
+	ngdata.PDBMinAvail = "100%"
+
+	return k8s.NewObjects().
+		WithValue(ngdata).
+		WithTemplate("mysql-sts.yaml", &appsv1.StatefulSetList{}, s.sts).
+		WithTemplate("secret.yaml", &corev1.SecretList{}, reconciler.NoUpdate).
+		WithTemplate("pdb.yaml", &policyv1.PodDisruptionBudgetList{}).
+		WithTemplate("svc.yaml", &corev1.ServiceList{}).
+		Build()
+}
+
+// UpdateStatus use reconciled objects to update component status
+func (s *MySQL) UpdateStatus(rsrc interface{}, reconciled []reconciler.Object, err error) time.Duration {
+	return updateStatus(rsrc, reconciled, err)
+}
+
+// ------------------------------ POSTGRES  ---------------------------------------
+
+func (s *Postgres) sts(o *reconciler.Object, v interface{}) {
+	r := v.(*common.TemplateValue)
+	sts := o.Obj.(*k8s.Object).Obj.(*appsv1.StatefulSet)
+	sts.Spec.Template.Spec.Containers[0].Resources = r.Base.Spec.Postgres.Resources
+	if r.Base.Spec.Postgres.VolumeClaimTemplate != nil {
+		sts.Spec.VolumeClaimTemplates = []corev1.PersistentVolumeClaim{*r.Base.Spec.Postgres.VolumeClaimTemplate}
+	}
+}
+
+// Observables returns the list of resource types observed for the Postgres component.
+func (s *Postgres) Observables(rsrc interface{}, labels map[string]string, dependent []reconciler.Object) []reconciler.Observable {
+	return k8s.NewObservables().
+		WithLabels(labels).
+		For(&appsv1.StatefulSetList{}).
+		For(&corev1.SecretList{}).
+		For(&policyv1.PodDisruptionBudgetList{}).
+		For(&corev1.ServiceList{}).
+		Get()
+}
+
+// Objects returns the list of resource/name for those resources created by
+// the operator for this spec and those resources referenced by this operator.
+// Mark resources as owned, referred
+func (s *Postgres) Objects(rsrc interface{}, rsrclabels map[string]string, observed, dependent, aggregated []reconciler.Object) ([]reconciler.Object, error) {
+	r := rsrc.(*alpha1.AirflowBase)
+	if r.Spec.Postgres == nil {
+		return []reconciler.Object{}, nil
+	}
+	ngdata := templateValue(r, common.ValueAirflowComponentPostgres, common.ValueAirflowComponentSQL, rsrclabels, rsrclabels, map[string]string{"postgres": "5432"})
+	ngdata.Secret = map[string]string{
+		"password":     base64.StdEncoding.EncodeToString(common.RandomAlphanumericString(16)),
+		"rootpassword": base64.StdEncoding.EncodeToString(common.RandomAlphanumericString(16)),
+	}
+	ngdata.PDBMinAvail = "100%"
+
+	return k8s.NewObjects().
+		WithValue(ngdata).
+		WithTemplate("postgres-sts.yaml", &appsv1.StatefulSetList{}, s.sts).
+		WithTemplate("secret.yaml", &corev1.SecretList{}, reconciler.NoUpdate).
+		WithTemplate("pdb.yaml", &policyv1.PodDisruptionBudgetList{}).
+		WithTemplate("svc.yaml", &corev1.ServiceList{}).
+		Build()
+}
+
+// UpdateStatus use reconciled objects to update component status
+func (s *Postgres) UpdateStatus(rsrc interface{}, reconciled []reconciler.Object, err error) time.Duration {
+	return updateStatus(rsrc, reconciled, err)
+}
+
+// ------------------------------ NFSStoreSpec ---------------------------------------
+
+// Observables returns the list of resource types observed for the NFS component.
+func (s *NFS) Observables(rsrc interface{}, labels map[string]string, dependent []reconciler.Object) []reconciler.Observable {
+	return k8s.NewObservables().
+		WithLabels(labels).
+		For(&appsv1.StatefulSetList{}).
+		For(&policyv1.PodDisruptionBudgetList{}).
+		For(&corev1.ServiceList{}).
+		Get()
+}
+
+// Objects returns the list of resource/name for those resources created by the operator for this spec.
+func (s *NFS) Objects(rsrc interface{}, rsrclabels map[string]string, observed, dependent, aggregated []reconciler.Object) ([]reconciler.Object, error) {
+	r := rsrc.(*alpha1.AirflowBase)
+	if r.Spec.Storage == nil {
+		return []reconciler.Object{}, nil
+	}
+	ngdata := templateValue(r, common.ValueAirflowComponentNFS, "", rsrclabels, rsrclabels, map[string]string{"nfs": "2049", "mountd": "20048", "rpcbind": "111"})
+	ngdata.PDBMinAvail = "100%"
+
+	return k8s.NewObjects().
+		WithValue(ngdata).
+		WithTemplate("nfs-sts.yaml", &appsv1.StatefulSetList{}, s.sts).
+		WithTemplate("pdb.yaml", &policyv1.PodDisruptionBudgetList{}).
+		WithTemplate("svc.yaml", &corev1.ServiceList{}).
+		Build()
+}
+
+func (s *NFS) sts(o *reconciler.Object, v interface{}) {
+	r := v.(*common.TemplateValue)
+	sts := o.Obj.(*k8s.Object).Obj.(*appsv1.StatefulSet)
+	sts.Spec.Template.Spec.Containers[0].Resources = r.Base.Spec.Storage.Resources
+	if r.Base.Spec.Storage.Volume != nil {
+		sts.Spec.VolumeClaimTemplates = []corev1.PersistentVolumeClaim{*r.Base.Spec.Storage.Volume}
+	}
+}
+
+// UpdateStatus use reconciled objects to update component status
+func (s *NFS) UpdateStatus(rsrc interface{}, reconciled []reconciler.Object, err error) time.Duration {
+	return updateStatus(rsrc, reconciled, err)
+}
+
+// ------------------------------ SQLProxy ---------------------------------------
+
+// Observables returns the list of resource types observed for the SQL proxy component.
+func (s *SQLProxy) Observables(rsrc interface{}, labels map[string]string, dependent []reconciler.Object) []reconciler.Observable {
+	return k8s.NewObservables().
+		WithLabels(labels).
+		For(&appsv1.StatefulSetList{}).
+		For(&corev1.ServiceList{}).
+		Get()
+}
+
+// Objects returns the list of resource/name for those resources created by
+// the operator for this spec and those resources referenced by this operator.
+// Mark resources as owned, referred
+func (s *SQLProxy) Objects(rsrc interface{}, rsrclabels map[string]string, observed, dependent, aggregated []reconciler.Object) ([]reconciler.Object, error) {
+	r := rsrc.(*alpha1.AirflowBase)
+	if r.Spec.SQLProxy == nil {
+		return []reconciler.Object{}, nil
+	}
+	sqlname := common.RsrcName(r.Name, common.ValueAirflowComponentSQL, "")
+
+	port := "3306"
+	if r.Spec.SQLProxy.Type == common.ValueSQLProxyTypePostgres {
+		port = "5432"
+	}
+	ngdata := templateValue(r, common.ValueAirflowComponentSQLProxy, "", rsrclabels, rsrclabels, map[string]string{"sqlproxy": port})
+	ngdata.SvcName = sqlname
+
+	return k8s.NewObjects().
+		WithValue(ngdata).
+		WithFolder("templates/").
+		WithTemplate("sqlproxy-sts.yaml", &appsv1.StatefulSetList{}).
+		WithTemplate("svc.yaml", &corev1.ServiceList{}).
+		WithReferredItem(&corev1.Secret{}, sqlname, r.Namespace).
+		Build()
+}
+
+// UpdateStatus use reconciled objects to update component status
+func (s *SQLProxy) UpdateStatus(rsrc interface{}, reconciled []reconciler.Object, err error) time.Duration {
+	return updateStatus(rsrc, reconciled, err)
+}
+
+// ---------------- Global AirflowBase component -------------------------
+
+// Observables asd
+func (s *AirflowBase) Observables(rsrc interface{}, labels map[string]string, dependent []reconciler.Object) []reconciler.Observable {
+	return k8s.NewObservables().
+		WithLabels(labels).
+		For(&app.ApplicationList{}).
+		Get()
+}
+
+// Objects returns the list of resource/name for those resources created by
+// the operator for this spec and those resources referenced by this operator.
+// Mark resources as owned, referred
+func (s *AirflowBase) Objects(rsrc interface{}, rsrclabels map[string]string, observed, dependent, aggregated []reconciler.Object) ([]reconciler.Object, error) {
+	r := rsrc.(*alpha1.AirflowBase)
+	selectors := make(map[string]string)
+	for k, v := range rsrclabels {
+		selectors[k] = v
+	}
+	delete(selectors, gr.LabelUsing)
+	ngdata := templateValue(r, common.ValueAirflowComponentBase, "", rsrclabels, selectors, nil)
+	ngdata.Expected = aggregated
+
+	return k8s.NewObjects().
+		WithValue(ngdata).
+		WithTemplate("base-application.yaml", &app.ApplicationList{},
+			func(o *reconciler.Object, v interface{}) {
+				ao := application.NewApplication(o.Obj.(*k8s.Object).Obj)
+				o = ao.SetSelector(r.Labels).
+					SetComponentGK(aggregated).
+					Item()
+			}).
+		Build()
+}
+
+// UpdateStatus use reconciled objects to update component status
+func (s *AirflowBase) UpdateStatus(rsrc interface{}, reconciled []reconciler.Object, err error) time.Duration {
+	return updateStatus(rsrc, reconciled, err)
+}
diff --git a/pkg/controller/airflowbase/airflowbase_controller_suite_test.go b/pkg/controller/airflowbase/airflowbase_controller_suite_test.go
new file mode 100644
index 0000000..cc4ec1d
--- /dev/null
+++ b/pkg/controller/airflowbase/airflowbase_controller_suite_test.go
@@ -0,0 +1,74 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package airflowbase
+
+import (
+	"log"
+	"os"
+	"path/filepath"
+	"sync"
+	"testing"
+
+	"github.com/onsi/gomega"
+	"k8s.io/airflow-operator/pkg/apis"
+	"k8s.io/client-go/kubernetes/scheme"
+	"k8s.io/client-go/rest"
+	"sigs.k8s.io/controller-runtime/pkg/envtest"
+	"sigs.k8s.io/controller-runtime/pkg/manager"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+)
+
+var cfg *rest.Config
+
+func TestMain(m *testing.M) {
+	t := &envtest.Environment{
+		CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "crds")},
+	}
+	apis.AddToScheme(scheme.Scheme)
+
+	var err error
+	if cfg, err = t.Start(); err != nil {
+		log.Fatal(err)
+	}
+
+	code := m.Run()
+	t.Stop()
+	os.Exit(code)
+}
+
+// SetupTestReconcile returns a reconcile.Reconcile implementation that delegates to inner and
+// writes the request to requests after Reconcile is finished.
+func SetupTestReconcile(inner reconcile.Reconciler) (reconcile.Reconciler, chan reconcile.Request) {
+	requests := make(chan reconcile.Request)
+	fn := reconcile.Func(func(req reconcile.Request) (reconcile.Result, error) {
+		result, err := inner.Reconcile(req)
+		requests <- req
+		return result, err
+	})
+	return fn, requests
+}
+
+// StartTestManager starts the given manager in a background goroutine and returns a stop channel and WaitGroup for shutdown.
+func StartTestManager(mgr manager.Manager, g *gomega.GomegaWithT) (chan struct{}, *sync.WaitGroup) {
+	stop := make(chan struct{})
+	wg := &sync.WaitGroup{}
+	go func() {
+		wg.Add(1)
+		g.Expect(mgr.Start(stop)).NotTo(gomega.HaveOccurred())
+		wg.Done()
+	}()
+	return stop, wg
+}
diff --git a/pkg/controller/airflowbase/airflowbase_controller_test.go b/pkg/controller/airflowbase/airflowbase_controller_test.go
new file mode 100644
index 0000000..baadff7
--- /dev/null
+++ b/pkg/controller/airflowbase/airflowbase_controller_test.go
@@ -0,0 +1,98 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package airflowbase
+
+import (
+	"testing"
+	"time"
+
+	"github.com/onsi/gomega"
+	"golang.org/x/net/context"
+	airflowv1alpha1 "k8s.io/airflow-operator/pkg/apis/airflow/v1alpha1"
+	appsv1 "k8s.io/api/apps/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/manager"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+)
+
+// c is the manager-backed client used by the test to create/read objects.
+var c client.Client
+
+// expectedRequest is the reconcile request emitted for the "foo" AirflowBase.
+var expectedRequest = reconcile.Request{NamespacedName: types.NamespacedName{Name: "foo", Namespace: "default"}}
+// Names of the StatefulSets the controller is expected to create.
+var mysqlkey = types.NamespacedName{Name: "foo-mysql", Namespace: "default"}
+var nfskey = types.NamespacedName{Name: "foo-nfs", Namespace: "default"}
+
+// timeout bounds every Eventually poll below.
+const timeout = time.Second * 5
+
+// TestReconcile creates an AirflowBase and verifies that the controller
+// reconciles it into mysql and nfs StatefulSets, and re-creates the mysql
+// StatefulSet after it is deleted.
+func TestReconcile(t *testing.T) {
+	g := gomega.NewGomegaWithT(t)
+	instance := &airflowv1alpha1.AirflowBase{
+		ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"},
+		Spec: airflowv1alpha1.AirflowBaseSpec{
+			MySQL: &airflowv1alpha1.MySQLSpec{
+				Operator: false,
+			},
+			Storage: &airflowv1alpha1.NFSStoreSpec{
+				Version: "",
+			},
+		},
+	}
+
+	// Setup the Manager and Controller.  Wrap the Controller Reconcile function so it writes each request to a
+	// channel when it is finished.
+	mgr, err := manager.New(cfg, manager.Options{})
+	g.Expect(err).NotTo(gomega.HaveOccurred())
+	c = mgr.GetClient()
+
+	r := newReconciler(mgr)
+	recFn, requests := SetupTestReconcile(r)
+	g.Expect(r.Controller(recFn)).NotTo(gomega.HaveOccurred())
+
+	stopMgr, mgrStopped := StartTestManager(mgr, g)
+
+	defer func() {
+		close(stopMgr)
+		mgrStopped.Wait()
+	}()
+
+	// Create the AirflowBase object and expect the Reconcile and Deployment to be created
+	err = c.Create(context.TODO(), instance)
+	// The instance object may not be a valid object because it might be missing some required fields.
+	// Please modify the instance object by adding required fields and then remove the following if statement.
+	if apierrors.IsInvalid(err) {
+		t.Logf("failed to create object, got an invalid object error: %v", err)
+		return
+	}
+	g.Expect(err).NotTo(gomega.HaveOccurred())
+	defer c.Delete(context.TODO(), instance)
+	g.Eventually(requests, timeout).Should(gomega.Receive(gomega.Equal(expectedRequest)))
+
+	// Both component StatefulSets should eventually exist.
+	mysqlsts := &appsv1.StatefulSet{}
+	nfssts := &appsv1.StatefulSet{}
+	g.Eventually(func() error { return c.Get(context.TODO(), mysqlkey, mysqlsts) }, timeout).Should(gomega.Succeed())
+	g.Eventually(func() error { return c.Get(context.TODO(), nfskey, nfssts) }, timeout).Should(gomega.Succeed())
+
+	// Delete the Deployment and expect Reconcile to be called for Deployment deletion
+	g.Expect(c.Delete(context.TODO(), mysqlsts)).NotTo(gomega.HaveOccurred())
+	g.Eventually(requests, timeout).Should(gomega.Receive(gomega.Equal(expectedRequest)))
+	g.Eventually(func() error { return c.Get(context.TODO(), mysqlkey, mysqlsts) }, timeout).Should(gomega.Succeed())
+
+	// Manually delete Deployment since GC isn't enabled in the test control plane
+	g.Expect(c.Delete(context.TODO(), nfssts)).To(gomega.Succeed())
+
+}
diff --git a/pkg/controller/airflowcluster/airflowcluster_controller.go b/pkg/controller/airflowcluster/airflowcluster_controller.go
new file mode 100644
index 0000000..0f8a3f6
--- /dev/null
+++ b/pkg/controller/airflowcluster/airflowcluster_controller.go
@@ -0,0 +1,906 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package airflowcluster
+
+import (
+	"context"
+	"encoding/base64"
+	app "github.com/kubernetes-sigs/application/pkg/apis/app/v1beta1"
+	alpha1 "k8s.io/airflow-operator/pkg/apis/airflow/v1alpha1"
+	"k8s.io/airflow-operator/pkg/controller/application"
+	"k8s.io/airflow-operator/pkg/controller/common"
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	policyv1 "k8s.io/api/policy/v1beta1"
+	rbacv1 "k8s.io/api/rbac/v1"
+	"sigs.k8s.io/controller-reconciler/pkg/finalizer"
+	gr "sigs.k8s.io/controller-reconciler/pkg/genericreconciler"
+	"sigs.k8s.io/controller-reconciler/pkg/reconciler"
+	"sigs.k8s.io/controller-reconciler/pkg/reconciler/manager/gcp"
+	"sigs.k8s.io/controller-reconciler/pkg/reconciler/manager/gcp/redis"
+	"sigs.k8s.io/controller-reconciler/pkg/reconciler/manager/k8s"
+	"sigs.k8s.io/controller-reconciler/pkg/status"
+	"sigs.k8s.io/controller-runtime/pkg/manager"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+)
+
+const (
+	// Env-var prefixes for Airflow's [kubernetes] and [core] config sections.
+	afk             = "AIRFLOW__KUBERNETES__"
+	afc             = "AIRFLOW__CORE__"
+	// Sync destination dirs (under the DAG volume) for git and GCS sources.
+	gitSyncDestDir  = "gitdags"
+	gCSSyncDestDir  = "dags"
+	// Airflow home and the base folder DAGs are served from inside the pod.
+	airflowHome     = "/usr/local/airflow"
+	airflowDagsBase = airflowHome + "/dags/"
+)
+
+// +kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=,resources=services,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=,resources=configmaps,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=airflow.k8s.io,resources=airflowbases,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=airflow.k8s.io,resources=airflowclusters,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=app.k8s.io,resources=applications,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=policy,resources=poddisruptionbudgets,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=rolebindings,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=,resources=secrets,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=,resources=serviceaccounts,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=storage.k8s.io,resources=storageclasses,verbs=get;list;watch;create;update;patch;delete
+
+// Add creates a new AirflowCluster Controller and adds it to the Manager with
+// default RBAC. The Manager will set fields on the Controller and Start it
+// when the Manager is Started.
+func Add(mgr manager.Manager) error {
+	r := newReconciler(mgr)
+	return r.Controller(nil)
+}
+
+// newReconciler builds the generic reconciler for AirflowCluster: one handler
+// per managed component (UI, redis, memorystore, flower, scheduler, worker,
+// plus the cluster-wide Application), with validation/defaulting hooks and a
+// GCP Cloud Memorystore resource manager for the redis backend.
+func newReconciler(mgr manager.Manager) *gr.Reconciler {
+	return gr.
+		WithManager(mgr).
+		WithResourceManager(redis.Getter(context.TODO())).
+		For(&alpha1.AirflowCluster{}, alpha1.SchemeGroupVersion).
+		Using(&UI{}).
+		Using(&Redis{}).
+		Using(&MemoryStore{}).
+		Using(&Flower{}).
+		Using(&Scheduler{}).
+		Using(&Worker{}).
+		Using(&Cluster{}).
+		WithErrorHandler(handleError).
+		WithValidator(validate).
+		WithDefaulter(applyDefaults).
+		RegisterSchemeBuilder(app.SchemeBuilder).
+		Build()
+}
+
+// handleError records err on the AirflowCluster status, or clears any
+// previously recorded error when err is nil. kind is currently unused.
+func handleError(resource interface{}, err error, kind string) {
+	ac := resource.(*alpha1.AirflowCluster)
+	if err == nil {
+		ac.Status.ClearError()
+		return
+	}
+	ac.Status.SetError("ErrorSeen", err.Error())
+}
+
+// validate delegates to the AirflowCluster's own Validate method.
+func validate(resource interface{}) error {
+	ac := resource.(*alpha1.AirflowCluster)
+	return ac.Validate()
+}
+
+// applyDefaults fills unset AirflowCluster spec fields with their defaults.
+func applyDefaults(resource interface{}) {
+	ac := resource.(*alpha1.AirflowCluster)
+	ac.ApplyDefaults()
+}
+
+// Cluster is the component handler for the cluster-wide Application object.
+type Cluster struct{}
+
+// Redis is the component handler for the in-cluster redis broker.
+type Redis struct{}
+
+// Flower is the component handler for the flower celery monitor.
+type Flower struct{}
+
+// Scheduler is the component handler for the airflow scheduler.
+type Scheduler struct{}
+
+// Worker is the component handler for celery workers.
+type Worker struct{}
+
+// UI is the component handler for the airflow webserver.
+type UI struct{}
+
+// MemoryStore is the component handler for GCP Cloud Memorystore (redis).
+type MemoryStore struct{}
+
+// --------------- common functions -------------------------
+
+// envFromSecret returns an EnvVarSource that reads key from the named secret.
+func envFromSecret(name string, key string) *corev1.EnvVarSource {
+	return &corev1.EnvVarSource{
+		SecretKeyRef: &corev1.SecretKeySelector{
+			LocalObjectReference: corev1.LocalObjectReference{
+				Name: name,
+			},
+			Key: key,
+		},
+	}
+}
+
+// IsPostgres reports whether the base deployment is backed by Postgres,
+// either directly or via a SQL proxy of the postgres type.
+func IsPostgres(s *alpha1.AirflowBaseSpec) bool {
+	if s.Postgres != nil {
+		return true
+	}
+	return s.SQLProxy != nil && s.SQLProxy.Type == common.ValueSQLProxyTypePostgres
+}
+
+// updateSts applies the shared StatefulSet customizations (airflow env vars
+// and DAG sync containers) and returns the sts plus the template values for
+// further component-specific tweaks.
+func updateSts(o *reconciler.Object, v interface{}) (*appsv1.StatefulSet, *common.TemplateValue) {
+	r := v.(*common.TemplateValue)
+	sts := o.Obj.(*k8s.Object).Obj.(*appsv1.StatefulSet)
+	sts.Spec.Template.Spec.Containers[0].Env = getAirflowEnv(r.Cluster, sts.Name, r.Base)
+	addAirflowContainers(r.Cluster, sts)
+	return sts, r
+}
+
+// templateValue assembles the value object fed to the component templates.
+// It resolves the referenced AirflowBase from the dependent resources; the
+// generated name is reused for the secret and service of the component.
+func templateValue(r *alpha1.AirflowCluster, dependent []reconciler.Object, component string, label, selector, ports map[string]string) *common.TemplateValue {
+	b := k8s.GetItem(dependent, &alpha1.AirflowBase{}, r.Spec.AirflowBaseRef.Name, r.Namespace)
+	base := b.(*alpha1.AirflowBase)
+	return &common.TemplateValue{
+		Name:       common.RsrcName(r.Name, component, ""),
+		Namespace:  r.Namespace,
+		SecretName: common.RsrcName(r.Name, component, ""),
+		SvcName:    common.RsrcName(r.Name, component, ""),
+		Cluster:    r,
+		Base:       base,
+		Labels:     label,
+		Selector:   selector,
+		Ports:      ports,
+	}
+}
+
+// addAirflowContainers attaches the DAG sync container (if DAGs are
+// configured) to ss — as an init container for one-shot syncs, otherwise as
+// a long-running sidecar.
+func addAirflowContainers(r *alpha1.AirflowCluster, ss *appsv1.StatefulSet) {
+	if r.Spec.DAGs != nil {
+		init, dc := dagContainer(r.Spec.DAGs, "dags-data")
+		if init {
+			ss.Spec.Template.Spec.InitContainers = append(ss.Spec.Template.Spec.InitContainers, dc)
+		} else {
+			ss.Spec.Template.Spec.Containers = append(ss.Spec.Template.Spec.Containers, dc)
+		}
+	}
+}
+
+// addMySQLUserDBContainer prepends an init container that creates the
+// airflow database and user on the base's MySQL server, using the root
+// password from the base secret and the user password from the UI secret.
+func addMySQLUserDBContainer(r *alpha1.AirflowCluster, ss *appsv1.StatefulSet) {
+	sqlRootSecret := common.RsrcName(r.Spec.AirflowBaseRef.Name, common.ValueAirflowComponentSQL, "")
+	sqlSvcName := common.RsrcName(r.Spec.AirflowBaseRef.Name, common.ValueAirflowComponentSQL, "")
+	sqlSecret := common.RsrcName(r.Name, common.ValueAirflowComponentUI, "")
+	env := []corev1.EnvVar{
+		{Name: "SQL_ROOT_PASSWORD", ValueFrom: envFromSecret(sqlRootSecret, "rootpassword")},
+		{Name: "SQL_DB", Value: r.Spec.Scheduler.DBName},
+		{Name: "SQL_USER", Value: r.Spec.Scheduler.DBUser},
+		{Name: "SQL_PASSWORD", ValueFrom: envFromSecret(sqlSecret, "password")},
+		{Name: "SQL_HOST", Value: sqlSvcName},
+		{Name: "DB_TYPE", Value: "mysql"},
+	}
+	// The $(VAR) references below are substituted by kubelet from env above.
+	containers := []corev1.Container{
+		{
+			Name:    "mysql-dbcreate",
+			Image:   alpha1.DefaultMySQLImage + ":" + alpha1.DefaultMySQLVersion,
+			Env:     env,
+			Command: []string{"/bin/bash"},
+			//SET GLOBAL explicit_defaults_for_timestamp=ON;
+			Args: []string{"-c", `
+mysql -uroot -h$(SQL_HOST) -p$(SQL_ROOT_PASSWORD) << EOSQL
+CREATE DATABASE IF NOT EXISTS $(SQL_DB);
+USE $(SQL_DB);
+CREATE USER IF NOT EXISTS '$(SQL_USER)'@'%' IDENTIFIED BY '$(SQL_PASSWORD)';
+GRANT ALL ON $(SQL_DB).* TO '$(SQL_USER)'@'%' ;
+FLUSH PRIVILEGES;
+SHOW GRANTS FOR $(SQL_USER);
+EOSQL
+`},
+		},
+	}
+	// Prepend so DB setup runs before any other init containers (e.g. DAG sync).
+	ss.Spec.Template.Spec.InitContainers = append(containers, ss.Spec.Template.Spec.InitContainers...)
+}
+
+// addPostgresUserDBContainer prepends an init container that creates the
+// airflow database and user on the base's Postgres server (idempotent: only
+// creates when the database does not yet exist).
+func addPostgresUserDBContainer(r *alpha1.AirflowCluster, ss *appsv1.StatefulSet) {
+	sqlRootSecret := common.RsrcName(r.Spec.AirflowBaseRef.Name, common.ValueAirflowComponentSQL, "")
+	sqlSvcName := common.RsrcName(r.Spec.AirflowBaseRef.Name, common.ValueAirflowComponentSQL, "")
+	sqlSecret := common.RsrcName(r.Name, common.ValueAirflowComponentUI, "")
+	env := []corev1.EnvVar{
+		{Name: "SQL_ROOT_PASSWORD", ValueFrom: envFromSecret(sqlRootSecret, "rootpassword")},
+		{Name: "SQL_DB", Value: r.Spec.Scheduler.DBName},
+		{Name: "SQL_USER", Value: r.Spec.Scheduler.DBUser},
+		{Name: "SQL_PASSWORD", ValueFrom: envFromSecret(sqlSecret, "password")},
+		{Name: "SQL_HOST", Value: sqlSvcName},
+		{Name: "DB_TYPE", Value: "postgres"},
+	}
+	containers := []corev1.Container{
+		{
+			Name:    "postgres-dbcreate",
+			Image:   alpha1.DefaultPostgresImage + ":" + alpha1.DefaultPostgresVersion,
+			Env:     env,
+			Command: []string{"/bin/bash"},
+			Args: []string{"-c", `
+PGPASSWORD=$(SQL_ROOT_PASSWORD) psql -h $SQL_HOST -U postgres -tc "SELECT 1 FROM pg_database WHERE datname = '$(SQL_DB)'" | grep -q 1 || (PGPASSWORD=$(SQL_ROOT_PASSWORD) psql -h $SQL_HOST -U postgres -c "CREATE DATABASE $(SQL_DB)" &&
+PGPASSWORD=$(SQL_ROOT_PASSWORD) psql -h $SQL_HOST -U postgres -c "CREATE USER $(SQL_USER) WITH ENCRYPTED PASSWORD '$(SQL_PASSWORD)'; GRANT ALL PRIVILEGES ON DATABASE $(SQL_DB) TO $(SQL_USER)")
+`},
+		},
+	}
+	// Prepend so DB setup runs before any other init containers.
+	ss.Spec.Template.Spec.InitContainers = append(containers, ss.Spec.Template.Spec.InitContainers...)
+}
+
+// dependantResources lists what the cluster depends on: the referenced
+// AirflowBase in the same namespace.
+func dependantResources(i interface{}) []reconciler.Object {
+	r := i.(*alpha1.AirflowCluster)
+	return []reconciler.Object{
+		k8s.ReferredItem(&alpha1.AirflowBase{}, r.Spec.AirflowBaseRef.Name, r.Namespace),
+	}
+}
+
+// getAirflowPrometheusEnv builds the env for the airflow-prometheus exporter
+// sidecar: listen address plus database backend/host/port/credentials,
+// switching to postgres settings when the base uses Postgres.
+func getAirflowPrometheusEnv(r *alpha1.AirflowCluster, base *alpha1.AirflowBase) []corev1.EnvVar {
+	sqlSvcName := common.RsrcName(r.Spec.AirflowBaseRef.Name, common.ValueAirflowComponentSQL, "")
+	sqlSecret := common.RsrcName(r.Name, common.ValueAirflowComponentUI, "")
+	ap := "AIRFLOW_PROMETHEUS_"
+	apd := ap + "DATABASE_"
+	backend := "mysql"
+	port := "3306"
+	if IsPostgres(&base.Spec) {
+		backend = "postgres"
+		port = "5432"
+	}
+	env := []corev1.EnvVar{
+		{Name: ap + "LISTEN_ADDR", Value: ":9112"},
+		{Name: apd + "BACKEND", Value: backend},
+		{Name: apd + "HOST", Value: sqlSvcName},
+		{Name: apd + "PORT", Value: port},
+		{Name: apd + "USER", Value: r.Spec.Scheduler.DBUser},
+		{Name: apd + "PASSWORD", ValueFrom: envFromSecret(sqlSecret, "password")},
+		{Name: apd + "NAME", Value: r.Spec.Scheduler.DBName},
+	}
+	return env
+}
+
+// getAirflowEnv assembles the environment for an airflow container:
+// database connection settings, the DAG folder location (depending on the
+// configured DAG source), executor-specific settings (k8s worker-pod config
+// or celery redis broker), and finally the user-supplied Config.AirflowEnv /
+// Config.AirflowSecretEnv entries. saName is the service account used for
+// k8s-executor worker pods.
+func getAirflowEnv(r *alpha1.AirflowCluster, saName string, base *alpha1.AirflowBase) []corev1.EnvVar {
+	sp := r.Spec
+	sqlSvcName := common.RsrcName(sp.AirflowBaseRef.Name, common.ValueAirflowComponentSQL, "")
+	sqlSecret := common.RsrcName(r.Name, common.ValueAirflowComponentUI, "")
+	schedulerConfigmap := common.RsrcName(r.Name, common.ValueAirflowComponentScheduler, "")
+	redisSecret := ""
+	redisSvcName := ""
+	if sp.MemoryStore == nil {
+		// In-cluster redis: secret and service share the same generated name.
+		redisSecret = common.RsrcName(r.Name, common.ValueAirflowComponentRedis, "")
+		redisSvcName = redisSecret
+	}
+	dagFolder := airflowDagsBase
+	if sp.DAGs != nil {
+		if sp.DAGs.Git != nil {
+			dagFolder = airflowDagsBase + gitSyncDestDir + "/" + sp.DAGs.DagSubdir
+		} else if sp.DAGs.GCS != nil {
+			dagFolder = airflowDagsBase + gCSSyncDestDir + "/" + sp.DAGs.DagSubdir
+		}
+	}
+	dbType := "mysql"
+	if IsPostgres(&base.Spec) {
+		dbType = "postgres"
+	}
+	env := []corev1.EnvVar{
+		{Name: "EXECUTOR", Value: sp.Executor},
+		{Name: "SQL_PASSWORD", ValueFrom: envFromSecret(sqlSecret, "password")},
+		{Name: afc + "DAGS_FOLDER", Value: dagFolder},
+		{Name: "SQL_HOST", Value: sqlSvcName},
+		{Name: "SQL_USER", Value: sp.Scheduler.DBUser},
+		{Name: "SQL_DB", Value: sp.Scheduler.DBName},
+		{Name: "DB_TYPE", Value: dbType},
+	}
+	if sp.Executor == alpha1.ExecutorK8s {
+		env = append(env, []corev1.EnvVar{
+			{Name: afk + "AIRFLOW_CONFIGMAP", Value: schedulerConfigmap},
+			{Name: afk + "WORKER_CONTAINER_REPOSITORY", Value: sp.Worker.Image},
+			{Name: afk + "WORKER_CONTAINER_TAG", Value: sp.Worker.Version},
+			{Name: afk + "WORKER_CONTAINER_IMAGE_PULL_POLICY", Value: "IfNotPresent"},
+			{Name: afk + "DELETE_WORKER_PODS", Value: "True"},
+			{Name: afk + "NAMESPACE", Value: r.Namespace},
+			//{Name: afk+"IMAGE_PULL_SECRETS", Value: s.ImagePullSecrets},
+			//{Name: afk+"GCP_SERVICE_ACCOUNT_KEYS", Vaslue:  ??},
+		}...)
+		if sp.DAGs != nil && sp.DAGs.Git != nil {
+			env = append(env, []corev1.EnvVar{
+				{Name: afk + "GIT_REPO", Value: sp.DAGs.Git.Repo},
+				{Name: afk + "GIT_BRANCH", Value: sp.DAGs.Git.Branch},
+				{Name: afk + "GIT_SUBPATH", Value: sp.DAGs.DagSubdir},
+				{Name: afk + "GIT_SYNC_DEST", Value: gitSyncDestDir},
+				{Name: afk + "WORKER_SERVICE_ACCOUNT_NAME", Value: saName},
+				{Name: afk + "GIT_DAGS_FOLDER_MOUNT_POINT", Value: airflowDagsBase},
+				// git_sync_root = /git
+				// git_sync_dest = repo
+			}...)
+			if sp.DAGs.Git.CredSecretRef != nil {
+				env = append(env, []corev1.EnvVar{
+					{Name: "GIT_PASSWORD",
+						ValueFrom: envFromSecret(sp.DAGs.Git.CredSecretRef.Name, "password")},
+					{Name: "GIT_USER", Value: sp.DAGs.Git.User},
+				}...)
+			}
+		}
+		// dags_in_image = False
+		// dags_volume_subpath =
+		// dags_volume_claim =
+	}
+	if sp.Executor == alpha1.ExecutorCelery {
+		// NOTE(review): the branches below assume sp.Redis is non-nil whenever
+		// MemoryStore is nil — confirm validation/defaulting guarantees this,
+		// otherwise this dereference can panic.
+		if sp.MemoryStore != nil {
+			env = append(env,
+				[]corev1.EnvVar{
+					{Name: "REDIS_HOST", Value: sp.MemoryStore.Status.Host},
+					{Name: "REDIS_PORT", Value: strconv.FormatInt(sp.MemoryStore.Status.Port, 10)},
+				}...)
+		} else if r.Spec.Redis.RedisHost == "" {
+			env = append(env,
+				[]corev1.EnvVar{
+					{Name: "REDIS_PASSWORD",
+						ValueFrom: envFromSecret(redisSecret, "password")},
+					{Name: "REDIS_HOST", Value: redisSvcName},
+				}...)
+		} else {
+			env = append(env,
+				[]corev1.EnvVar{
+					{Name: "REDIS_HOST", Value: r.Spec.Redis.RedisHost},
+					{Name: "REDIS_PORT", Value: r.Spec.Redis.RedisPort},
+				}...)
+			if r.Spec.Redis.RedisPassword {
+				env = append(env,
+					[]corev1.EnvVar{
+						{Name: "REDIS_PASSWORD",
+							ValueFrom: envFromSecret(redisSecret, "password")},
+					}...)
+			}
+		}
+	}
+
+	// Do sorted key scan. To store the keys in slice in sorted order
+	var keys []string
+	for k := range sp.Config.AirflowEnv {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+	for _, k := range keys {
+		env = append(env, corev1.EnvVar{Name: k, Value: sp.Config.AirflowEnv[k]})
+	}
+
+	for _, k := range sp.Config.AirflowSecretEnv {
+		env = append(env, corev1.EnvVar{Name: k.Env, ValueFrom: envFromSecret(k.Secret, k.Field)})
+	}
+
+	return env
+}
+
+// --------------- Global Cluster component -------------------------
+
+// Observables lists the labelled Application objects this component observes.
+func (c *Cluster) Observables(rsrc interface{}, labels map[string]string, dependent []reconciler.Object) []reconciler.Observable {
+	return k8s.NewObservables().
+		WithLabels(labels).
+		For(&app.ApplicationList{}).
+		Get()
+}
+
+// DependentResources - return dependant resources
+func (c *Cluster) DependentResources(rsrc interface{}) []reconciler.Object {
+	return dependantResources(rsrc)
+}
+
+// Objects returns the list of resource/name for those resources created by
+// the operator for this spec and those resources referenced by this operator.
+// Mark resources as owned, referred
+func (c *Cluster) Objects(rsrc interface{}, rsrclabels map[string]string, observed, dependent, aggregated []reconciler.Object) ([]reconciler.Object, error) {
+	r := rsrc.(*alpha1.AirflowCluster)
+
+	// Copy the labels, dropping the per-component "using" label so the
+	// selector matches all components.
+	selectors := make(map[string]string)
+	for k, v := range rsrclabels {
+		selectors[k] = v
+	}
+	delete(selectors, gr.LabelUsing)
+
+	ngdata := templateValue(r, dependent, common.ValueAirflowComponentCluster, rsrclabels, selectors, nil)
+	ngdata.Expected = aggregated
+
+	return k8s.NewObjects().
+		WithValue(ngdata).
+		WithTemplate("cluster-application.yaml", &app.ApplicationList{},
+			func(o *reconciler.Object, v interface{}) {
+				ao := application.NewApplication(o.Obj.(*k8s.Object).Obj)
+				// NOTE(review): assigning to the parameter o has no effect on
+				// the caller — confirm NewApplication wraps/mutates the
+				// underlying object in place, otherwise this customization
+				// is silently lost.
+				o = ao.SetSelector(r.Labels).
+					SetComponentGK(aggregated).
+					Item()
+			}).
+		Build()
+}
+
+// UpdateStatus folds the reconciled objects' state into the cluster status;
+// the returned duration (always zero here) is the requeue period.
+func (c *Cluster) UpdateStatus(rsrc interface{}, reconciled []reconciler.Object, err error) time.Duration {
+	var period time.Duration
+	stts := &rsrc.(*alpha1.AirflowCluster).Status
+	ready := stts.ComponentMeta.UpdateStatus(reconciler.ObjectsByType(reconciled, k8s.Type))
+	stts.Meta.UpdateStatus(&ready, err)
+	return period
+}
+
+// ------------------------------ Airflow UI -----------------------------------
+
+// Observables lists the labelled StatefulSets and Secrets the UI observes.
+func (s *UI) Observables(rsrc interface{}, labels map[string]string, dependent []reconciler.Object) []reconciler.Observable {
+	return k8s.NewObservables().
+		WithLabels(labels).
+		For(&appsv1.StatefulSetList{}).
+		For(&corev1.SecretList{}).
+		Get()
+}
+
+// DependentResources - return dependant resources
+func (s *UI) DependentResources(rsrc interface{}) []reconciler.Object {
+	return dependantResources(rsrc)
+}
+
+// Objects returns the UI StatefulSet and its password secret. Returns no
+// objects when the UI is disabled, or while a configured MemoryStore has not
+// yet reported a host.
+func (s *UI) Objects(rsrc interface{}, rsrclabels map[string]string, observed, dependent, aggregated []reconciler.Object) ([]reconciler.Object, error) {
+	r := rsrc.(*alpha1.AirflowCluster)
+	if r.Spec.UI == nil {
+		return []reconciler.Object{}, nil
+	}
+
+	if r.Spec.MemoryStore != nil && r.Spec.MemoryStore.Status.Host == "" {
+		return []reconciler.Object{}, nil
+	}
+
+	ngdata := templateValue(r, dependent, common.ValueAirflowComponentUI, rsrclabels, rsrclabels, map[string]string{"web": "8080"})
+	// Random password generated once; NoUpdate keeps the secret stable.
+	ngdata.Secret = map[string]string{
+		"password": base64.StdEncoding.EncodeToString(common.RandomAlphanumericString(16)),
+	}
+
+	return k8s.NewObjects().
+		WithValue(ngdata).
+		WithTemplate("ui-sts.yaml", &appsv1.StatefulSetList{}, s.sts).
+		WithTemplate("secret.yaml", &corev1.SecretList{}, reconciler.NoUpdate).
+		Build()
+}
+
+// sts customizes the UI StatefulSet: resources plus a DB-create init
+// container matching the base's database flavor.
+func (s *UI) sts(o *reconciler.Object, v interface{}) {
+	sts, r := updateSts(o, v)
+	sts.Spec.Template.Spec.Containers[0].Resources = r.Cluster.Spec.UI.Resources
+	if IsPostgres(&r.Base.Spec) {
+		addPostgresUserDBContainer(r.Cluster, sts)
+	} else {
+		addMySQLUserDBContainer(r.Cluster, sts)
+	}
+}
+
+// ------------------------------ RedisSpec ---------------------------------------
+
+// sts customizes the redis StatefulSet: resources and an optional volume
+// claim template for persistence.
+// NOTE(review): value receiver here, pointer receivers elsewhere — confirm
+// this inconsistency is intentional.
+func (s Redis) sts(o *reconciler.Object, v interface{}) {
+	r := v.(*common.TemplateValue)
+	sts := o.Obj.(*k8s.Object).Obj.(*appsv1.StatefulSet)
+	sts.Spec.Template.Spec.Containers[0].Resources = r.Cluster.Spec.Redis.Resources
+	if r.Cluster.Spec.Redis.VolumeClaimTemplate != nil {
+		sts.Spec.VolumeClaimTemplates = []corev1.PersistentVolumeClaim{*r.Cluster.Spec.Redis.VolumeClaimTemplate}
+	}
+}
+
+// Observables lists the labelled resources the redis component observes.
+func (s *Redis) Observables(rsrc interface{}, labels map[string]string, dependent []reconciler.Object) []reconciler.Observable {
+	return k8s.NewObservables().
+		WithLabels(labels).
+		For(&appsv1.StatefulSetList{}).
+		For(&corev1.SecretList{}).
+		For(&policyv1.PodDisruptionBudgetList{}).
+		For(&corev1.ServiceList{}).
+		Get()
+}
+
+// DependentResources - return dependant resources
+func (s *Redis) DependentResources(rsrc interface{}) []reconciler.Object {
+	return dependantResources(rsrc)
+}
+
+// Objects returns the in-cluster redis resources (sts, secret, pdb, svc).
+// Skipped when redis is unconfigured or an external RedisHost is set.
+func (s *Redis) Objects(rsrc interface{}, rsrclabels map[string]string, observed, dependent, aggregated []reconciler.Object) ([]reconciler.Object, error) {
+	r := rsrc.(*alpha1.AirflowCluster)
+	if r.Spec.Redis == nil || r.Spec.Redis.RedisHost != "" {
+		return []reconciler.Object{}, nil
+	}
+	ngdata := templateValue(r, dependent, common.ValueAirflowComponentRedis, rsrclabels, rsrclabels, map[string]string{"redis": "6379"})
+	ngdata.Secret = map[string]string{
+		"password": base64.StdEncoding.EncodeToString(common.RandomAlphanumericString(16)),
+	}
+	ngdata.PDBMinAvail = "100%"
+
+	return k8s.NewObjects().
+		WithValue(ngdata).
+		WithTemplate("redis-sts.yaml", &appsv1.StatefulSetList{}, s.sts).
+		WithTemplate("secret.yaml", &corev1.SecretList{}, reconciler.NoUpdate).
+		WithTemplate("pdb.yaml", &policyv1.PodDisruptionBudgetList{}).
+		WithTemplate("svc.yaml", &corev1.ServiceList{}).
+		Build()
+}
+
+// ------------------------------ Scheduler ---------------------------------------
+
+// gcsContainer builds the gcs-syncd container that syncs DAGs from the
+// configured bucket into volName. The boolean result is true when the sync
+// runs once (init container) rather than continuously (sidecar).
+func gcsContainer(s *alpha1.GCSSpec, volName string) (bool, corev1.Container) {
+	container := corev1.Container{
+		Name:  "gcs-syncd",
+		Image: alpha1.GCSsyncImage + ":" + alpha1.GCSsyncVersion,
+		Env: []corev1.EnvVar{
+			{Name: "GCS_BUCKET", Value: s.Bucket},
+		},
+		Args: []string{"/home/airflow/gcs"},
+		VolumeMounts: []corev1.VolumeMount{
+			{
+				Name:      volName,
+				MountPath: "/home/airflow/gcs",
+			},
+		},
+	}
+	return s.Once, container
+}
+
+// gitContainer builds the git-sync container that clones/syncs the DAG repo
+// into volName. Returns true when the sync runs once (init container)
+// rather than continuously (sidecar).
+func gitContainer(s *alpha1.GitSpec, volName string) (bool, corev1.Container) {
+	init := false
+	container := corev1.Container{}
+	env := []corev1.EnvVar{
+		{Name: "GIT_SYNC_REPO", Value: s.Repo},
+		{Name: "GIT_SYNC_DEST", Value: gitSyncDestDir},
+		{Name: "GIT_SYNC_BRANCH", Value: s.Branch},
+		{Name: "GIT_SYNC_ONE_TIME", Value: strconv.FormatBool(s.Once)},
+		{Name: "GIT_SYNC_REV", Value: s.Rev},
+	}
+	// Optional credentials for private repos.
+	if s.CredSecretRef != nil {
+		env = append(env, []corev1.EnvVar{
+			{Name: "GIT_SYNC_PASSWORD",
+				ValueFrom: envFromSecret(s.CredSecretRef.Name, "password")},
+			{Name: "GIT_SYNC_USERNAME", Value: s.User},
+		}...)
+	}
+	if s.Once {
+		init = true
+	}
+	container = corev1.Container{
+		Name:    "git-sync",
+		Image:   alpha1.GitsyncImage + ":" + alpha1.GitsyncVersion,
+		Env:     env,
+		Command: []string{"/git-sync"},
+		Ports: []corev1.ContainerPort{
+			{
+				Name:          "gitsync",
+				ContainerPort: 2020,
+			},
+		},
+		VolumeMounts: []corev1.VolumeMount{
+			{
+				Name:      volName,
+				MountPath: "/tmp/git",
+			},
+		},
+	}
+
+	return init, container
+}
+
+// dagContainer dispatches to the DAG-source-specific container builder.
+// Only Git and GCS sources produce a container; any other configuration
+// yields a zero-value container with init=false.
+func dagContainer(s *alpha1.DagSpec, volName string) (bool, corev1.Container) {
+	switch {
+	case s.Git != nil:
+		return gitContainer(s.Git, volName)
+	case s.GCS != nil:
+		return gcsContainer(s.GCS, volName)
+	default:
+		return false, corev1.Container{}
+	}
+}
+
+// sts customizes the scheduler StatefulSet: service account for the k8s
+// executor, scheduler resources, and the metrics sidecar environment.
+func (s *Scheduler) sts(o *reconciler.Object, v interface{}) {
+	sts, r := updateSts(o, v)
+	if r.Cluster.Spec.Executor == alpha1.ExecutorK8s {
+		sts.Spec.Template.Spec.ServiceAccountName = sts.Name
+	}
+	sts.Spec.Template.Spec.Containers[0].Resources = r.Cluster.Spec.Scheduler.Resources
+	// Containers[1] is assumed to be the prometheus exporter sidecar from
+	// scheduler-sts.yaml — TODO confirm the template's container ordering.
+	sts.Spec.Template.Spec.Containers[1].Env = getAirflowPrometheusEnv(r.Cluster, r.Base)
+}
+
+// DependentResources returns the base reference, plus the UI SQL secret when
+// the k8s executor needs it to build the SQLAlchemy connection string.
+func (s *Scheduler) DependentResources(rsrc interface{}) []reconciler.Object {
+	r := rsrc.(*alpha1.AirflowCluster)
+	resources := dependantResources(rsrc)
+	if r.Spec.Executor == alpha1.ExecutorK8s {
+		sqlSecret := common.RsrcName(r.Name, common.ValueAirflowComponentUI, "")
+		resources = append(resources, k8s.ReferredItem(&corev1.Secret{}, sqlSecret, r.Namespace))
+	}
+	return resources
+}
+
+// Observables lists the labelled resources the scheduler observes.
+func (s *Scheduler) Observables(rsrc interface{}, labels map[string]string, dependent []reconciler.Object) []reconciler.Observable {
+	return k8s.NewObservables().
+		WithLabels(labels).
+		For(&appsv1.StatefulSetList{}).
+		For(&corev1.ConfigMapList{}).
+		For(&corev1.ServiceAccountList{}).
+		For(&rbacv1.RoleBindingList{}).
+		Get()
+}
+
+// Objects returns the scheduler resources: StatefulSet, service account and
+// rolebinding, plus (for the k8s executor) an airflow configmap embedding the
+// SQLAlchemy connection string. Skipped when the scheduler is unconfigured or
+// a configured MemoryStore has not yet reported a host.
+func (s *Scheduler) Objects(rsrc interface{}, rsrclabels map[string]string, observed, dependent, aggregated []reconciler.Object) ([]reconciler.Object, error) {
+	r := rsrc.(*alpha1.AirflowCluster)
+	if r.Spec.Scheduler == nil {
+		return []reconciler.Object{}, nil
+	}
+
+	if r.Spec.MemoryStore != nil && r.Spec.MemoryStore.Status.Host == "" {
+		return []reconciler.Object{}, nil
+	}
+
+	b := k8s.GetItem(dependent, &alpha1.AirflowBase{}, r.Spec.AirflowBaseRef.Name, r.Namespace)
+	base := b.(*alpha1.AirflowBase)
+	bag := k8s.NewObjects()
+	// Mark the git credentials secret as referred so it is tracked but not owned.
+	if r.Spec.DAGs != nil {
+		git := r.Spec.DAGs.Git
+		if git != nil && git.CredSecretRef != nil {
+			bag.WithReferredItem(&corev1.Secret{}, git.CredSecretRef.Name, r.Namespace)
+		}
+	}
+
+	ngdata := templateValue(r, dependent, common.ValueAirflowComponentScheduler, rsrclabels, rsrclabels, nil)
+	bag.WithValue(ngdata).WithFolder("templates/")
+
+	if r.Spec.Executor == alpha1.ExecutorK8s {
+		sqlSvcName := common.RsrcName(r.Spec.AirflowBaseRef.Name, common.ValueAirflowComponentSQL, "")
+		sqlSecret := common.RsrcName(r.Name, common.ValueAirflowComponentUI, "")
+		se := k8s.GetItem(dependent, &corev1.Secret{}, sqlSecret, r.Namespace)
+		secret := se.(*corev1.Secret)
+
+		dbPrefix := "mysql"
+		port := "3306"
+		if base.Spec.Postgres != nil {
+			dbPrefix = "postgresql+psycopg2"
+			port = "5432"
+		}
+		// SQLAlchemy connection URI embedding the plaintext DB password.
+		conn := dbPrefix + "://" + r.Spec.Scheduler.DBUser + ":" + string(secret.Data["password"]) + "@" + sqlSvcName + ":" + port + "/" + r.Spec.Scheduler.DBName
+
+		ngdata.SQLConn = conn
+		bag.WithTemplate("airflow-configmap.yaml", &corev1.ConfigMapList{})
+	}
+
+	return bag.WithTemplate("scheduler-sts.yaml", &appsv1.StatefulSetList{}, s.sts).
+		WithTemplate("serviceaccount.yaml", &corev1.ServiceAccountList{}, reconciler.NoUpdate).
+		WithTemplate("rolebinding.yaml", &rbacv1.RoleBindingList{}).
+		Build()
+}
+
+// ------------------------------ Worker ----------------------------------------
+
+// sts customizes the worker StatefulSet resources.
+func (s *Worker) sts(o *reconciler.Object, v interface{}) {
+	sts, r := updateSts(o, v)
+	sts.Spec.Template.Spec.Containers[0].Resources = r.Cluster.Spec.Worker.Resources
+}
+
+// Observables lists the labelled resources the worker component observes.
+func (s *Worker) Observables(rsrc interface{}, labels map[string]string, dependent []reconciler.Object) []reconciler.Observable {
+	return k8s.NewObservables().
+		WithLabels(labels).
+		For(&appsv1.StatefulSetList{}).
+		For(&corev1.ServiceList{}).
+		Get()
+}
+
+// DependentResources - return dependant resources
+func (s *Worker) DependentResources(rsrc interface{}) []reconciler.Object {
+	return dependantResources(rsrc)
+}
+
+// Objects returns the worker StatefulSet and headless service (for the log
+// port). Skipped when workers are unconfigured or a configured MemoryStore
+// has not yet reported a host.
+func (s *Worker) Objects(rsrc interface{}, rsrclabels map[string]string, observed, dependent, aggregated []reconciler.Object) ([]reconciler.Object, error) {
+	r := rsrc.(*alpha1.AirflowCluster)
+	if r.Spec.Worker == nil {
+		return []reconciler.Object{}, nil
+	}
+
+	if r.Spec.MemoryStore != nil && r.Spec.MemoryStore.Status.Host == "" {
+		return []reconciler.Object{}, nil
+	}
+
+	ngdata := templateValue(r, dependent, common.ValueAirflowComponentWorker, rsrclabels, rsrclabels, map[string]string{"wlog": "8793"})
+
+	return k8s.NewObjects().
+		WithValue(ngdata).
+		WithTemplate("worker-sts.yaml", &appsv1.StatefulSetList{}, s.sts).
+		WithTemplate("headlesssvc.yaml", &corev1.ServiceList{}).
+		Build()
+}
+
+// ------------------------------ Flower ---------------------------------------
+
+// Observables returns the observables watched for the Flower component:
+// StatefulSets matching the given labels.
+func (s *Flower) Observables(rsrc interface{}, labels map[string]string, dependent []reconciler.Object) []reconciler.Observable {
+	return k8s.NewObservables().
+		WithLabels(labels).
+		For(&appsv1.StatefulSetList{}).
+		Get()
+}
+
+// DependentResources - return dependent resources for the Flower component
+// (delegates to the shared dependantResources helper).
+func (s *Flower) DependentResources(rsrc interface{}) []reconciler.Object {
+	return dependantResources(rsrc)
+}
+
+// Objects returns the list of resource/name for those resources created by
+// the operator for the Flower component of this spec.
+func (s *Flower) Objects(rsrc interface{}, rsrclabels map[string]string, observed, dependent, aggregated []reconciler.Object) ([]reconciler.Object, error) {
+	r := rsrc.(*alpha1.AirflowCluster)
+	// Nothing to create when flower is not configured.
+	if r.Spec.Flower == nil {
+		return []reconciler.Object{}, nil
+	}
+	// Wait for the MemoryStore host to be known before rendering, mirroring
+	// the Worker component.
+	if r.Spec.MemoryStore != nil && r.Spec.MemoryStore.Status.Host == "" {
+		return []reconciler.Object{}, nil
+	}
+	// "flower": 5555 is the port map entry handed to the templates.
+	ngdata := templateValue(r, dependent, common.ValueAirflowComponentFlower, rsrclabels, rsrclabels, map[string]string{"flower": "5555"})
+
+	return k8s.NewObjects().
+		WithValue(ngdata).
+		WithTemplate("flower-sts.yaml", &appsv1.StatefulSetList{}, s.sts).
+		Build()
+}
+
+// sts mutates the rendered flower StatefulSet: it applies the shared
+// updateSts changes and copies the resource requirements from the
+// AirflowCluster flower spec onto the first container.
+func (s *Flower) sts(o *reconciler.Object, v interface{}) {
+	sts, r := updateSts(o, v)
+	sts.Spec.Template.Spec.Containers[0].Resources = r.Cluster.Spec.Flower.Resources
+}
+
+// ------------------------------ MemoryStore ---------------------------------------
+
+// DependentResources - return dependent resources for the MemoryStore
+// component (delegates to the shared dependantResources helper).
+func (s *MemoryStore) DependentResources(rsrc interface{}) []reconciler.Object {
+	return dependantResources(rsrc)
+}
+
+// Observables for memstore: returns a single Cloud Memorystore observable
+// scoped to the project/region parent, or nothing when no MemoryStore is
+// configured.
+func (s *MemoryStore) Observables(rsrc interface{}, labels map[string]string, dependent []reconciler.Object) []reconciler.Observable {
+	r := rsrc.(*alpha1.AirflowCluster)
+	if r.Spec.MemoryStore == nil {
+		return []reconciler.Observable{}
+	}
+	parent, err := redis.GetParent(r.Spec.MemoryStore.Project, r.Spec.MemoryStore.Region)
+	if err != nil {
+		// NOTE(review): the error is silently swallowed here and an empty
+		// observable list is returned; the TODO below acknowledges this.
+		return []reconciler.Observable{}
+		// TODO assert()
+	}
+	return []reconciler.Observable{redis.NewObservable(labels, parent)}
+}
+
+// Objects - returns the Cloud Memorystore (redis) resource for this cluster,
+// or no resources when spec.memoryStore is absent.
+//
+// The returned bag contains a single redis Object whose fields are filled in
+// from the MemoryStore spec (location, network, configs, version, size, tier).
+// Errors from parent resolution or bag construction are returned unchanged.
+func (s *MemoryStore) Objects(rsrc interface{}, rsrclabels map[string]string, observed, dependent, aggregated []reconciler.Object) ([]reconciler.Object, error) {
+	r := rsrc.(*alpha1.AirflowCluster)
+	if r.Spec.MemoryStore == nil {
+		return []reconciler.Object{}, nil
+	}
+	parent, err := redis.GetParent(r.Spec.MemoryStore.Project, r.Spec.MemoryStore.Region)
+	if err != nil {
+		return []reconciler.Object{}, err
+	}
+	bag, err := gcp.NewObjects().
+		WithLabels(rsrclabels).
+		Add(redis.NewObject(parent, r.Name+"-redis")).
+		Build()
+
+	if err != nil {
+		return []reconciler.Object{}, err
+	}
+	robj := bag[0].Obj.(*redis.Object).Redis
+	robj.AlternativeLocationId = r.Spec.MemoryStore.AlternativeLocationID
+	robj.AuthorizedNetwork = r.Spec.MemoryStore.AuthorizedNetwork
+	robj.DisplayName = r.Name + "-redis"
+
+	// setRedisConfig lazily allocates RedisConfigs and records one entry.
+	// This replaces two copies of the same nil-check in the original code.
+	setRedisConfig := func(key, value string) {
+		if robj.RedisConfigs == nil {
+			robj.RedisConfigs = make(map[string]string)
+		}
+		robj.RedisConfigs[key] = value
+	}
+	if r.Spec.MemoryStore.NotifyKeyspaceEvents != "" {
+		setRedisConfig("notify-keyspace-events", r.Spec.MemoryStore.NotifyKeyspaceEvents)
+	}
+	if r.Spec.MemoryStore.MaxMemoryPolicy != "" {
+		setRedisConfig("maxmemory-policy", r.Spec.MemoryStore.MaxMemoryPolicy)
+	}
+
+	robj.RedisVersion = r.Spec.MemoryStore.RedisVersion
+	robj.MemorySizeGb = int64(r.Spec.MemoryStore.MemorySizeGb)
+	robj.Tier = strings.ToUpper(r.Spec.MemoryStore.Tier)
+
+	return bag, nil
+}
+
+// UpdateStatus - update status block of the MemoryStore spec from the
+// reconciled redis instance, and return a requeue period (30s while the
+// instance is still provisioning, 0 otherwise).
+func (s *MemoryStore) UpdateStatus(rsrc interface{}, reconciled []reconciler.Object, err error) time.Duration {
+	var period time.Duration
+	r := rsrc.(*alpha1.AirflowCluster)
+	if r.Spec.MemoryStore == nil {
+		return period
+	}
+	stts := &r.Spec.MemoryStore.Status
+	ready := false
+	if len(reconciled) != 0 {
+		// Copy the observed instance fields into the CR status.
+		instance := reconciled[0].Obj.(*redis.Object).Redis
+		stts.CreateTime = instance.CreateTime
+		stts.CurrentLocationID = instance.CurrentLocationId
+		stts.Host = instance.Host
+		stts.Port = instance.Port
+		stts.State = instance.State
+		// Requeue while the instance has not yet reached a stable state.
+		if instance.State != "READY" && instance.State != "MAINTENANCE" {
+			period = time.Second * 30
+		}
+		stts.StatusMessage = instance.StatusMessage
+		ready = true
+		stts.Meta.UpdateStatus(&ready, err)
+	} else {
+		// No reconciled instance yet: mark not-ready and poll again.
+		period = time.Second * 30
+		stts.Meta.UpdateStatus(&ready, err)
+	}
+	return period
+}
+
+// Differs returns true if the resource needs to be updated.
+// NOTE(review): the real comparison is commented out, so every reconcile
+// currently treats the resource as changed — confirm this is intentional.
+func (s *MemoryStore) Differs(expected reconciler.Object, observed reconciler.Object) bool {
+	return true //differs(expected, observed)
+}
+
+// Finalize - finalizes MemoryStore component when it is deleted.
+// While observed redis objects remain, it keeps the Cleanup finalizer on the
+// CR and marks each observed item for deletion; once nothing is observed the
+// finalizer is removed so deletion can proceed.
+func (s *MemoryStore) Finalize(rsrc interface{}, observed, dependent []reconciler.Object) error {
+	r := rsrc.(*alpha1.AirflowCluster)
+	if r.Spec.MemoryStore == nil {
+		return nil
+	}
+	obj := r.Spec.MemoryStore
+	obj.Status.NotReady("Finalizing", "Finalizing in progress")
+	if len(observed) != 0 {
+		finalizer.Add(r, finalizer.Cleanup)
+		// Flag every observed item so the reconciler deletes it.
+		items := observed
+		for i := range items {
+			items[i].Delete = true
+		}
+		obj.Status.SetCondition(status.Cleanup, "InProgress", "Items pending deletion")
+	} else {
+		finalizer.Remove(r, finalizer.Cleanup)
+	}
+	return nil
+}
diff --git a/pkg/controller/airflowcluster/airflowcluster_controller_suite_test.go b/pkg/controller/airflowcluster/airflowcluster_controller_suite_test.go
new file mode 100644
index 0000000..93cad71
--- /dev/null
+++ b/pkg/controller/airflowcluster/airflowcluster_controller_suite_test.go
@@ -0,0 +1,74 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package airflowcluster
+
+import (
+	"log"
+	"os"
+	"path/filepath"
+	"sync"
+	"testing"
+
+	"github.com/onsi/gomega"
+	"k8s.io/airflow-operator/pkg/apis"
+	"k8s.io/client-go/kubernetes/scheme"
+	"k8s.io/client-go/rest"
+	"sigs.k8s.io/controller-runtime/pkg/envtest"
+	"sigs.k8s.io/controller-runtime/pkg/manager"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+)
+
+var cfg *rest.Config
+
+// TestMain boots a local control plane (envtest) with the project CRDs
+// installed, runs the package tests against it, then tears it down.
+func TestMain(m *testing.M) {
+	t := &envtest.Environment{
+		CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "crds")},
+	}
+	// NOTE(review): any result from AddToScheme is ignored here — confirm
+	// that scheme registration cannot fail for these types.
+	apis.AddToScheme(scheme.Scheme)
+
+	var err error
+	if cfg, err = t.Start(); err != nil {
+		log.Fatal(err)
+	}
+
+	code := m.Run()
+	t.Stop()
+	os.Exit(code)
+}
+
+// SetupTestReconcile returns a reconcile.Reconcile implementation that delegates to inner and
+// writes the request to requests after Reconcile is finished.
+// The channel is unbuffered, so each Reconcile blocks until the test reads
+// the request — tests must drain the channel.
+func SetupTestReconcile(inner reconcile.Reconciler) (reconcile.Reconciler, chan reconcile.Request) {
+	requests := make(chan reconcile.Request)
+	fn := reconcile.Func(func(req reconcile.Request) (reconcile.Result, error) {
+		result, err := inner.Reconcile(req)
+		requests <- req
+		return result, err
+	})
+	return fn, requests
+}
+
+// StartTestManager runs mgr.Start in a background goroutine and returns the
+// stop channel plus a WaitGroup that completes once the manager has exited.
+//
+// Fix: wg.Add(1) must be called BEFORE the goroutine is spawned. Calling it
+// inside the goroutine races with a caller's wg.Wait() — Wait can observe a
+// zero counter and return before the manager has even started. Done is
+// deferred so the counter is released even if the Expect assertion panics.
+func StartTestManager(mgr manager.Manager, g *gomega.GomegaWithT) (chan struct{}, *sync.WaitGroup) {
+	stop := make(chan struct{})
+	wg := &sync.WaitGroup{}
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		g.Expect(mgr.Start(stop)).NotTo(gomega.HaveOccurred())
+	}()
+	return stop, wg
+}
diff --git a/pkg/controller/airflowcluster/airflowcluster_controller_test.go b/pkg/controller/airflowcluster/airflowcluster_controller_test.go
new file mode 100644
index 0000000..b62833b
--- /dev/null
+++ b/pkg/controller/airflowcluster/airflowcluster_controller_test.go
@@ -0,0 +1,133 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package airflowcluster
+
+import (
+	"testing"
+	"time"
+
+	"github.com/onsi/gomega"
+	"golang.org/x/net/context"
+	airflowv1alpha1 "k8s.io/airflow-operator/pkg/apis/airflow/v1alpha1"
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/manager"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+)
+
+var c client.Client
+
+// expectedRequest is the reconcile request produced by creating the "foo"
+// AirflowCluster; the keys below name the child StatefulSets the controller
+// is expected to create for each component.
+var expectedRequest = reconcile.Request{NamespacedName: types.NamespacedName{Name: "foo", Namespace: "default"}}
+var rediskey = types.NamespacedName{Name: "foo-redis", Namespace: "default"}
+var uikey = types.NamespacedName{Name: "foo-airflowui", Namespace: "default"}
+var workerkey = types.NamespacedName{Name: "foo-worker", Namespace: "default"}
+var flowerkey = types.NamespacedName{Name: "foo-flower", Namespace: "default"}
+var schedulerkey = types.NamespacedName{Name: "foo-scheduler", Namespace: "default"}
+
+// timeout bounds every Eventually poll in this file.
+const timeout = time.Second * 5
+
+// TestReconcile creates an AirflowBase and a Celery AirflowCluster against
+// the envtest control plane and verifies that the controller creates one
+// StatefulSet per component (redis, ui, worker, scheduler, flower), then
+// that deleting the scheduler triggers re-reconciliation and re-creation.
+func TestReconcile(t *testing.T) {
+	g := gomega.NewGomegaWithT(t)
+	base := &airflowv1alpha1.AirflowBase{
+		ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"},
+		Spec: airflowv1alpha1.AirflowBaseSpec{
+			MySQL: &airflowv1alpha1.MySQLSpec{
+				Operator: false,
+			},
+			Storage: &airflowv1alpha1.NFSStoreSpec{
+				Version: "",
+			},
+		},
+	}
+
+	cluster := &airflowv1alpha1.AirflowCluster{
+		ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"},
+		Spec: airflowv1alpha1.AirflowClusterSpec{
+			Executor:  "Celery",
+			Redis:     &airflowv1alpha1.RedisSpec{Operator: false},
+			Scheduler: &airflowv1alpha1.SchedulerSpec{Version: "1.10.2"},
+			UI:        &airflowv1alpha1.AirflowUISpec{Replicas: 1, Version: "1.10.2"},
+			Worker:    &airflowv1alpha1.WorkerSpec{Replicas: 2, Version: "1.10.2"},
+			Flower:    &airflowv1alpha1.FlowerSpec{Replicas: 1, Version: "1.10.2"},
+			DAGs: &airflowv1alpha1.DagSpec{
+				DagSubdir: "airflow/example_dags/",
+				Git: &airflowv1alpha1.GitSpec{
+					Repo: "https://github.com/apache/incubator-airflow/",
+					Once: true,
+				},
+			},
+			AirflowBaseRef: &corev1.LocalObjectReference{Name: "foo"},
+		},
+	}
+
+	// Setup the Manager and Controller.  Wrap the Controller Reconcile function so it writes each request to a
+	// channel when it is finished.
+	mgr, err := manager.New(cfg, manager.Options{})
+	g.Expect(err).NotTo(gomega.HaveOccurred())
+	c = mgr.GetClient()
+
+	r := newReconciler(mgr)
+	recFn, requests := SetupTestReconcile(r)
+	g.Expect(r.Controller(recFn)).NotTo(gomega.HaveOccurred())
+
+	stopMgr, mgrStopped := StartTestManager(mgr, g)
+
+	defer func() {
+		close(stopMgr)
+		mgrStopped.Wait()
+	}()
+
+	// Create the AirflowCluster object and expect the Reconcile and Deployment to be created
+	// NOTE(review): the error from creating `base` is immediately
+	// overwritten by the cluster Create below and never checked — consider
+	// asserting on it separately.
+	err = c.Create(context.TODO(), base)
+	err = c.Create(context.TODO(), cluster)
+	// The cluster object may not be a valid object because it might be missing some required fields.
+	// Please modify the cluster object by adding required fields and then remove the following if statement.
+	if apierrors.IsInvalid(err) {
+		t.Logf("failed to create object, got an invalid object error: %v", err)
+		return
+	}
+	g.Expect(err).NotTo(gomega.HaveOccurred())
+	defer c.Delete(context.TODO(), cluster)
+	g.Eventually(requests, timeout).Should(gomega.Receive(gomega.Equal(expectedRequest)))
+
+	// Each component should materialize as a StatefulSet named <cr>-<component>.
+	redis := &appsv1.StatefulSet{}
+	ui := &appsv1.StatefulSet{}
+	worker := &appsv1.StatefulSet{}
+	flower := &appsv1.StatefulSet{}
+	scheduler := &appsv1.StatefulSet{}
+	g.Eventually(func() error { return c.Get(context.TODO(), rediskey, redis) }, timeout).Should(gomega.Succeed())
+	g.Eventually(func() error { return c.Get(context.TODO(), uikey, ui) }, timeout).Should(gomega.Succeed())
+	g.Eventually(func() error { return c.Get(context.TODO(), workerkey, worker) }, timeout).Should(gomega.Succeed())
+	g.Eventually(func() error { return c.Get(context.TODO(), schedulerkey, scheduler) }, timeout).Should(gomega.Succeed())
+	g.Eventually(func() error { return c.Get(context.TODO(), flowerkey, flower) }, timeout).Should(gomega.Succeed())
+
+	// Delete the Deployment and expect Reconcile to be called for Deployment deletion
+	g.Expect(c.Delete(context.TODO(), scheduler)).NotTo(gomega.HaveOccurred())
+	g.Eventually(requests, timeout).Should(gomega.Receive(gomega.Equal(expectedRequest)))
+	g.Eventually(func() error { return c.Get(context.TODO(), schedulerkey, scheduler) }, timeout).
+		Should(gomega.Succeed())
+
+	// Manually delete Deployment since GC isn't enabled in the test control plane
+	g.Expect(c.Delete(context.TODO(), redis)).To(gomega.Succeed())
+	g.Expect(c.Delete(context.TODO(), ui)).To(gomega.Succeed())
+	g.Expect(c.Delete(context.TODO(), worker)).To(gomega.Succeed())
+	g.Expect(c.Delete(context.TODO(), flower)).To(gomega.Succeed())
+	g.Expect(c.Delete(context.TODO(), scheduler)).To(gomega.Succeed())
+}
diff --git a/pkg/controller/application/application.go b/pkg/controller/application/application.go
new file mode 100644
index 0000000..b16b2a1
--- /dev/null
+++ b/pkg/controller/application/application.go
@@ -0,0 +1,111 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package application
+
+import (
+	app "github.com/kubernetes-sigs/application/pkg/apis/app/v1beta1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"sigs.k8s.io/controller-reconciler/pkg/reconciler"
+	"sigs.k8s.io/controller-reconciler/pkg/reconciler/manager/k8s"
+)
+
+// Application wraps the application CRD type so builder-style helper
+// methods can be attached in this package.
+type Application struct {
+	*app.Application
+}
+
+// NewApplication - return Application object from runtime Object.
+// Panics (via the type assertion) if obj is not an *app.Application.
+func NewApplication(obj metav1.Object) Application {
+	return Application{obj.(*app.Application)}
+}
+
+// SetSelector attaches a MatchLabels selector built from labels to the
+// Application object; returns the receiver for chaining.
+func (a *Application) SetSelector(labels map[string]string) *Application {
+	a.Spec.Selector = &metav1.LabelSelector{MatchLabels: labels}
+	return a
+}
+
+// SetName sets the object name; returns the receiver for chaining.
+func (a *Application) SetName(value string) *Application {
+	a.ObjectMeta.Name = value
+	return a
+}
+
+// SetNamespace sets the object namespace; returns the receiver for chaining.
+func (a *Application) SetNamespace(value string) *Application {
+	a.ObjectMeta.Namespace = value
+	return a
+}
+
+// AddLabels adds more labels: the existing object labels are merged into
+// value, and value becomes the new label map.
+// NOTE(review): which side wins on key conflicts depends on KVMap.Merge —
+// confirm against the reconciler package.
+func (a *Application) AddLabels(value reconciler.KVMap) *Application {
+	value.Merge(a.ObjectMeta.Labels)
+	a.ObjectMeta.Labels = value
+	return a
+}
+
+// Observable returns a reconciler.Observable wrapping this Application,
+// watched by its labels.
+func (a *Application) Observable() *reconciler.Observable {
+	return &reconciler.Observable{
+		Obj: k8s.Observable{
+			Obj:     a.Application,
+			ObjList: &app.ApplicationList{},
+			Labels:  a.GetLabels(),
+		},
+		Type: k8s.Type,
+	}
+}
+
+// Item returns a managed-lifecycle reconciler.Object wrapping this
+// Application.
+func (a *Application) Item() *reconciler.Object {
+	return &reconciler.Object{
+		Lifecycle: reconciler.LifecycleManaged,
+		Type:      k8s.Type,
+		Obj: &k8s.Object{
+			Obj:     a.Application,
+			ObjList: &app.ApplicationList{},
+		},
+	}
+}
+
+// AddToScheme appends the application CRD's AddToScheme function to the
+// given SchemeBuilder.
+func AddToScheme(sb *runtime.SchemeBuilder) {
+	*sb = append(*sb, app.AddToScheme)
+}
+
+// SetComponentGK attaches component GroupKinds to the Application object.
+// It walks the k8s-typed objects in bag and records each distinct
+// GroupKind exactly once (gkmap is used for de-duplication).
+func (a *Application) SetComponentGK(bag []reconciler.Object) *Application {
+	a.Spec.ComponentGroupKinds = []metav1.GroupKind{}
+	gkmap := map[schema.GroupKind]struct{}{}
+	for _, item := range reconciler.ObjectsByType(bag, k8s.Type) {
+		obj := item.Obj.(*k8s.Object)
+		if obj.ObjList != nil {
+			ro := obj.Obj.(runtime.Object)
+			gk := ro.GetObjectKind().GroupVersionKind().GroupKind()
+			if _, ok := gkmap[gk]; !ok {
+				gkmap[gk] = struct{}{}
+				mgk := metav1.GroupKind{
+					Group: gk.Group,
+					Kind:  gk.Kind,
+				}
+				a.Spec.ComponentGroupKinds = append(a.Spec.ComponentGroupKinds, mgk)
+			}
+		}
+	}
+	return a
+}
diff --git a/pkg/controller/application/application_suite_test.go b/pkg/controller/application/application_suite_test.go
new file mode 100644
index 0000000..89b3b49
--- /dev/null
+++ b/pkg/controller/application/application_suite_test.go
@@ -0,0 +1,28 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package application_test
+
+import (
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+	"testing"
+)
+
+// TestEventhandler runs the ginkgo specs in this package.
+// NOTE(review): RunSpecsWithDefaultAndCustomReporters is a Ginkgo v1 API —
+// presumably fine for the pinned dependency; revisit on upgrade.
+func TestEventhandler(t *testing.T) {
+	RegisterFailHandler(Fail)
+	RunSpecsWithDefaultAndCustomReporters(t, "Application Suite", []Reporter{})
+}
diff --git a/pkg/controller/application/application_test.go b/pkg/controller/application/application_test.go
new file mode 100644
index 0000000..6cb6307
--- /dev/null
+++ b/pkg/controller/application/application_test.go
@@ -0,0 +1,99 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package application_test
+
+import (
+	app "github.com/kubernetes-sigs/application/pkg/apis/app/v1beta1"
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+	"k8s.io/airflow-operator/pkg/controller/application"
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"sigs.k8s.io/controller-reconciler/pkg/reconciler"
+	"sigs.k8s.io/controller-reconciler/pkg/reconciler/manager/k8s"
+)
+
+// Specs for the Application helper: component GroupKind collection,
+// selector setting, and metadata setters, exercised against two fake
+// k8s-typed resources (a Deployment and a ConfigMap).
+var _ = Describe("Application", func() {
+	BeforeEach(func() {
+	})
+	labels := map[string]string{
+		"k1": "v1",
+		"k2": "v2",
+		"k3": "v3",
+	}
+	var resources []reconciler.Object = []reconciler.Object{
+		{
+			Lifecycle: reconciler.LifecycleManaged,
+			Type:      k8s.Type,
+			Obj: &k8s.Object{
+				ObjList: &appsv1.DeploymentList{},
+				Obj: &appsv1.Deployment{
+					TypeMeta: metav1.TypeMeta{
+						Kind:       "k3",
+						APIVersion: "v4",
+					},
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "n-deploy",
+						Namespace: "ns",
+						Labels:    labels,
+					},
+				},
+			},
+		},
+		{
+			Lifecycle: reconciler.LifecycleManaged,
+			Type:      k8s.Type,
+			Obj: &k8s.Object{
+				ObjList: &corev1.ConfigMapList{},
+				Obj: &corev1.ConfigMap{
+					TypeMeta: metav1.TypeMeta{
+						Kind:       "k1",
+						APIVersion: "v2",
+					},
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "n-cm",
+						Namespace: "ns",
+						Labels:    labels,
+					},
+					Data: map[string]string{
+						"test-key": "test-value",
+					},
+				},
+			},
+		},
+	}
+	a := application.NewApplication(&app.Application{})
+	Describe("Status", func() {
+		It("Sets Component Group Kind", func(done Done) {
+			a.SetComponentGK(resources)
+			Expect(len(a.Spec.ComponentGroupKinds)).To(Equal(len(resources)))
+			close(done)
+		})
+		It("Sets Selector", func(done Done) {
+			a.SetSelector(labels)
+			Expect(a.Spec.Selector.MatchLabels).To(Equal(labels))
+			close(done)
+		})
+		It("Sets Meta Name Namespace Labels", func(done Done) {
+			// SetLabels presumably comes from the embedded ObjectMeta
+			// (metav1.Object) rather than this package — confirm.
+			a.SetName("somename").SetNamespace("somens").SetLabels(labels)
+			Expect(a.ObjectMeta.Name).To(Equal("somename"))
+			Expect(a.ObjectMeta.Namespace).To(Equal("somens"))
+			Expect(a.ObjectMeta.Labels).To(Equal(labels))
+			close(done)
+		})
+	})
+})
diff --git a/pkg/controller/application/doc.go b/pkg/controller/application/doc.go
new file mode 100644
index 0000000..87b36ad
--- /dev/null
+++ b/pkg/controller/application/doc.go
@@ -0,0 +1,18 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package application contains code for creating appcrd
+
+package application
diff --git a/pkg/controller/common/common.go b/pkg/controller/common/common.go
new file mode 100644
index 0000000..69dd35c
--- /dev/null
+++ b/pkg/controller/common/common.go
@@ -0,0 +1,119 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+	"bytes"
+	"fmt"
+	alpha1 "k8s.io/airflow-operator/pkg/apis/airflow/v1alpha1"
+	corev1 "k8s.io/api/core/v1"
+	"math/rand"
+	"sigs.k8s.io/controller-reconciler/pkg/reconciler"
+	"sigs.k8s.io/controller-reconciler/pkg/reconciler/manager/k8s"
+	"time"
+)
+
+// Constants
+const (
+	// Character sets used by RandomAlphanumericString; the first character
+	// of a generated value is drawn from the alphabetic-only set.
+	PasswordCharNumSpace = "abcdefghijklmnopqrstuvwxyz0123456789"
+	PasswordCharSpace    = "abcdefghijklmnopqrstuvwxyz"
+
+	// Label keys/values used to tag and select operator-managed resources.
+	LabelAirflowCR                   = "airflow-cr"
+	ValueAirflowCRBase               = "airflow-base"
+	ValueAirflowCRCluster            = "airflow-cluster"
+	LabelAirflowCRName               = "airflow-cr-name"
+	LabelAirflowComponent            = "airflow-component"
+	ValueAirflowComponentMemoryStore = "redis"
+	ValueAirflowComponentMySQL       = "mysql"
+	ValueAirflowComponentPostgres    = "postgres"
+	ValueAirflowComponentSQLProxy    = "sqlproxy"
+	ValueAirflowComponentBase        = "base"
+	ValueAirflowComponentCluster     = "cluster"
+	ValueAirflowComponentSQL         = "sql"
+	ValueAirflowComponentUI          = "airflowui"
+	ValueAirflowComponentNFS         = "nfs"
+	ValueAirflowComponentRedis       = "redis"
+	ValueAirflowComponentScheduler   = "scheduler"
+	ValueAirflowComponentWorker      = "worker"
+	ValueAirflowComponentFlower      = "flower"
+	ValueSQLProxyTypeMySQL           = "mysql"
+	ValueSQLProxyTypePostgres        = "postgres"
+	LabelApp                         = "app"
+
+	KindAirflowBase    = "AirflowBase"
+	KindAirflowCluster = "AirflowCluster"
+
+	PodManagementPolicyParallel = "Parallel"
+
+	// TemplatePath is the directory that component yaml templates are
+	// loaded from.
+	TemplatePath = "templates/"
+)
+
+var (
+	// random is a package-level PRNG seeded once at load time.
+	// NOTE(review): math/rand is not cryptographically secure; if values
+	// from RandomAlphanumericString protect real credentials, consider
+	// crypto/rand — confirm threat model.
+	random = rand.New(rand.NewSource(time.Now().UnixNano()))
+)
+
+// optionsToString renders each map entry as "<prefix><key> <value> " and
+// concatenates the results.
+// NOTE(review): Go map iteration order is randomized, so output ordering
+// is nondeterministic across calls — confirm callers never compare the
+// result textually.
+func optionsToString(options map[string]string, prefix string) string {
+	var buf bytes.Buffer
+	for k, v := range options {
+		buf.WriteString(fmt.Sprintf("%s%s %s ", prefix, k, v))
+	}
+	return buf.String()
+}
+
+// RandomAlphanumericString generates a random password of some fixed length.
+// The first character is always alphabetic (never a digit); the rest are
+// lowercase alphanumerics.
+// NOTE(review): backed by math/rand (see `random` above), which is not
+// cryptographically secure — verify acceptability for password generation.
+func RandomAlphanumericString(strlen int) []byte {
+	result := make([]byte, strlen)
+	for i := range result {
+		result[i] = PasswordCharNumSpace[random.Intn(len(PasswordCharNumSpace))]
+	}
+	result[0] = PasswordCharSpace[random.Intn(len(PasswordCharSpace))]
+	return result
+}
+
+// RsrcName - create a resource name of the form "<name>-<component><suffix>".
+func RsrcName(name string, component string, suffix string) string {
+	return name + "-" + component + suffix
+}
+
+// TemplateValue replacer: the value object handed to the yaml templates
+// under TemplatePath when rendering component resources.
+type TemplateValue struct {
+	Name        string
+	Namespace   string
+	SecretName  string
+	SvcName     string
+	Base        *alpha1.AirflowBase
+	Cluster     *alpha1.AirflowCluster
+	Labels      reconciler.KVMap
+	Selector    reconciler.KVMap
+	Ports       map[string]string
+	Secret      map[string]string
+	PDBMinAvail string
+	Expected    []reconciler.Object
+	SQLConn     string
+}
+
+// differs returns true if the resource needs to be updated.
+// ServiceAccounts and Secrets are deliberately never updated once created —
+// presumably because secrets hold generated credentials that should not be
+// rotated on every reconcile; confirm before changing.
+func differs(expected reconciler.Object, observed reconciler.Object) bool {
+	switch expected.Obj.(*k8s.Object).Obj.(type) {
+	case *corev1.ServiceAccount:
+		// Dont update a SA
+		return false
+	case *corev1.Secret:
+		// Dont update a secret
+		return false
+	}
+	return true
+}
diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go
new file mode 100644
index 0000000..35786a7
--- /dev/null
+++ b/pkg/controller/controller.go
@@ -0,0 +1,33 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package controller
+
+import (
+	"sigs.k8s.io/controller-runtime/pkg/manager"
+)
+
+// AddToManagerFuncs is a list of functions to add all Controllers to the Manager.
+// Controller packages append to it from their init/Add functions.
+var AddToManagerFuncs []func(manager.Manager) error
+
+// AddToManager adds all Controllers to the Manager, stopping at the first
+// registration error.
+func AddToManager(m manager.Manager) error {
+	for _, f := range AddToManagerFuncs {
+		if err := f(m); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/pkg/webhook/webhook.go b/pkg/webhook/webhook.go
new file mode 100644
index 0000000..b1a01ba
--- /dev/null
+++ b/pkg/webhook/webhook.go
@@ -0,0 +1,36 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package webhook
+
+import (
+	"sigs.k8s.io/controller-runtime/pkg/manager"
+)
+
+// AddToManagerFuncs is a list of functions to add all Controllers to the Manager.
+var AddToManagerFuncs []func(manager.Manager) error
+
+// AddToManager adds all Controllers to the Manager, stopping at the first
+// registration error. The kubebuilder markers below generate the RBAC rules
+// the webhook server needs (webhook configs, secrets, services).
+// +kubebuilder:rbac:groups=admissionregistration.k8s.io,resources=mutatingwebhookconfigurations;validatingwebhookconfigurations,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch;delete
+func AddToManager(m manager.Manager) error {
+	for _, f := range AddToManagerFuncs {
+		if err := f(m); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/templates/airflow-configmap.yaml b/templates/airflow-configmap.yaml
new file mode 100644
index 0000000..9e560e5
--- /dev/null
+++ b/templates/airflow-configmap.yaml
@@ -0,0 +1,378 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{.Name}}
+  namespace: {{.Namespace}}
+  labels:
+    {{range $k,$v := .Labels }}
+    {{$k}}: {{$v}}
+    {{end}}
+  annotations:
+    {{range $k,$v := .Cluster.Spec.Annotations }}
+    {{$k}}: {{$v}}
+    {{end}}
+data:
+  airflow.cfg: |
+    [core]
+    airflow_home = /usr/local/airflow
+    dags_folder = /usr/local/airflow/dags
+    base_log_folder = /usr/local/airflow/logs
+    logging_level = INFO
+    executor = KubernetesExecutor
+    parallelism = 32
+    load_examples = False
+    plugins_folder = /usr/local/airflow/plugins
+    sql_alchemy_conn = {{.SQLConn}}
+
+    [scheduler]
+    dag_dir_list_interval = 300
+    child_process_log_directory = /usr/local/airflow/logs/scheduler
+    # Task instances listen for external kill signal (when you clear tasks
+    # from the CLI or the UI), this defines the frequency at which they should
+    # listen (in seconds).
+    job_heartbeat_sec = 5
+    max_threads = 2
+
+    # The scheduler constantly tries to trigger new tasks (look at the
+    # scheduler section in the docs for more information). This defines
+    # how often the scheduler should run (in seconds).
+    scheduler_heartbeat_sec = 5
+
+    # after how much time should the scheduler terminate in seconds
+    # -1 indicates to run continuously (see also num_runs)
+    run_duration = -1
+
+    # after how much time a new DAGs should be picked up from the filesystem
+    min_file_process_interval = 0
+
+    statsd_on = False
+    statsd_host = localhost
+    statsd_port = 8125
+    statsd_prefix = airflow
+
+    print_stats_interval = 30
+    scheduler_zombie_task_threshold = 300
+    max_tis_per_query = 0
+    authenticate = False
+
+    # Turn off scheduler catchup by setting this to False.
+    # Default behavior is unchanged and
+    # Command Line Backfills still work, but the scheduler
+    # will not do scheduler catchup if this is False,
+    # however it can be set on a per DAG basis in the
+    # DAG definition (catchup)
+    catchup_by_default = True
+
+    [webserver]
+    # The base url of your website as airflow cannot guess what domain or
+    # cname you are using. This is used in automated emails that
+    # airflow sends to point links to the right web server
+    base_url = http://localhost:8080
+
+    # The ip specified when starting the web server
+    web_server_host = 0.0.0.0
+
+    # The port on which to run the web server
+    web_server_port = 8080
+
+    # Paths to the SSL certificate and key for the web server. When both are
+    # provided SSL will be enabled. This does not change the web server port.
+    web_server_ssl_cert =
+    web_server_ssl_key =
+
+    # Number of seconds the webserver waits before killing gunicorn master that doesn't respond
+    web_server_master_timeout = 120
+
+    # Number of seconds the gunicorn webserver waits before timing out on a worker
+    web_server_worker_timeout = 120
+
+    # Number of workers to refresh at a time. When set to 0, worker refresh is
+    # disabled. When nonzero, airflow periodically refreshes webserver workers by
+    # bringing up new ones and killing old ones.
+    worker_refresh_batch_size = 1
+
+    # Number of seconds to wait before refreshing a batch of workers.
+    worker_refresh_interval = 30
+
+    # Secret key used to run your flask app
+    secret_key = temporary_key
+
+    # Number of workers to run the Gunicorn web server
+    workers = 4
+
+    # The worker class gunicorn should use. Choices include
+    # sync (default), eventlet, gevent
+    worker_class = sync
+
+    # Log files for the gunicorn webserver. '-' means log to stderr.
+    access_logfile = -
+    error_logfile = -
+
+    # Expose the configuration file in the web server
+    expose_config = False
+
+    # Set to true to turn on authentication:
+    # https://airflow.incubator.apache.org/security.html#web-authentication
+    authenticate = False
+
+    # Filter the list of dags by owner name (requires authentication to be enabled)
+    filter_by_owner = False
+
+    # Filtering mode. Choices include user (default) and ldapgroup.
+    # Ldap group filtering requires using the ldap backend
+    #
+    # Note that the ldap server needs the "memberOf" overlay to be set up
+    # in order to user the ldapgroup mode.
+    owner_mode = user
+
+    # Default DAG view.  Valid values are:
+    # tree, graph, duration, gantt, landing_times
+    dag_default_view = tree
+
+    # Default DAG orientation. Valid values are:
+    # LR (Left->Right), TB (Top->Bottom), RL (Right->Left), BT (Bottom->Top)
+    dag_orientation = LR
+
+    # Puts the webserver in demonstration mode; blurs the names of Operators for
+    # privacy.
+    demo_mode = False
+
+    # The amount of time (in secs) webserver will wait for initial handshake
+    # while fetching logs from other worker machine
+    log_fetch_timeout_sec = 5
+
+    # By default, the webserver shows paused DAGs. Flip this to hide paused
+    # DAGs by default
+    hide_paused_dags_by_default = False
+
+    # Consistent page size across all listing views in the UI
+    page_size = 100
+
+    # Use FAB-based webserver with RBAC feature
+    rbac = True
+
+    [smtp]
+    # If you want airflow to send emails on retries, failure, and you want to use
+    # the airflow.utils.email.send_email_smtp function, you have to configure an
+    # smtp server here
+    smtp_host = localhost
+    smtp_starttls = True
+    smtp_ssl = False
+    # Uncomment and set the user/pass settings if you want to use SMTP AUTH
+    # smtp_user = airflow
+    # smtp_password = airflow
+    smtp_port = 25
+    smtp_mail_from = airflow@example.com
+
+    [kubernetes]
+    airflow_configmap = {{.Name}}
+    worker_container_repository = {{.Cluster.Spec.Worker.Image}}
+    worker_container_tag = {{.Cluster.Spec.Worker.Version}}
+    worker_container_image_pull_policy = IfNotPresent
+    delete_worker_pods = True
+    worker_service_account_name = 
+    git_repo = {{.Cluster.Spec.DAGs.Git.Repo}}
+    git_branch = {{.Cluster.Spec.DAGs.Git.Branch}}
+    git_subpath = {{.Cluster.Spec.DAGs.DagSubdir}}
+    git_dags_folder_mount_point = /usr/local/airflow/dags/
+    git_sync_dest = gitdags
+    git_user =
+    git_password =
+    in_cluster = True
+    namespace = {{.Namespace}}
+    gcp_service_account_keys = 
+
+    # For cloning DAGs from git repositories into volumes: https://github.com/kubernetes/git-sync
+    git_sync_container_repository = gcr.io/google-containers/git-sync-amd64
+    git_sync_container_tag = v2.0.5
+    git_sync_init_container_name = git-sync-clone
+
+    [kubernetes_node_selectors]
+    # The Key-value pairs to be given to worker pods.
+    # The worker pods will be scheduled to the nodes of the specified key-value pairs.
+    # Should be supplied in the format: key = value
+
+    [hive]
+    # Default mapreduce queue for HiveOperator tasks
+    default_hive_mapred_queue =
+
+    [celery]
+    # This section only applies if you are using the CeleryExecutor in
+    # [core] section above
+
+    # The app name that will be used by celery
+    celery_app_name = airflow.executors.celery_executor
+
+    # The concurrency that will be used when starting workers with the
+    # "airflow worker" command. This defines the number of task instances that
+    # a worker will take, so size up your workers based on the resources on
+    # your worker box and the nature of your tasks
+    worker_concurrency = 16
+
+    # When you start an airflow worker, airflow starts a tiny web server
+    # subprocess to serve the workers local log files to the airflow main
+    # web server, who then builds pages and sends them to users. This defines
+    # the port on which the logs are served. It needs to be unused, and open
+    # visible from the main web server to connect into the workers.
+    worker_log_server_port = 8793
+
+    # The Celery broker URL. Celery supports RabbitMQ, Redis and experimentally
+    # a sqlalchemy database. Refer to the Celery documentation for more
+    # information.
+    # http://docs.celeryproject.org/en/latest/userguide/configuration.html#broker-settings
+    broker_url = sqla+mysql://airflow:airflow@localhost:3306/airflow
+
+    # The Celery result_backend. When a job finishes, it needs to update the
+    # metadata of the job. Therefore it will post a message on a message bus,
+    # or insert it into a database (depending of the backend)
+    # This status is used by the scheduler to update the state of the task
+    # The use of a database is highly recommended
+    # http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-result-backend-settings
+    result_backend = db+mysql://airflow:airflow@localhost:3306/airflow
+
+    # Celery Flower is a sweet UI for Celery. Airflow has a shortcut to start
+    # it `airflow flower`. This defines the IP that Celery Flower runs on
+    flower_host = 0.0.0.0
+
+    # The root URL for Flower
+    # Ex: flower_url_prefix = /flower
+    flower_url_prefix =
+
+    # This defines the port that Celery Flower runs on
+    flower_port = 5555
+
+    # Securing Flower with Basic Authentication
+    # Accepts user:password pairs separated by a comma
+    # Example: flower_basic_auth = user1:password1,user2:password2
+    flower_basic_auth =
+
+    # Default queue that tasks get assigned to and that worker listen on.
+    default_queue = default
+
+    # How many processes CeleryExecutor uses to sync task state.
+    # 0 means to use max(1, number of cores - 1) processes.
+    sync_parallelism = 0
+
+    # Import path for celery configuration options
+    celery_config_options = airflow.config_templates.default_celery.DEFAULT_CELERY_CONFIG
+
+    [celery_broker_transport_options]
+    # The visibility timeout defines the number of seconds to wait for the worker
+    # to acknowledge the task before the message is redelivered to another worker.
+    # Make sure to increase the visibility timeout to match the time of the longest
+    # ETA you're planning to use. Especially important in case of using Redis or SQS
+    visibility_timeout = 21600
+
+    # In case of using SSL
+    ssl_active = False
+    ssl_key =
+    ssl_cert =
+    ssl_cacert =
+
+    [dask]
+    # This section only applies if you are using the DaskExecutor in
+    # [core] section above
+
+    # The IP address and port of the Dask cluster's scheduler.
+    cluster_address = 127.0.0.1:8786
+    # TLS/ SSL settings to access a secured Dask scheduler.
+    tls_ca =
+    tls_cert =
+    tls_key =
+
+    [ldap]
+    # set this to ldaps://<your.ldap.server>:<port>
+    uri =
+    user_filter = objectClass=*
+    user_name_attr = uid
+    group_member_attr = memberOf
+    superuser_filter =
+    data_profiler_filter =
+    bind_user = cn=Manager,dc=example,dc=com
+    bind_password = insecure
+    basedn = dc=example,dc=com
+    cacert = /etc/ca/ldap_ca.crt
+    search_scope = LEVEL
+
+    [mesos]
+    # Mesos master address which MesosExecutor will connect to.
+    master = localhost:5050
+
+    # The framework name which Airflow scheduler will register itself as on mesos
+    framework_name = Airflow
+
+    # Number of cpu cores required for running one task instance using
+    # 'airflow run <dag_id> <task_id> <execution_date> --local -p <pickle_id>'
+    # command on a mesos slave
+    task_cpu = 1
+
+    # Memory in MB required for running one task instance using
+    # 'airflow run <dag_id> <task_id> <execution_date> --local -p <pickle_id>'
+    # command on a mesos slave
+    task_memory = 256
+
+    # Enable framework checkpointing for mesos
+    # See http://mesos.apache.org/documentation/latest/slave-recovery/
+    checkpoint = False
+
+    # Failover timeout in milliseconds.
+    # When checkpointing is enabled and this option is set, Mesos waits
+    # until the configured timeout for
+    # the MesosExecutor framework to re-register after a failover. Mesos
+    # shuts down running tasks if the
+    # MesosExecutor framework fails to re-register within this timeframe.
+    # failover_timeout = 604800
+
+    # Enable framework authentication for mesos
+    # See http://mesos.apache.org/documentation/latest/configuration/
+    authenticate = False
+
+    # Mesos credentials, if authentication is enabled
+    # default_principal = admin
+    # default_secret = admin
+
+    # Optional Docker Image to run on slave before running the command
+    # This image should be accessible from mesos slave i.e mesos slave
+    # should be able to pull this docker image before executing the command.
+    # docker_image_slave = puckel/docker-airflow
+
+    [kerberos]
+    ccache = /tmp/airflow_krb5_ccache
+    # gets augmented with fqdn
+    principal = airflow
+    reinit_frequency = 3600
+    kinit_path = kinit
+    keytab = airflow.keytab
+
+    [cli]
+    api_client = airflow.api.client.json_client
+    endpoint_url = http://localhost:8080
+
+    [api]
+    auth_backend = airflow.api.auth.backend.default
+
+    [github_enterprise]
+    api_rev = v3
+
+    [admin]
+    # UI to hide sensitive variable fields when set to True
+    hide_sensitive_variable_fields = True
+
+    [elasticsearch]
+    elasticsearch_host =
diff --git a/templates/base-application.yaml b/templates/base-application.yaml
new file mode 100644
index 0000000..4cd2422
--- /dev/null
+++ b/templates/base-application.yaml
@@ -0,0 +1,53 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+apiVersion: app.k8s.io/v1beta1
+kind: Application
+metadata:
+  name: {{.Name}}
+  namespace: {{.Namespace}}
+  annotations:
+    kubernetes-engine.cloud.google.com/icon: >-
+      data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAQ4AAAEOCAYAAAB4sfmlAAAABGdBTUEAALGPC/xhBQAAACBjSFJNAAB6JgAAgIQAAPoAAACA6AAAdTAAAOpgAAA6mAAAF3CculE8AAAABmJLR0QA/wD/AP+gvaeTAAAAB3RJTUUH4gYBCCI1X7ZRVwAAgABJREFUeNrsfXd8HMd59jNbrqM3AiAJsDewS6RIqlGdEiW5ynJTcZdlxym27DixHSdxYstpX+LYSVxU3CVLFtWb1SiJothJkGDvDb1cv9vd+f7YNju7eziIBEBKePFb3NbZ2dmZZ5+3zAwwJmMyJmMyJmMyJmMyJmMyJmMyJmMyJmMyJmMyJmMyJmMyJmMyJoMLGe0MjMmYjMmZy7V1tZh3uh2nr7sqlhs/Maj2DvRSAu2RRx4ZlvsJo/3AYzImY3Jm8oHLLsb4m29C4qZV1Yc/cde3lJtv [...]
+  labels:
+    app.kubernetes.io/name: {{.Name}}
+    {{range $k,$v := .Labels }}
+    {{$k}}: {{$v}}
+    {{end}}
+spec:
+  selector:
+    matchLabels:
+      {{range $k,$v := .Selector }}
+      {{$k}}: {{$v}}
+      {{end}}
+  descriptor:
+    type: Airflow (base)
+    version: '1.10.2'
+    description: |-
+      Apache Airflow is a platform to programmatically author, schedule and monitor workflows.
+      AirflowBase deploys the common components (sql,storage) needed for Airflow cluster.
+  
+      # Support
+      Google does not offer support for this solution. However, community support is available on
+      [Stack Overflow](https://stackoverflow.com/questions/tagged/airflow/).
+      Additional community support is available on [Airflow gitter](https://gitter.im/apache/incubator-airflow).
+    maintainers:
+    - name: Google Click to Deploy
+      url: https://cloud.google.com/solutions/#click-to-deploy
+    links:
+    - description: Apache Airflow documentations
+      url: https://airflow.apache.org/
+    notes: 
diff --git a/templates/cluster-application.yaml b/templates/cluster-application.yaml
new file mode 100644
index 0000000..35518bd
--- /dev/null
+++ b/templates/cluster-application.yaml
@@ -0,0 +1,53 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+apiVersion: app.k8s.io/v1beta1
+kind: Application
+metadata:
+  name: {{.Name}}
+  namespace: {{.Namespace}}
+  annotations:
+    kubernetes-engine.cloud.google.com/icon: >-
+      data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAQ4AAAEOCAYAAAB4sfmlAAAABGdBTUEAALGPC/xhBQAAACBjSFJNAAB6JgAAgIQAAPoAAACA6AAAdTAAAOpgAAA6mAAAF3CculE8AAAABmJLR0QA/wD/AP+gvaeTAAAAB3RJTUUH4gYBCCI1X7ZRVwAAgABJREFUeNrsfXd8HMd59jNbrqM3AiAJsDewS6RIqlGdEiW5ynJTcZdlxym27DixHSdxYstpX+LYSVxU3CVLFtWb1SiJothJkGDvDb1cv9vd+f7YNju7eziIBEBKePFb3NbZ2dmZZ5+3zAwwJmMyJmMyJmMyJmMyJmMyJmMyJmMyJmMyJmMyJmMyJmMyJmMyJoMLGe0MjMmYjMmZy7V1tZh3uh2nr7sqlhs/Maj2DvRSAu2RRx4ZlvsJo/3AYzImY3Jm8oHLLsb4m29C4qZV1Yc/cde3lJtv [...]
+  labels:
+    app.kubernetes.io/name: {{.Name}}
+    {{range $k,$v := .Labels }}
+    {{$k}}: {{$v}}
+    {{end}}
+spec:
+  selector:
+    matchLabels:
+      {{range $k,$v := .Selector }}
+      {{$k}}: {{$v}}
+      {{end}}
+  descriptor:
+    type: Airflow (cluster)
+    version: '1.10.2'
+    description: |-
+      Apache Airflow is a platform to programmatically author, schedule and monitor workflows.
+      AirflowCluster deploys the Airflow components and rely on AirflowBase.
+  
+      # Support
+      Google does not offer support for this solution. However, community support is available on
+      [Stack Overflow](https://stackoverflow.com/questions/tagged/airflow/).
+      Additional community support is available on [Airflow gitter](https://gitter.im/apache/incubator-airflow).
+    maintainers:
+    - name: Google Click to Deploy
+      url: https://cloud.google.com/solutions/#click-to-deploy
+    links:
+    - description: Apache Airflow documentations
+      url: https://airflow.apache.org/
+    notes: 
diff --git a/templates/flower-sts.yaml b/templates/flower-sts.yaml
new file mode 100644
index 0000000..c74ed5d
--- /dev/null
+++ b/templates/flower-sts.yaml
@@ -0,0 +1,70 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: {{.Name}}
+  namespace: {{.Namespace}}
+  labels:
+    {{range $k,$v := .Labels }}
+    {{$k}}: {{$v}}
+    {{end}}
+  annotations:
+    {{range $k,$v := .Cluster.Spec.Annotations }}
+    {{$k}}: {{$v}}
+    {{end}}
+spec:
+  replicas: {{.Cluster.Spec.Flower.Replicas}}
+  selector:
+    matchLabels:
+      {{range $k,$v := .Selector }}
+      {{$k}}: {{$v}}
+      {{end}}
+  updateStrategy:
+    type: RollingUpdate
+  podManagementPolicy: Parallel
+  template:
+    metadata:
+      labels:
+        {{range $k,$v := .Labels }}
+        {{$k}}: {{$v}}
+        {{end}}
+      annotations:
+        {{range $k,$v := .Cluster.Spec.Annotations }}
+        {{$k}}: {{$v}}
+        {{end}}
+    spec:
+      terminationGracePeriodSeconds: 30
+      nodeSelector:
+        {{range $k,$v := .Cluster.Spec.NodeSelector }}
+        {{$k}}: {{$v}}
+        {{end}}
+      containers:
+      - name: flower
+        args:
+        - flower
+        image: {{.Cluster.Spec.Flower.Image}}:{{.Cluster.Spec.Flower.Version}}
+        imagePullPolicy: IfNotPresent
+        ports:
+        - containerPort: 5555
+          name: flower
+        volumeMounts:
+        - mountPath: /usr/local/airflow/dags/
+          name: dags-data
+      volumes:
+      - emptyDir: {}
+        name: dags-data
diff --git a/templates/headlesssvc.yaml b/templates/headlesssvc.yaml
new file mode 100644
index 0000000..91c9256
--- /dev/null
+++ b/templates/headlesssvc.yaml
@@ -0,0 +1,36 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{.SvcName}}
+  namespace: {{.Namespace}}
+  labels:
+    {{range $k,$v := .Labels }}
+    {{$k}}: {{$v}}
+    {{end}}
+spec:
+  ports:
+  {{range $k,$v := .Ports }}
+  - name: {{$k}}
+    port: {{$v}}
+  {{end}}
+  selector:
+    {{range $k,$v := .Selector }}
+    {{$k}}: {{$v}}
+    {{end}}
+  clusterIP: None
diff --git a/templates/mysql-sts.yaml b/templates/mysql-sts.yaml
new file mode 100644
index 0000000..8cb3dae
--- /dev/null
+++ b/templates/mysql-sts.yaml
@@ -0,0 +1,119 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: {{.Name}}
+  namespace: {{.Namespace}}
+  labels:
+    {{range $k,$v := .Labels }}
+    {{$k}}: {{$v}}
+    {{end}}
+  annotations:
+    {{range $k,$v := .Base.Spec.Annotations }}
+    {{$k}}: {{$v}}
+    {{end}}
+spec:
+  replicas: {{.Base.Spec.MySQL.Replicas}}
+  selector:
+    matchLabels:
+      {{range $k,$v := .Selector }}
+      {{$k}}: {{$v}}
+      {{end}}
+  updateStrategy:
+    type: OnDelete
+  podManagementPolicy: OrderedReady
+  template:
+    metadata:
+      labels:
+        {{range $k,$v := .Labels }}
+        {{$k}}: {{$v}}
+        {{end}}
+      annotations:
+        {{range $k,$v := .Base.Spec.Annotations }}
+        {{$k}}: {{$v}}
+        {{end}}
+    spec:
+      ## TODO affinity in code
+      terminationGracePeriodSeconds: 30 ## check this change to 180 ?
+      #priorityClassName: something
+      #securityContext:
+      #  fsGroup: 1000
+      nodeSelector:
+        {{range $k,$v := .Base.Spec.NodeSelector }}
+        {{$k}}: {{$v}}
+        {{end}}
+      #tolerations:
+      #initContainers:
+      containers:
+      - name: mysql
+        args:
+        - --explicit-defaults-for-timestamp=ON
+        env:
+        - name: MYSQL_DATABASE
+          value: testdb
+        - name: MYSQL_USER
+          value: airflow
+        - name: MYSQL_PASSWORD
+          valueFrom:
+            secretKeyRef:
+              key: password
+              name: {{.SecretName}}
+        - name: MYSQL_ROOT_PASSWORD
+          valueFrom:
+            secretKeyRef:
+              key: rootpassword
+              name: {{.SecretName}}
+        image: {{.Base.Spec.MySQL.Image}}:{{.Base.Spec.MySQL.Version}}
+        imagePullPolicy: IfNotPresent
+        livenessProbe:
+          exec:
+            command:
+            - bash
+            - -c
+            - mysqladmin -p$MYSQL_ROOT_PASSWORD ping
+          failureThreshold: 3
+          initialDelaySeconds: 30
+          periodSeconds: 20
+          successThreshold: 1
+          timeoutSeconds: 5
+        ports:
+        - containerPort: 3306
+          name: mysql
+          protocol: TCP
+        readinessProbe:
+          exec:
+            command:
+            - bash
+            - -c
+            - mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e "use testdb"
+          failureThreshold: 3
+          initialDelaySeconds: 10
+          periodSeconds: 5
+          successThreshold: 1
+          timeoutSeconds: 2
+        resources: {}
+        volumeMounts:
+        - name: data
+          mountPath: /var/lib/mysql
+      restartPolicy: Always
+      {{if .Base.Spec.MySQL.VolumeClaimTemplate}}
+      {{else}}
+      volumes:
+      - emptyDir: {}
+        name: data
+      {{end}}
diff --git a/templates/nfs-sts.yaml b/templates/nfs-sts.yaml
new file mode 100644
index 0000000..dc7a367
--- /dev/null
+++ b/templates/nfs-sts.yaml
@@ -0,0 +1,81 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: {{.Name}}
+  namespace: {{.Namespace}}
+  labels:
+    {{range $k,$v := .Labels }}
+    {{$k}}: {{$v}}
+    {{end}}
+  annotations:
+    {{range $k,$v := .Base.Spec.Annotations }}
+    {{$k}}: {{$v}}
+    {{end}}
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      {{range $k,$v := .Selector }}
+      {{$k}}: {{$v}}
+      {{end}}
+  updateStrategy:
+    type: OnDelete
+  podManagementPolicy: Parallel
+  serviceName: {{.SvcName}}
+  template:
+    metadata:
+      labels:
+        {{range $k,$v := .Labels }}
+        {{$k}}: {{$v}}
+        {{end}}
+      annotations:
+        {{range $k,$v := .Base.Spec.Annotations }}
+        {{$k}}: {{$v}}
+        {{end}}
+    spec:
+      terminationGracePeriodSeconds: 30 ## check this change to 180 ?
+      nodeSelector:
+        {{range $k,$v := .Base.Spec.NodeSelector }}
+        {{$k}}: {{$v}}
+        {{end}}
+      containers:
+      - name: nfs-server
+        image: {{.Base.Spec.Storage.Image}}:{{.Base.Spec.Storage.Version}}
+        imagePullPolicy: IfNotPresent
+        ports:
+        - containerPort: 2049
+          name: nfs
+          protocol: TCP
+        - containerPort: 20048
+          name: mountd
+          protocol: TCP
+        - containerPort: 111
+          name: rpcbind
+          protocol: TCP
+        resources: {}
+        volumeMounts:
+        - mountPath: /exports
+          name: data
+      restartPolicy: Always
+      {{if .Base.Spec.Storage.Volume}}
+      {{else}}
+      volumes:
+      - emptyDir: {}
+        name: data
+      {{end}}
diff --git a/templates/pdb.yaml b/templates/pdb.yaml
new file mode 100644
index 0000000..4444a1b
--- /dev/null
+++ b/templates/pdb.yaml
@@ -0,0 +1,32 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+apiVersion: policy/v1beta1
+kind: PodDisruptionBudget
+metadata:
+  name: {{.Name}}
+  namespace: {{.Namespace}}
+  labels:
+    {{range $k,$v := .Labels }}
+    {{$k}}: {{$v}}
+    {{end}}
+spec:
+  minAvailable: {{.PDBMinAvail}}
+  selector:
+    matchLabels:
+       {{range $k,$v := .Selector }}
+       {{$k}}: {{$v}}
+       {{end}}
diff --git a/templates/postgres-sts.yaml b/templates/postgres-sts.yaml
new file mode 100644
index 0000000..22db9f7
--- /dev/null
+++ b/templates/postgres-sts.yaml
@@ -0,0 +1,104 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# StatefulSet running the Postgres metadata database for an AirflowBase.
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: {{.Name}}
+  namespace: {{.Namespace}}
+  labels:
+    {{range $k,$v := .Labels }}
+    {{$k}}: {{$v}}
+    {{end}}
+  annotations:
+    {{range $k,$v := .Base.Spec.Annotations }}
+    {{$k}}: {{$v}}
+    {{end}}
+spec:
+  replicas: {{.Base.Spec.Postgres.Replicas}}
+  selector:
+    matchLabels:
+      {{range $k,$v := .Selector }}
+      {{$k}}: {{$v}}
+      {{end}}
+  updateStrategy:
+    type: OnDelete
+  podManagementPolicy: OrderedReady
+  template:
+    metadata:
+      labels:
+        {{range $k,$v := .Labels }}
+        {{$k}}: {{$v}}
+        {{end}}
+      annotations:
+        {{range $k,$v := .Base.Spec.Annotations }}
+        {{$k}}: {{$v}}
+        {{end}}
+    spec:
+      terminationGracePeriodSeconds: 30
+      nodeSelector:
+        {{range $k,$v := .Base.Spec.NodeSelector }}
+        {{$k}}: {{$v}}
+        {{end}}
+      containers:
+      - name: postgres
+        env:
+        - name: POSTGRES_DB
+          value: testdb
+        - name: POSTGRES_USER
+          value: postgres
+        - name: POSTGRES_PASSWORD
+          valueFrom:
+            secretKeyRef:
+              key: rootpassword
+              name: {{.SecretName}}
+        image: {{.Base.Spec.Postgres.Image}}:{{.Base.Spec.Postgres.Version}}
+        imagePullPolicy: IfNotPresent
+        livenessProbe:
+          exec:
+            command:
+            - bash
+            - -c
+            # Quote 'SELECT 1' so the shell passes it to psql as a single
+            # -c argument; unquoted, psql received only "SELECT".
+            - psql -w -U $POSTGRES_USER -d $POSTGRES_DB -c 'SELECT 1'
+          failureThreshold: 3
+          initialDelaySeconds: 30
+          periodSeconds: 20
+          successThreshold: 1
+          timeoutSeconds: 5
+        ports:
+        - containerPort: 5432
+          name: postgres
+        readinessProbe:
+          exec:
+            command:
+            - bash
+            - -c
+            # Same quoting fix as the liveness probe above.
+            - psql -w -U $POSTGRES_USER -d $POSTGRES_DB -c 'SELECT 1'
+          failureThreshold: 3
+          initialDelaySeconds: 10
+          periodSeconds: 5
+          successThreshold: 1
+          timeoutSeconds: 2
+        volumeMounts:
+        - name: data
+          # NOTE(review): the official postgres image keeps its data under
+          # /var/lib/postgresql/data (PGDATA) — confirm this path matches
+          # the image referenced by .Base.Spec.Postgres.Image, otherwise the
+          # database files are not written to this volume.
+          mountPath: /var/lib/postgres/data
+      restartPolicy: Always
+      # Fall back to an ephemeral emptyDir when no volume claim is configured.
+      {{if not .Base.Spec.Postgres.VolumeClaimTemplate}}
+      volumes:
+      - emptyDir: {}
+        name: data
+      {{end}}
diff --git a/templates/redis-sts.yaml b/templates/redis-sts.yaml
new file mode 100644
index 0000000..6d465b5
--- /dev/null
+++ b/templates/redis-sts.yaml
@@ -0,0 +1,106 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: {{.Name}}
+  namespace: {{.Namespace}}
+  labels:
+    {{range $k,$v := .Labels }}
+    {{$k}}: {{$v}}
+    {{end}}
+  annotations:
+    {{range $k,$v := .Cluster.Spec.Annotations }}
+    {{$k}}: {{$v}}
+    {{end}}
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      {{range $k,$v := .Selector }}
+      {{$k}}: {{$v}}
+      {{end}}
+  updateStrategy:
+    type: OnDelete
+  podManagementPolicy: OrderedReady
+  template:
+    metadata:
+      labels:
+        {{range $k,$v := .Labels }}
+        {{$k}}: {{$v}}
+        {{end}}
+      annotations:
+        {{range $k,$v := .Cluster.Spec.Annotations }}
+        {{$k}}: {{$v}}
+        {{end}}
+    spec:
+      terminationGracePeriodSeconds: 30
+      nodeSelector:
+        {{range $k,$v := .Cluster.Spec.NodeSelector }}
+        {{$k}}: {{$v}}
+        {{end}}
+      containers:
+      - name: redis
+        args:
+        - --requirepass
+        - $(REDIS_PASSWORD)
+        {{if .Cluster.Spec.Redis.AdditionalArgs}}
+        - {{.Cluster.Spec.Redis.AdditionalArgs}}
+        {{end}}
+        env:
+        - name: REDIS_EXTRA_FLAGS
+        - name: REDIS_PASSWORD
+          valueFrom:
+            secretKeyRef:
+              key: password
+              name: {{.SecretName}}
+        image: {{.Cluster.Spec.Redis.Image}}:{{.Cluster.Spec.Redis.Version}}
+        imagePullPolicy: IfNotPresent
+        livenessProbe:
+          exec:
+            command:
+            - redis-cli
+            - ping
+          failureThreshold: 3
+          initialDelaySeconds: 30
+          periodSeconds: 20
+          successThreshold: 1
+          timeoutSeconds: 5
+        ports:
+        - containerPort: 6379
+          name: redis
+          protocol: TCP
+        readinessProbe:
+          exec:
+            command:
+            - redis-cli
+            - ping
+          failureThreshold: 3
+          initialDelaySeconds: 10
+          periodSeconds: 5
+          successThreshold: 1
+          timeoutSeconds: 2
+        volumeMounts:
+        - mountPath: /data
+          name: data
+      restartPolicy: Always
+      {{if .Cluster.Spec.Redis.VolumeClaimTemplate}}
+      {{else}}
+      volumes:
+      - emptyDir: {}
+        name: data
... 6229 lines suppressed ...