Posted to commits@hbase.apache.org by nd...@apache.org on 2023/05/09 13:09:52 UTC

[hbase-operator-tools] 02/06: HBASE-27830 Introduce hdfs overlay

This is an automated email from the ASF dual-hosted git repository.

ndimiduk pushed a commit to branch 27834-introduce-ha-hdfs-overlay
in repository https://gitbox.apache.org/repos/asf/hbase-operator-tools.git

commit 3471fe723f6fe3c0f40746c3175128bf78823d73
Author: Nick Dimiduk <nd...@apache.org>
AuthorDate: Mon May 1 13:13:29 2023 +0200

    HBASE-27830 Introduce hdfs overlay
---
 .gitignore                                         |   4 +
 hbase-kubernetes-deployment/base/README.md         |  39 +++
 .../base/delete-format-hdfs-configmap-job.yaml     |  89 ++++++
 .../base/environment-configmap.yaml                |  70 +++++
 hbase-kubernetes-deployment/base/java.security     |  18 ++
 hbase-kubernetes-deployment/base/jmxexporter.yaml  |  32 ++
 .../base/kustomization.yaml                        |  71 +++++
 .../base/log4j.properties.hadoop                   |  55 ++++
 hbase-kubernetes-deployment/base/namespace.yaml    |  23 ++
 hbase-kubernetes-deployment/base/rbac.yaml         | 103 +++++++
 .../base/scripts/apiserver_access.sh               |  26 ++
 .../base/scripts/delete_configmap.sh               |  33 +++
 .../base/scripts/describe_node.sh                  |  32 ++
 .../base/scripts/exists_configmap.sh               |  35 +++
 .../base/scripts/get_node_labels.sh                |  20 ++
 .../base/scripts/get_node_labels_from_pod_IP.sh    |  31 ++
 .../base/scripts/get_node_name_from_pod_IP.sh      |  18 ++
 .../base/scripts/get_statefulset.sh                |  32 ++
 .../base/scripts/get_statefulset_replica_count.sh  |  20 ++
 .../base/scripts/jmxping.sh                        |  96 ++++++
 hbase-kubernetes-deployment/base/scripts/log.sh    |  42 +++
 .../base/scripts/topology.sh                       |  65 +++++
 hbase-kubernetes-deployment/base/ssl-client.xml    |  52 ++++
 hbase-kubernetes-deployment/base/ssl-server.xml    |  56 ++++
 .../dockerfiles/kuttl/README.md                    |   4 +-
 .../overlays/hdfs/core-site.xml                    |  96 ++++++
 .../overlays/hdfs/dn-service.yaml                  |  29 ++
 .../overlays/hdfs/dn-statefulset.yaml              | 223 ++++++++++++++
 .../overlays/hdfs/hdfs-site.xml                    | 274 +++++++++++++++++
 .../overlays/hdfs/kustomization.yaml               |  36 +++
 .../overlays/hdfs/nn-service.yaml                  |  29 ++
 .../overlays/hdfs/nn-statefulset.yaml              | 325 +++++++++++++++++++++
 .../tests/bin/kustomize_into_tmpdir.sh             |  86 ++++++
 .../tests/integration/README.md                    | 159 ++++++++++
 .../tests/integration/overlays_hdfs/00-assert.yaml |  31 ++
 .../integration/overlays_hdfs/00-kustomize.yaml    |  20 ++
 .../integration/overlays_hdfs/kustomization.yaml   |  22 ++
 .../tests/integration/test_base/kustomization.yaml |  29 ++
 .../tests/integration/test_base/networkpolicy.yaml |  29 ++
 .../tests/kuttl-test-integration.yaml              |  33 +++
 .../tests/kuttl-test-unit.yaml                     |  25 ++
 .../tests/unit/base/00-assert.yaml                 |  72 +++++
 .../tests/unit/base/00-kustomize.yaml              |  20 ++
 .../tests/unit/base/README.md                      |  24 ++
 .../tests/unit/base/kustomization.yaml             |  20 ++
 .../tests/unit/overlays_hdfs/00-assert.yaml        |  73 +++++
 .../tests/unit/overlays_hdfs/00-kustomize.yaml     |  20 ++
 .../tests/unit/overlays_hdfs/kustomization.yaml    |  21 ++
 48 files changed, 2760 insertions(+), 2 deletions(-)

diff --git a/.gitignore b/.gitignore
index 4ea9ed6..1589b7a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -32,3 +32,7 @@ awscli-exe*
 *.key
 *.repo
 *.jar
+
+# detritus produced by kuttl
+kubeconfig*
+kuttl-report-*.xml
diff --git a/hbase-kubernetes-deployment/base/README.md b/hbase-kubernetes-deployment/base/README.md
new file mode 100644
index 0000000..da3c6cd
--- /dev/null
+++ b/hbase-kubernetes-deployment/base/README.md
@@ -0,0 +1,39 @@
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+# Base
+
+Some values, such as the SERVICE name, the SERVICEACCOUNT name,
+and the RBAC role, are hard-coded in environment-configmap.yaml
+and supplied to the pods as environment variables. Other
+hard-codings include the service name ('hadoop') and the
+namespace we run in (also 'hadoop').
+
+The Hadoop Configuration system can interpolate environment variables
+into '\*.xml' file values ONLY. See the
+[Configuration Javadoc](http://hadoop.apache.org/docs/current/api/org/apache/hadoop/conf/Configuration.html).
+
+We cannot interpolate the SERVICE name into '\*.xml' file key names,
+as is needed when doing HA in hdfs-site.xml, so for now we have
+hard-codings in 'hdfs-site.xml' key names. For example, the property keys
+`dfs.ha.namenodes.hadoop` and `dfs.namenode.http-address.hadoop` both
+embed the SERVICE name ('hadoop') (TODO: Fix/Workaround).
+
+Edits to pod resources or JVM args for a process are
+made in place in the yaml files or via kustomization
+replacements in overlays.
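To illustrate the value-only interpolation: the overlay's core-site.xml (later in this diff) sets `fs.defaultFS` to `hdfs://${env.HADOOP_SERVICE}`. A minimal sketch of checking the resolved value from inside a deployed pod, assuming the hadoop image's `hdfs` CLI and the generated configuration on `HADOOP_CONF_DIR`:

```shell
# HADOOP_SERVICE is injected via the 'environment' configmap; Hadoop
# interpolates it into the property VALUE. Key names (e.g.
# dfs.ha.namenodes.hadoop) cannot be interpolated and stay hard-coded.
hdfs getconf -confKey fs.defaultFS   # expected to print: hdfs://hadoop
```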
diff --git a/hbase-kubernetes-deployment/base/delete-format-hdfs-configmap-job.yaml b/hbase-kubernetes-deployment/base/delete-format-hdfs-configmap-job.yaml
new file mode 100644
index 0000000..cc52f4d
--- /dev/null
+++ b/hbase-kubernetes-deployment/base/delete-format-hdfs-configmap-job.yaml
@@ -0,0 +1,89 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Job to delete the 'format-hdfs' configmap after hdfs has come up
+# successfully. The 'format-hdfs' configmap is added by running
+# 'kubectl -n hadoop apply -k tools/format-hdfs' (You need the
+# '-n hadoop' to apply the configmap to the 'hadoop' namespace).
+# Add the configmap if you want hdfs to format the filesystem.
+# Do this on initial install only or if you want to clean out
+# the current HDFS data.
+#
+# If the 'format-hdfs' configmap is NOT present, this Job exits/completes.
+# Otherwise, it keeps probing until HDFS is up and healthy, and then
+# this job removes the 'format-hdfs' configmap. The presence of the
+# 'format-hdfs' configmap is checked by all hdfs pods on startup. If
+# the configmap is present, they clean out their data directories and
+# reformat/recreate them. Install the 'format-hdfs' configmap before
+# launching hdfs. See tools/format-hdfs.
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: delete-format-hdfs-configmap
+spec:
+  ttlSecondsAfterFinished: 300
+  template:
+    spec:
+      containers:
+      - image: hadoop
+        name: delete-format-hdfs-configmap
+        imagePullPolicy: IfNotPresent
+        command:
+        - /bin/bash
+        - -c
+        - |-
+          set -xe
+          # See if 'format-hdfs' configmap is present.
+          # If not, then there is nothing for this job to do, complete, exit 0.
+          /tmp/scripts/exists_configmap.sh format-hdfs || {
+            echo "No 'format-hdfs' configmap found so no work to do; exiting"
+            exit 0
+          }
+          # The `format-hdfs` configmap is present. Remove it after HDFS is fully up.
+          /tmp/scripts/jmxping.sh namenode ${HADOOP_SERVICE}
+          /tmp/scripts/jmxping.sh datanode ${HADOOP_SERVICE}
+          # TODO: Should we check if ha and if so, if a NN active... get a report on health?
+          # HDFS is up. Delete the format-hdfs flag.
+          /tmp/scripts/delete_configmap.sh format-hdfs
+        resources:
+          requests:
+            cpu: '0.2'
+            memory: 256Mi
+          limits:
+            cpu: '0.5'
+            memory: 512Mi
+        envFrom:
+        - configMapRef:
+            name: environment
+        volumeMounts:
+        - mountPath: /tmp/scripts
+          name: scripts
+        # Scratch dir is a location where init containers place items for later use
+        # by the main containers when they run.
+        - mountPath: /tmp/scratch
+          name: scratch
+      serviceAccountName: hadoop
+      restartPolicy: Never
+      volumes:
+      - configMap:
+          name: scripts
+          defaultMode: 0555
+        name: scripts
+      # Scratch dir is a location where init containers place items for later use
+      # by the main containers when they run.
+      - emptyDir: {}
+        name: scratch
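The format-hdfs workflow described in the comments above, sketched end to end (the `tools/format-hdfs` overlay itself is not part of this commit; commands follow the comment text):

```shell
# 1. Flag that hdfs should (re)format its data directories (initial install only).
kubectl -n hadoop apply -k tools/format-hdfs
# 2. Deploy hdfs; pods see the configmap on startup and reformat their data dirs.
# 3. Once hdfs is healthy, this Job deletes the flag; verify it is gone:
kubectl -n hadoop get configmap format-hdfs   # expect "NotFound" after the Job runs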
diff --git a/hbase-kubernetes-deployment/base/environment-configmap.yaml b/hbase-kubernetes-deployment/base/environment-configmap.yaml
new file mode 100644
index 0000000..d018c22
--- /dev/null
+++ b/hbase-kubernetes-deployment/base/environment-configmap.yaml
@@ -0,0 +1,70 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Common environment variables shared across pods.
+# Include w/ the 'envFrom:' directive.
+# We have to be pedantic in here. We cannot have a value
+# refer to a define made earlier; the interpolation
+# doesn't work.
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: environment
+data:
+  DOMAIN: svc.cluster.local
+  # HADOOP_HOME, HADOOP_HDFS_HOME, etc., and HBASE_HOME are provided by the images.
+  #
+  # The headless-service that pods in our statefulsets come up in.
+  # See https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#stable-network-id
+  # The headless-service is defined in the adjacent rbac.yaml.
+  # It matches the serviceName we set on our statefulsets.
+  # We are required to create it; see https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#limitations
+  HADOOP_SERVICE: hadoop
+  # dfs.http.policy
+  # If HTTPS_ONLY or HTTPS_OR_HTTP then we'll depend on https in UI and jmx'ing
+  # and will adjust schema and ports accordingly. If https, we need to get certificates
+  # so cert-manager, etc., needs to be installed.
+  HTTP_POLICY: HTTP_ONLY
+  DFS_HTTPS_ENABLE: "false"
+  HBASE_SSL_ENABLED: "false"
+  HTTP_AUTH: kerberos
+  # The insecure port for now.
+  DATANODE_DATA_DIR: /data00/dn
+  JOURNALNODE_DATA_DIR: /data00/jn
+  NAMENODE_DATA_DIR: /data00/nn
+  HDFS_AUDIT_LOGGER: INFO,RFAAUDIT
+  HADOOP_DAEMON_ROOT_LOGGER: INFO,RFA,CONSOLE
+  HADOOP_ROOT_LOGGER: INFO,RFA,CONSOLE
+  HADOOP_SECURITY_LOGGER: INFO,RFAS
+  HADOOP_CONF_DIR: /etc/hadoop
+  HADOOP_LOG_DIR: /var/log/hadoop
+  HADOOP_SECURE_LOG: /var/log/hadoop
+  HBASE_ROOT_LOGGER: DEBUG,RFA,console
+  HBASE_LOG_DIR: /var/log/hbase
+  HBASE_CONF_DIR: /etc/hbase
+  # if [ "$HBASE_NO_REDIRECT_LOG" != "" ]; then ... so we are asking for NO redirect of logs.
+  HBASE_NO_REDIRECT_LOG: "true"
+  HBASE_MANAGES_ZK: "false"
+  DFS_REPLICATION: "1"
+  # What percentage of the container memory to give over to the JVM.
+  # Be aware that we look at the container resource limit, NOT request: e.g. if
+  # the resource request memory is set to 8G and the limit is 16G and the
+  # JVM_HEAP_PERCENTAGE_OF_RESOURCE_LIMIT is 50 as in 50%,
+  # the heap will be set to 8G: i.e. 1/2 of the 16G limit.
+  # ip-172-18-132-227.us-west-2.compute.internal
+  # See https://dzone.com/articles/best-practices-java-memory-arguments-for-container
+  JVM_HEAP_PERCENTAGE_OF_RESOURCE_LIMIT: "45"
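To make the JVM_HEAP_PERCENTAGE_OF_RESOURCE_LIMIT comment concrete, here is a minimal sketch of how a launch script could derive the heap size from the container's memory limit. This helper is hypothetical (not part of this commit), and the cgroup path shown is the v1 location; adjust for cgroup v2:

```shell
# Hypothetical helper; reads the container memory LIMIT (not request) and
# sizes the JVM heap as a percentage of it, per the comment above.
limit_bytes=$(cat /sys/fs/cgroup/memory/memory.limit_in_bytes)
heap_mb=$(( limit_bytes / 1024 / 1024 * ${JVM_HEAP_PERCENTAGE_OF_RESOURCE_LIMIT:-45} / 100 ))
echo "-Xmx${heap_mb}m"   # e.g. a 16Gi limit at 45% yields -Xmx7372m
```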
diff --git a/hbase-kubernetes-deployment/base/java.security b/hbase-kubernetes-deployment/base/java.security
new file mode 100644
index 0000000..c5c4f04
--- /dev/null
+++ b/hbase-kubernetes-deployment/base/java.security
@@ -0,0 +1,18 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+networkaddress.cache.ttl=1
+networkaddress.cache.negative.ttl=0
diff --git a/hbase-kubernetes-deployment/base/jmxexporter.yaml b/hbase-kubernetes-deployment/base/jmxexporter.yaml
new file mode 100644
index 0000000..4dd20fa
--- /dev/null
+++ b/hbase-kubernetes-deployment/base/jmxexporter.yaml
@@ -0,0 +1,32 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# We run the jmxexporter on almost all processes to convert JMX metrics to Prometheus format.
+# This is the config file it uses.
+#
+# Don't lowercase. Leave the metrics in camelcase. Do this because while
+# jmxexporter can lowercase metric names, telegraf can't.
+#
+#lowercaseOutputName: false
+#lowercaseOutputLabelNames: false
+# From https://godatadriven.com/blog/monitoring-hbase-with-prometheus/
+#rules:
+#  - pattern: HadoopNamespace_([^\W_]+)_table_([^\W_]+)_region_([^\W_]+)_metric_(\w+)
+#    name: HBase_metric_$4
+#    labels:
+#      namespace: "$1"
+#      table: "$2"
+#      region: "$3"
diff --git a/hbase-kubernetes-deployment/base/kustomization.yaml b/hbase-kubernetes-deployment/base/kustomization.yaml
new file mode 100644
index 0000000..43dd57c
--- /dev/null
+++ b/hbase-kubernetes-deployment/base/kustomization.yaml
@@ -0,0 +1,71 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+configMapGenerator:
+- name: hadoop-configuration
+  # Base set of hadoop configurations. Overlays will add to the set here.
+  files:
+  - log4j.properties=log4j.properties.hadoop
+- name: scripts
+  # Useful scripts
+  files:
+  - scripts/jmxping.sh
+  - scripts/apiserver_access.sh
+  - scripts/get_statefulset_replica_count.sh
+  - scripts/get_statefulset.sh
+  - scripts/exists_configmap.sh
+  - scripts/delete_configmap.sh
+  - scripts/topology.sh
+  - scripts/describe_node.sh
+  - scripts/get_node_name_from_pod_IP.sh
+  - scripts/get_node_labels.sh
+  - scripts/get_node_labels_from_pod_IP.sh
+  - scripts/log.sh
+  options:
+    disableNameSuffixHash: true
+- name: global-files
+  # Add files used by most/all processes into a global configuration configmap
+  # accessible to all processes. The environment-configmap defines env variables used by
+  # all processes and pods. This configmap loads files used by each process.
+  files:
+  - jmxexporter.yaml
+  - java.security
+  - ssl-client.xml
+  - ssl-server.xml
+  options:
+    disableNameSuffixHash: true
+
+secretGenerator:
+- name: keystore-password
+  type: Opaque
+  options:
+    disableNameSuffixHash: true
+  literals:
+  - password=changeit
+
+resources:
+- namespace.yaml
+# Global environment variables read in by pods
+- environment-configmap.yaml
+- rbac.yaml
+- delete-format-hdfs-configmap-job.yaml
+# These depend on cert-manager being installed.
+# See https://cert-manager.io/docs/installation/
+#- clusterissuer.yaml
+#- certificate.yaml
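To see everything the base renders, a quick sketch (the base is normally consumed via an overlay such as overlays/hdfs, since namespace.yaml below ships a deliberately invalid placeholder name):

```shell
# Render the base without applying it; requires a recent kubectl or kustomize.
kubectl kustomize hbase-kubernetes-deployment/base
# or, with standalone kustomize:
kustomize build hbase-kubernetes-deployment/base
```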
diff --git a/hbase-kubernetes-deployment/base/log4j.properties.hadoop b/hbase-kubernetes-deployment/base/log4j.properties.hadoop
new file mode 100644
index 0000000..df7cf7b
--- /dev/null
+++ b/hbase-kubernetes-deployment/base/log4j.properties.hadoop
@@ -0,0 +1,55 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+hadoop.console.threshold=LOG
+hadoop.log.maxbackupindex=20
+hadoop.log.maxfilesize=256MB
+hadoop.root.logger=TRACE,CONSOLE
+hadoop.security.log.file=SecurityAuth-${user.name}.audit
+hadoop.security.log.maxbackupindex=20
+hadoop.security.log.maxfilesize=256MB
+hadoop.security.logger=INFO,RFAS
+hdfs.audit.log.maxbackupindex=20
+hdfs.audit.log.maxfilesize=256MB
+hdfs.audit.logger=INFO,RFAAUDIT
+log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
+log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
+log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
+log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L] - %m%n
+log4j.appender.CONSOLE.Threshold=${hadoop.console.threshold}
+log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L] - %m%n
+log4j.appender.RFA.MaxBackupIndex=${hadoop.log.maxbackupindex}
+log4j.appender.RFA.MaxFileSize=${hadoop.log.maxfilesize}
+log4j.appender.RFAAUDIT=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
+log4j.appender.RFAAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAAUDIT.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L] - %m%n
+log4j.appender.RFAAUDIT.MaxBackupIndex=${hdfs.audit.log.maxbackupindex}
+log4j.appender.RFAAUDIT.MaxFileSize=${hdfs.audit.log.maxfilesize}
+log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L] - %m%n
+log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
+log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
+log4j.category.SecurityLogger=${hadoop.security.logger}
+log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
+log4j.logger.org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy=DEBUG
+log4j.logger.org.apache.hadoop.net.NetworkTopology=DEBUG
+log4j.rootLogger=${hadoop.root.logger}
diff --git a/hbase-kubernetes-deployment/base/namespace.yaml b/hbase-kubernetes-deployment/base/namespace.yaml
new file mode 100644
index 0000000..f3e73a6
--- /dev/null
+++ b/hbase-kubernetes-deployment/base/namespace.yaml
@@ -0,0 +1,23 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Allow the namespace of the user's Kustomization to be the destination of the deployment.
+# How to manage the namespace with Kustomize -- https://stackoverflow.com/a/71150557
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: ~~illegal_value_to_be_overridden_in_Kustomization~~
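A sketch of how a consuming kustomization might override the placeholder above, following the StackOverflow approach linked in the comment; the resource path and the namespace name 'hadoop' are assumed choices, not part of this commit:

```shell
# Hypothetical user-side kustomization that renames the Namespace object
# and sets the namespace on all namespaced resources.
cat > kustomization.yaml <<'EOF'
namespace: hadoop
resources:
- ../hbase-kubernetes-deployment/base
patches:
- target:
    kind: Namespace
  patch: |-
    - op: replace
      path: /metadata/name
      value: hadoop
EOF
kubectl apply -k .
```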
diff --git a/hbase-kubernetes-deployment/base/rbac.yaml b/hbase-kubernetes-deployment/base/rbac.yaml
new file mode 100644
index 0000000..29e9c89
--- /dev/null
+++ b/hbase-kubernetes-deployment/base/rbac.yaml
@@ -0,0 +1,103 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Service and ServiceAccount names are hard-coded as 'hadoop'.
+# RBAC Role name is also hard-coded as 'hadoop-role'.  Service selects on
+# an app named 'hadoop', another hard-coding.
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: hadoop-role
+rules:
+- resources:
+  - configmaps
+  verbs:
+  - get
+  - delete
+  - list
+  apiGroups:
+  - ''
+- resources:
+  - namespaces
+  verbs:
+  - get
+  - list
+  apiGroups:
+  - ''
+- resources:
+  - statefulsets
+  verbs:
+  - get
+  - list
+  - patch
+  - update
+  apiGroups:
+  - 'apps'
+  - 'api'
+- resources:
+  - pods
+  verbs:
+  - get
+  - list
+  - delete
+  - watch
+  apiGroups:
+  - ''
+- resources:
+  - leases
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - patch
+  - delete
+  apiGroups:
+  - coordination.k8s.io
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: hadoop-role-binding
+subjects:
+- kind: ServiceAccount
+  name: hadoop
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: hadoop-role
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: hadoop
+---
+# Headless-service to cluster all our pods under.
+# Its name matches the ServiceAccount above and is referenced by
+# statefulsets in their serviceName.
+# See https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#stable-network-id
+# This is required for statefulsets. See https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#limitations
+apiVersion: v1
+kind: Service
+metadata:
+  name: hadoop
+spec:
+  clusterIP: None
+  publishNotReadyAddresses: true
+  selector:
+    app: hadoop
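One way to sanity-check the Role/RoleBinding above once applied (a sketch; assumes the 'hadoop' namespace):

```shell
# Both should print "yes": the hadoop ServiceAccount may read and delete
# configmaps, which delete-format-hdfs-configmap-job.yaml relies on.
kubectl -n hadoop auth can-i get configmaps \
  --as=system:serviceaccount:hadoop:hadoop
kubectl -n hadoop auth can-i delete configmaps \
  --as=system:serviceaccount:hadoop:hadoop
```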
diff --git a/hbase-kubernetes-deployment/base/scripts/apiserver_access.sh b/hbase-kubernetes-deployment/base/scripts/apiserver_access.sh
new file mode 100755
index 0000000..4a2929f
--- /dev/null
+++ b/hbase-kubernetes-deployment/base/scripts/apiserver_access.sh
@@ -0,0 +1,26 @@
+#! /usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Defines used accessing the apiserver.
+NAMESPACE=$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace)
+export NAMESPACE
+APISERVER=https://$KUBERNETES_SERVICE_HOST:$KUBERNETES_SERVICE_PORT
+export APISERVER
+CACERT=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+export CACERT
+TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)
+export TOKEN
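Example of how the scripts below use these defines (the pod-list call here is illustrative):

```shell
# Source the apiserver defines, then make an authenticated call using the
# pod's mounted service-account credentials.
source /tmp/scripts/apiserver_access.sh
curl -sS --cacert "$CACERT" \
  -H "Authorization: Bearer $TOKEN" \
  "$APISERVER/api/v1/namespaces/$NAMESPACE/pods" | jq -r '.items[].metadata.name'
```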
diff --git a/hbase-kubernetes-deployment/base/scripts/delete_configmap.sh b/hbase-kubernetes-deployment/base/scripts/delete_configmap.sh
new file mode 100755
index 0000000..0a8cff6
--- /dev/null
+++ b/hbase-kubernetes-deployment/base/scripts/delete_configmap.sh
@@ -0,0 +1,33 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Delete the named configmap
+set -x
+configmap_name="${1}"
+outfile=$(mktemp /tmp/$(basename $0).XXXX)
+trap '{ rm -f -- "$outfile"; }' EXIT
+script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
+source "${script_dir}/apiserver_access.sh"
+# Following model described here: https://chengdol.github.io/2019/11/06/k8s-api/
+# http_code is the return status code
+# From https://docs.okd.io/3.7/rest_api/api/v1.ConfigMap.html#Delete-api-v1-namespaces-namespace-configmaps-name
+http_code=$(curl -w  "%{http_code}" -sS -X DELETE --cacert $CACERT -H "Content-Type: application/json" -H "Accept: application/json, */*" -H "Authorization: Bearer $TOKEN" "$APISERVER/api/v1/namespaces/$NAMESPACE/configmaps/$configmap_name" -o $outfile)
+if [[ $http_code -ne 200 ]]; then
+    echo "{\"Result\": \"Failure\", \"httpReturnCode\":$http_code}" | jq '.'
+    exit 1
+fi
+cat $outfile
diff --git a/hbase-kubernetes-deployment/base/scripts/describe_node.sh b/hbase-kubernetes-deployment/base/scripts/describe_node.sh
new file mode 100644
index 0000000..1eee7e4
--- /dev/null
+++ b/hbase-kubernetes-deployment/base/scripts/describe_node.sh
@@ -0,0 +1,32 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Get the description of the named node
+set -x
+node="${1}"
+outfile=$(mktemp /tmp/$(basename $0).XXXX)
+trap '{ rm -f -- "$outfile"; }' EXIT
+script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
+source "${script_dir}/apiserver_access.sh"
+# Following model described here: https://chengdol.github.io/2019/11/06/k8s-api/
+# http_code is the return status code
+http_code=$(curl -w  "%{http_code}" -sS --cacert $CACERT -H "Content-Type: application/json" -H "Accept: application/json, */*" -H "Authorization: Bearer $TOKEN" "$APISERVER/api/v1/nodes/$node" -o $outfile)
+if [[ $http_code -ne 200 ]]; then
+    echo "{\"Result\": \"Failure\", \"httpReturnCode\":$http_code}" | jq '.'
+    exit 1
+fi
+cat $outfile
diff --git a/hbase-kubernetes-deployment/base/scripts/exists_configmap.sh b/hbase-kubernetes-deployment/base/scripts/exists_configmap.sh
new file mode 100755
index 0000000..6b29b65
--- /dev/null
+++ b/hbase-kubernetes-deployment/base/scripts/exists_configmap.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Check that the passed-in configmap exists.
+# Also checks for a configmap with the POD_NAME as a suffix.
+# Returns zero if found.
+set -x
+configmap_name="${1}"
+outfile=$(mktemp /tmp/$(basename $0).XXXX)
+trap 'rm -f -- "$outfile"' EXIT
+script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
+source "${script_dir}/apiserver_access.sh"
+# Following model described here: https://chengdol.github.io/2019/11/06/k8s-api/
+# http_code is the return status code
+# From https://docs.okd.io/3.7/rest_api/api/v1.ConfigMap.html#Delete-api-v1-namespaces-namespace-configmaps-name
+http_code=$(curl -w  "%{http_code}" -sS --cacert $CACERT -H "Content-Type: application/json" -H "Accept: application/json, */*" -H "Authorization: Bearer $TOKEN" "$APISERVER/api/v1/namespaces/$NAMESPACE/configmaps/$configmap_name" -o $outfile)
+[[ $http_code -eq 200 ]] || (
+  # The configmap does not exist. Look for a configmap with this POD_NAME as a suffix too.
+  http_code=$(curl -w  "%{http_code}" -sS --cacert $CACERT -H "Content-Type: application/json" -H "Accept: application/json, */*" -H "Authorization: Bearer $TOKEN" "$APISERVER/api/v1/namespaces/$NAMESPACE/configmaps/$configmap_name.${POD_NAME}" -o $outfile)
+  [[ $http_code -eq 200 ]]
+)
diff --git a/hbase-kubernetes-deployment/base/scripts/get_node_labels.sh b/hbase-kubernetes-deployment/base/scripts/get_node_labels.sh
new file mode 100644
index 0000000..2262c4f
--- /dev/null
+++ b/hbase-kubernetes-deployment/base/scripts/get_node_labels.sh
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Fetch the labels json object for named node
+script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
+${script_dir}/describe_node.sh "${1}" | jq -r '.metadata.labels'
diff --git a/hbase-kubernetes-deployment/base/scripts/get_node_labels_from_pod_IP.sh b/hbase-kubernetes-deployment/base/scripts/get_node_labels_from_pod_IP.sh
new file mode 100644
index 0000000..7d682be
--- /dev/null
+++ b/hbase-kubernetes-deployment/base/scripts/get_node_labels_from_pod_IP.sh
@@ -0,0 +1,31 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Get the labels json object of the node upon which the pod with the provided pod IP is running
+script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
+source "${script_dir}/log.sh" "$TOPOLOGY_LOG" # source log function; the $TOPOLOGY_LOG variable is set in topology.sh
+nodeName=$(${script_dir}/get_node_name_from_pod_IP.sh "${1}") # requesting node name based on pod IP
+if [[ "$nodeName" == "null" ]] # if no node is found when querying with this pod IP
+then
+  log -w "Unhandled case: Kubernetes instance not found for this pod IP"
+  echo "null" # null will get passed back to the topology caller; then when looking for the pertinent labels topology.sh will label this DN with the default rack
+else
+  log "nodeName found in pod description: $nodeName"
+  nodeLabels=$(${script_dir}/get_node_labels.sh $nodeName) # getting the labels of the Kube node the pod is running on
+  log "node metadata labels: $nodeLabels"
+  echo $nodeLabels
+fi
diff --git a/hbase-kubernetes-deployment/base/scripts/get_node_name_from_pod_IP.sh b/hbase-kubernetes-deployment/base/scripts/get_node_name_from_pod_IP.sh
new file mode 100644
index 0000000..991c8d5
--- /dev/null
+++ b/hbase-kubernetes-deployment/base/scripts/get_node_name_from_pod_IP.sh
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+# Get the name of the Kubernetes node with the provided hadoop pod IP
+set -x
+podIP="${1}" # this will be the IP of a datanode
+outfile=$(mktemp /tmp/$(basename $0).XXXX)
+trap '{ rm -f -- "$outfile"; }' EXIT
+script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
+source "${script_dir}/apiserver_access.sh"
+# Following model described here: https://chengdol.github.io/2019/11/06/k8s-api/
+# http_code is the return status code
+http_code=$(curl -w  "%{http_code}" -sS --cacert $CACERT -H "Content-Type: application/json" -H "Accept: application/json, */*" -H "Authorization: Bearer $TOKEN" "$APISERVER/api/v1/namespaces/hadoop/pods?fieldSelector=status.podIP%3D$podIP" -o $outfile)
+if [[ $http_code -ne 200 ]]; then
+    echo "{\"Result\": \"Failure\", \"httpReturnCode\":$http_code}" | jq '.'
+    exit 1
+fi
+
+# using jq, only return the name of the node containing this pod; jq will return null if no node is found
+cat $outfile | jq -r .items[0].spec.nodeName
diff --git a/hbase-kubernetes-deployment/base/scripts/get_statefulset.sh b/hbase-kubernetes-deployment/base/scripts/get_statefulset.sh
new file mode 100755
index 0000000..2595ab0
--- /dev/null
+++ b/hbase-kubernetes-deployment/base/scripts/get_statefulset.sh
@@ -0,0 +1,32 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Get the description of the named statefulset
+set -x
+statefulset="${1}"
+outfile=$(mktemp /tmp/$(basename $0).XXXX)
+trap '{ rm -f -- "$outfile"; }' EXIT
+script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
+source "${script_dir}/apiserver_access.sh"
+# Following model described here: https://chengdol.github.io/2019/11/06/k8s-api/
+# http_code is the return status code
+http_code=$(curl -w  "%{http_code}" -sS --cacert $CACERT -H "Content-Type: application/json" -H "Accept: application/json, */*" -H "Authorization: Bearer $TOKEN" "$APISERVER/apis/apps/v1/namespaces/$NAMESPACE/statefulsets/$statefulset" -o $outfile)
+if [[ $http_code -ne 200 ]]; then
+    echo "{\"Result\": \"Failure\", \"httpReturnCode\":$http_code}" | jq '.'
+    exit 1
+fi
+cat $outfile
diff --git a/hbase-kubernetes-deployment/base/scripts/get_statefulset_replica_count.sh b/hbase-kubernetes-deployment/base/scripts/get_statefulset_replica_count.sh
new file mode 100755
index 0000000..c1e2f1a
--- /dev/null
+++ b/hbase-kubernetes-deployment/base/scripts/get_statefulset_replica_count.sh
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Fetch the replica count for named statefulset
+script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
+${script_dir}/get_statefulset.sh "${1}" | jq '.spec.replicas'
diff --git a/hbase-kubernetes-deployment/base/scripts/jmxping.sh b/hbase-kubernetes-deployment/base/scripts/jmxping.sh
new file mode 100755
index 0000000..d3b145f
--- /dev/null
+++ b/hbase-kubernetes-deployment/base/scripts/jmxping.sh
@@ -0,0 +1,96 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Usage: jmxping.sh <ROLE> <HEADLESS-SERVICE> [<COUNT>]
+# JMX ping that there are at least '<COUNT>' instances of '<ROLE>'
+# running in the sub-domain specified by <HEADLESS-SERVICE>
+# (See https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#stable-network-id).
+# If no '<COUNT>' supplied, we read the replica count from passed
+# in '<ROLE>' statefulset from apiserver.
+set -x
+role="${1}"
+service="${2}"
+count_param="${3}"
+# Schema
+schema=http
+if [[ ${HTTP_POLICY} == HTTPS_* ]]; then
+  schema=https
+fi
+# Jmxport to use
+case "${role}" in
+  datanode)
+    jmxport=9864
+    if [[ ${HTTP_POLICY} == HTTPS_* ]]; then
+      # If HTTP policy is https, use https jmx port.
+      jmxport=9865
+    fi
+    ;;
+  namenode)
+    jmxport=9870
+    if [[ ${HTTP_POLICY} == HTTPS_* ]]; then
+      # If HTTP policy is https, use https jmx port.
+      jmxport=9871
+    fi
+    ;;
+  journalnode)
+    jmxport=8480
+    if [[ ${HTTP_POLICY} == HTTPS_* ]]; then
+      # If HTTP policy is https, use https jmx port.
+      jmxport=8481
+    fi
+    ;;
+  master)
+    jmxport=16010
+    ;;
+  regionserver)
+    jmxport=16030
+    ;;
+  *)
+    exit 1
+    ;;
+esac
+
+interval=5
+timeout=$((60 * 60))
+while ((timeout > 0))
+do
+  # The statefulset we depend on may not have deployed yet... so the first
+  # attempts at getting replicas may fail.
+  # https://stackoverflow.com/questions/3601515/how-to-check-if-a-variable-is-set-in-bash
+  replicas=$(/tmp/scripts/get_statefulset_replica_count.sh $role)
+  count=${count_param}
+  if [ "x" = "${count_param}x" ]; then
+    count=${replicas}
+  else
+    count=$((replicas < count_param? replicas : count_param ))
+  fi
+  seq_end=$(( $count - 1 ))
+  total=0
+  for i in $( seq 0 $seq_end ); do
+    # Url is http://journalnode-1:8480/jmx?qry=java.lang:type=OperatingSystem
+    url="${schema}://${role}-${i}.${service}:${jmxport}/jmx?qry=java.lang:type=OperatingSystem"
+    # grep -c prints 1 if the metric was found, 0 otherwise.
+    result=$(curl --cacert /tmp/scratch/ca.crt -v "$url" | grep -c SystemLoadAverage)
+    ((total+=result))
+    (($total != $count)) || exit 0
+  done
+  timeout=$(($timeout - $interval))
+  echo "Failed; sleeping $interval, then retrying for $timeout more seconds"
+  sleep $interval
+done
+echo "Timedout!"
+exit 1
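Example invocations matching the usage comment at the top of the script ('hadoop' is the headless service name from environment-configmap.yaml):

```shell
# Block until every namenode replica answers its JMX endpoint:
/tmp/scripts/jmxping.sh namenode hadoop
# Block until at least 3 journalnodes answer (count is capped at the
# statefulset's replica count):
/tmp/scripts/jmxping.sh journalnode hadoop 3
```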
diff --git a/hbase-kubernetes-deployment/base/scripts/log.sh b/hbase-kubernetes-deployment/base/scripts/log.sh
new file mode 100644
index 0000000..56ba8fb
--- /dev/null
+++ b/hbase-kubernetes-deployment/base/scripts/log.sh
@@ -0,0 +1,42 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# When sourcing log.sh, the first argument should be the file within $HADOOP_LOG_DIR that will be written to.
+
+filename=${1}
+LOG_FILEPATH="$HADOOP_LOG_DIR/$filename"
+
+# logs provided message to whichever filepath is provided when sourcing log.sh
+# Use -e for error logging, -w for warning logs
+# log [-ew] MESSAGE
+log(){
+  prefix="" # No prefix with default INFO-level logging
+  while getopts ":ew" arg; do
+    case $arg in
+      e) # change prefix to ERROR: in logs
+        prefix="ERROR:"
+        shift
+        ;;
+      w) # change prefix to WARNING: in logs
+        prefix="WARNING:"
+        shift
+        ;;
+    esac
+  done
+  message=${1}
+  echo "$(date +"%F %T") $prefix $message" >> $LOG_FILEPATH
+}
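Usage, as exercised by topology.sh below:

```shell
# Source with a filename under $HADOOP_LOG_DIR, then log at INFO (default),
# WARNING (-w), or ERROR (-e) level.
source /tmp/scripts/log.sh topology.log
log "datanode IP: 10.1.2.3"
log -w "Unhandled case: Kubernetes instance not found for this pod IP"
```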
diff --git a/hbase-kubernetes-deployment/base/scripts/topology.sh b/hbase-kubernetes-deployment/base/scripts/topology.sh
new file mode 100755
index 0000000..453641b
--- /dev/null
+++ b/hbase-kubernetes-deployment/base/scripts/topology.sh
@@ -0,0 +1,65 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Uses the topology-script mechanism for HDFS rack awareness: https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-common/RackAwareness.html
+
+# This script takes one or more datanode IPs as args and prints a rack name for each pod based on the EKS instance it is running on.
+# It looks for information about the EKS instance's partition placement group: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html#placement-groups-partition
+# as well as the EKS instance's availability zone according to AWS: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-availability-zones
+
+# If partition placement group information is found (via the $partition_group_label variable defined below),
+# then the rack printed will be "partition-group-<PARTITION NUMBER>".
+
+# Otherwise, the script falls back to availability zone information and prints a
+# rack label like "availability-zone-<AVAILABILITY ZONE NAME>".
+
+# The supposition here is that when datanodes crash, the namenodes will assign the same rack when the pod comes back up.
+# This is the behavior observed when terminating datanodes manually and watching the topology logs as they re-initialize.
+
+script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
+
+TOPOLOGY_LOG="topology.log" # filepath within $HADOOP_LOG_DIR wherein topology logs will be placed
+export TOPOLOGY_LOG
+
+source "${script_dir}/log.sh" $TOPOLOGY_LOG
+partition_group_label="partition_number" # this is an assumption made based on the Siri cluster at the moment; modify this variable if the Kube node label signifying placement groups is named differently
+
+log "argument(s) input to script: $*"
+for dn_IP in "$@"
+do
+  log "datanode IP: $dn_IP"
+  nodeLabels=$(${script_dir}/get_node_labels_from_pod_IP.sh "$dn_IP")
+  nodePartitionGroup=$(echo "$nodeLabels" | jq -r ".$partition_group_label")
+  if [[ "$nodePartitionGroup" == "null" ]];
+  then
+    nodeAZ=$(echo "$nodeLabels" | jq -r '."topology.kubernetes.io/zone"')
+    if [[ "$nodeAZ" == "null" ]];
+    then
+      rack="/default-rack" # when no partition group or availability zone info is found for the datanode
+      log "No partition groups or availability zones found; output default rack $rack for $dn_IP"
+      echo $rack
+    else
+      rack="/availability-zone-$nodeAZ"
+      log "output rack $rack for $dn_IP"
+      echo $rack
+    fi
+  else
+    rack="/partition-group-$nodePartitionGroup"
+    log "output rack $rack for $dn_IP"
+    echo $rack
+  fi
+done
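The script is wired up via net.topology.script.file.name in the overlay's core-site.xml (later in this diff). Invoked by the namenode, it behaves roughly as below; the IP and zone are illustrative:

```shell
# Hypothetical run, assuming the node carries a topology.kubernetes.io/zone
# label but no partition_number label:
/tmp/scripts/topology.sh 10.1.2.3
# -> /availability-zone-us-west-2a
# With no matching labels at all, it falls back to:
# -> /default-rack
```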
diff --git a/hbase-kubernetes-deployment/base/ssl-client.xml b/hbase-kubernetes-deployment/base/ssl-client.xml
new file mode 100644
index 0000000..3a8ffff
--- /dev/null
+++ b/hbase-kubernetes-deployment/base/ssl-client.xml
@@ -0,0 +1,52 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<configuration>
+  <property>
+    <name>ssl.client.keystore.keypassword</name>
+    <value>changeit</value>
+  </property>
+  <property>
+    <name>ssl.client.keystore.location</name>
+    <value>/tmp/scratch/keystore.jks</value>
+  </property>
+  <property>
+    <name>ssl.client.keystore.password</name>
+    <value>changeit</value>
+  </property>
+  <property>
+    <name>ssl.client.keystore.type</name>
+    <value>jks</value>
+  </property>
+  <property>
+    <name>ssl.client.truststore.location</name>
+    <value>/tmp/scratch/keystore.jks</value>
+  </property>
+  <property>
+    <name>ssl.client.truststore.password</name>
+    <value>changeit</value>
+  </property>
+  <property>
+    <name>ssl.client.truststore.reload.interval</name>
+    <value>10000</value>
+  </property>
+  <property>
+    <name>ssl.client.truststore.type</name>
+    <value>jks</value>
+  </property>
+</configuration>
diff --git a/hbase-kubernetes-deployment/base/ssl-server.xml b/hbase-kubernetes-deployment/base/ssl-server.xml
new file mode 100644
index 0000000..25e26dd
--- /dev/null
+++ b/hbase-kubernetes-deployment/base/ssl-server.xml
@@ -0,0 +1,56 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<configuration>
+  <property>
+    <name>ssl.server.keystore.keypassword</name>
+    <value></value>
+  </property>
+  <property>
+    <name>ssl.server.keystore.password</name>
+    <value>changeit</value>
+  </property>
+  <property>
+    <name>ssl.server.keystore.location</name>
+    <value>/tmp/scratch/keystore.jks</value>
+  </property>
+  <property>
+    <name>ssl.server.keystore.type</name>
+    <value>jks</value>
+  </property>
+  <property>
+    <name>ssl.server.truststore.location</name>
+    <value>/tmp/scratch/truststore.jks</value>
+  </property>
+  <property>
+    <name>ssl.server.truststore.keypassword</name>
+    <value></value>
+  </property>
+  <property>
+    <name>ssl.server.truststore.password</name>
+    <value>changeit</value>
+  </property>
+  <property>
+    <name>ssl.server.truststore.reload.interval</name>
+    <value>10000</value>
+  </property>
+  <property>
+    <name>ssl.server.truststore.type</name>
+    <value>jks</value>
+  </property>
+</configuration>
diff --git a/hbase-kubernetes-deployment/dockerfiles/kuttl/README.md b/hbase-kubernetes-deployment/dockerfiles/kuttl/README.md
index 221066a..55ac855 100644
--- a/hbase-kubernetes-deployment/dockerfiles/kuttl/README.md
+++ b/hbase-kubernetes-deployment/dockerfiles/kuttl/README.md
@@ -62,12 +62,12 @@ $ docker container run --rm -it ${USER}/hbase/operator-tools/kuttl:latest --help
 ```
 
 Running tests in the image requires mounting the workspace into the container image and passing
-appropriate parameters to `kuttl`. For example, run the "small" tests like this:
+appropriate parameters to `kuttl`. For example, run the "unit" tests like this:
 
 ```shell
 $ docker container run \
   --mount type=bind,source=$(pwd),target=/workspace \
   --workdir /workspace \
   ${USER}/hbase/operator-tools/kuttl:latest \
-  --config tests/kuttl-test-small.yaml
+  --config tests/kuttl-test-unit.yaml
 ```
diff --git a/hbase-kubernetes-deployment/overlays/hdfs/core-site.xml b/hbase-kubernetes-deployment/overlays/hdfs/core-site.xml
new file mode 100644
index 0000000..782c61f
--- /dev/null
+++ b/hbase-kubernetes-deployment/overlays/hdfs/core-site.xml
@@ -0,0 +1,96 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<configuration>
+  <property>
+    <name>fs.defaultFS</name>
+    <value>hdfs://${env.HADOOP_SERVICE}</value>
+  </property>
+  <property>
+    <name>fs.trash.interval</name>
+    <value>10080</value>
+  </property>
+  <property>
+    <name>fs.trash.checkpoint.interval</name>
+    <value>10080</value>
+  </property>
+  <property>
+    <name>ha.zookeeper.acl</name>
+    <value>world:anyone:rwcda</value>
+  </property>
+  <property>
+    <name>ha.zookeeper.auth</name>
+    <value></value>
+  </property>
+  <property>
+    <name>ha.zookeeper.quorum</name>
+    <value>${env.HA_ZOOKEEPER_QUORUM}</value>
+  </property>
+  <property>
+    <name>ha.zookeeper.parent-znode</name>
+    <value>/</value>
+  </property>
+  <property>
+    <name>hadoop.proxyuser.hdfs.hosts</name>
+    <value>*</value>
+  </property>
+  <property>
+    <name>hadoop.proxyuser.hdfs.users</name>
+    <value>*</value>
+  </property>
+  <property>
+    <name>hadoop.user.group.static.mapping.overrides</name>
+    <value>hdfs=supergroup;nobody=;</value>
+  </property>
+  <property>
+    <name>net.topology.script.file.name</name>
+    <value>/tmp/scripts/topology.sh</value>
+  </property>
+  <property>
+    <name>net.topology.script.number.args</name>
+    <value>1</value>
+  </property>
+  <property>
+    <name>hadoop.rpc.protection</name>
+    <value>authentication</value>
+  </property>
+  <property>
+    <name>hadoop.security.authorization</name>
+    <value>false</value>
+  </property>
+  <property>
+    <name>hadoop.ssl.client.conf</name>
+    <value>ssl-client.xml</value>
+  </property>
+  <property>
+    <name>hadoop.ssl.enabled</name>
+    <value>false</value>
+  </property>
+  <property>
+    <name>hadoop.ssl.keystores.factory.class</name>
+    <value>org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory</value>
+  </property>
+  <property>
+    <name>hadoop.ssl.require.client.cert</name>
+    <value>false</value>
+  </property>
+  <property>
+    <name>hadoop.ssl.server.conf</name>
+    <value>ssl-server.xml</value>
+  </property>
+</configuration>
diff --git a/hbase-kubernetes-deployment/overlays/hdfs/dn-service.yaml b/hbase-kubernetes-deployment/overlays/hdfs/dn-service.yaml
new file mode 100644
index 0000000..4151527
--- /dev/null
+++ b/hbase-kubernetes-deployment/overlays/hdfs/dn-service.yaml
@@ -0,0 +1,29 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+kind: Service
+apiVersion: v1
+metadata:
+  name: datanode
+  labels:
+    jmxexporter: enabled
+spec:
+  selector:
+    role: datanode
+  clusterIP: None
+  ports:
+  - name: jmxexporter
+    port: 8000
diff --git a/hbase-kubernetes-deployment/overlays/hdfs/dn-statefulset.yaml b/hbase-kubernetes-deployment/overlays/hdfs/dn-statefulset.yaml
new file mode 100644
index 0000000..772b87d
--- /dev/null
+++ b/hbase-kubernetes-deployment/overlays/hdfs/dn-statefulset.yaml
@@ -0,0 +1,223 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: datanode
+spec:
+  podManagementPolicy: Parallel
+  replicas: 1
+  selector:
+    matchLabels:
+      role: datanode
+  serviceName: hadoop
+  template:
+    metadata:
+      labels:
+        role: datanode
+    spec:
+      affinity:
+        podAntiAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+          - labelSelector:
+              matchLabels:
+                role: datanode
+            topologyKey: kubernetes.io/hostname
+      containers:
+      - image: hadoop
+        name: datanode
+        command:
+          - /bin/bash
+          - -c
+          - |-
+            # Shell context so we can pull in the environment variables set in the container and
+            # via the env and envFrom.
+            # See https://stackoverflow.com/questions/57885828/netty-cannot-access-class-jdk-internal-misc-unsafe
+            HADOOP_LOGFILE="hdfs-${HOSTNAME}.log" \
+            HDFS_DATANODE_OPTS=" \
+              -XX:MaxRAMPercentage=${JVM_HEAP_PERCENTAGE_OF_RESOURCE_LIMIT} \
+              -XX:InitialRAMPercentage=${JVM_HEAP_PERCENTAGE_OF_RESOURCE_LIMIT} \
+              -Djava.security.properties=/tmp/scratch/java.security \
+              -javaagent:${JMX_PROMETHEUS_JAR}=8000:/tmp/scratch/jmxexporter.yaml \
+              -Djava.library.path=${HADOOP_HOME}/lib/native \
+              --add-opens java.base/jdk.internal.misc=ALL-UNNAMED \
+              -Dio.netty.tryReflectionSetAccessible=true \
+              -Xlog:gc:/var/log/hadoop/gc.log:time,uptime:filecount=10,filesize=100M" \
+            hdfs datanode
+        # For now, just fetch local /jmx
+        # Kubelet only exposes probe failures, not successes: https://stackoverflow.com/questions/34455040/kubernetes-liveness-probe-logging
+        # TODO: do better -- check that this DN has successfully registered with the NN.
+        livenessProbe:
+          httpGet:
+            path: /jmx?qry=java.lang:type=OperatingSystem
+            # 9865 if HTTPS
+            port: 9864
+          initialDelaySeconds: 10
+          periodSeconds: 10
+          failureThreshold: 3
+        readinessProbe:
+          httpGet:
+            path: /jmx?qry=java.lang:type=OperatingSystem
+            # 9865 if HTTPS
+            port: 9864
+          initialDelaySeconds: 10
+          periodSeconds: 10
+          failureThreshold: 3
+        startupProbe:
+          httpGet:
+            path: /jmx?qry=java.lang:type=OperatingSystem
+            # 9865 if HTTPS
+            port: 9864
+          initialDelaySeconds: 10
+          failureThreshold: 30
+          periodSeconds: 10
+        resources:
+          requests:
+            cpu: '0.2'
+            memory: 1Gi
+          limits:
+            cpu: '1.0'
+            memory: 1.5Gi
+        envFrom:
+        - configMapRef:
+            name: environment
+        env:
+        # The Kubernetes node this container is running on, not the HDFS namenode.
+        - name: NODE_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: spec.nodeName
+        - name: POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        - name: POD_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        ports:
+        - name: http
+          containerPort: 9864
+        - name: https
+          containerPort: 9865
+        - name: data
+          containerPort: 9866
+        - name: ipc
+          containerPort: 9867
+        - name: jmx
+          containerPort: 9864
+        volumeMounts:
+        - mountPath: /etc/hadoop
+          name: hadoop-configuration
+        - mountPath: /var/log/hadoop
+          name: hadoop-logs
+        - mountPath: /tmp/scratch
+          name: scratch
+        - mountPath: /tmp/scripts
+          name: scripts
+        - mountPath: /data00
+          name: data00
+      initContainers:
+      - image: hadoop
+        name: bootstrapper
+        imagePullPolicy: IfNotPresent
+        command:
+        - /bin/bash
+        - -c
+        - |-
+          set -xe
+          mkdir -p ${HADOOP_LOG_DIR} || echo $?
+          chown -R ${USER} ${HADOOP_LOG_DIR}
+          # If format-hdfs configmap present, format.
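+          # ('!' inverts the check: the subshell that wipes the data dirs runs
+          # only when the format-hdfs configmap exists.)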
+          ! /tmp/scripts/exists_configmap.sh format-hdfs || (
+            for dir in $( echo "${DATANODE_DATA_DIR}" | tr ',' '\n')
+            do
+              rm -rf ${dir}
+            done
+          )
+          for dir in $( echo "${DATANODE_DATA_DIR}" | tr ',' '\n')
+          do
+            mkdir -p ${dir} || :
+            chown -R ${USER} ${dir}
+          done
+          df -h
+          cp /tmp/global-files/* /tmp/scratch/
+          # Wait for the nns to come up.
+          /tmp/scripts/jmxping.sh namenode ${HADOOP_SERVICE}
+        securityContext:
+          # Run bootstrapper as root so it can set ${USER} as owner of the data volume
+          allowPrivilegeEscalation: false
+          runAsUser: 0
+        resources:
+          requests:
+            cpu: '0.2'
+            memory: 256Mi
+          limits:
+            cpu: '0.5'
+            memory: 512Mi
+        envFrom:
+        - configMapRef:
+            name: environment
+        env:
+        # Used by scripts that run during bootstrap
+        - name: POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        - name: POD_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        volumeMounts:
+        - mountPath: /data00
+          name: data00
+        - mountPath: /tmp/scripts
+          name: scripts
+        # Scratch dir is a location where init containers place items for later use
+        # by the main containers when they run.
+        - mountPath: /tmp/scratch
+          name: scratch
+        - mountPath: /tmp/global-files
+          name: global-files
+      serviceAccountName: hadoop
+      volumes:
+      - configMap:
+          name: hadoop-configuration
+        name: hadoop-configuration
+      - configMap:
+          name: scripts
+          defaultMode: 0555
+        name: scripts
+      - configMap:
+          name: global-files
+        name: global-files
+      - emptyDir: {}
+        name: hadoop-logs
+      # Scratch dir is a location where init containers place items for later use
+      # by the main containers when they run.
+      - emptyDir: {}
+        name: scratch
+  updateStrategy:
+    type: RollingUpdate
+  volumeClaimTemplates:
+  - metadata:
+      name: data00
+    spec:
+      accessModes: ["ReadWriteOnce"]
+      resources:
+        requests:
+          storage: 10Gi
diff --git a/hbase-kubernetes-deployment/overlays/hdfs/hdfs-site.xml b/hbase-kubernetes-deployment/overlays/hdfs/hdfs-site.xml
new file mode 100644
index 0000000..2ae4249
--- /dev/null
+++ b/hbase-kubernetes-deployment/overlays/hdfs/hdfs-site.xml
@@ -0,0 +1,274 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<configuration>
+  <property>
+    <name>dfs.block.replicator.classname</name>
+    <value>org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolerant</value>
+  </property>
+  <property>
+    <name>dfs.blocksize</name>
+    <value>64m</value>
+  </property>
+  <property>
+    <name>dfs.datanode.address</name>
+    <value>0.0.0.0:9866</value>
+  </property>
+  <property>
+    <name>dfs.datanode.balance.bandwidthPerSec</name>
+    <value>20m</value>
+  </property>
+  <property>
+    <name>dfs.datanode.balance.max.concurrent.moves</name>
+    <value>100</value>
+  </property>
+  <property>
+    <name>dfs.datanode.data.dir</name>
+    <value>${env.DATANODE_DATA_DIR}</value>
+  </property>
+  <property>
+    <name>dfs.datanode.failed.volumes.tolerated</name>
+    <value>0</value>
+  </property>
+  <property>
+    <name>dfs.datanode.du.reserved</name>
+    <value>1073741824</value>
+  </property>
+  <property>
+    <name>dfs.datanode.fileio.profiling.sampling.percentage</name>
+    <value>10</value>
+  </property>
+  <property>
+    <name>dfs.datanode.http.address</name>
+    <value>0.0.0.0:9864</value>
+  </property>
+  <property>
+    <name>dfs.datanode.https.address</name>
+    <value>0.0.0.0:9865</value>
+  </property>
+  <property>
+    <name>dfs.datanode.ipc.address</name>
+    <value>0.0.0.0:9867</value>
+  </property>
+  <property>
+    <name>dfs.datanode.max.locked.memory</name>
+    <value>0</value>
+  </property>
+  <property>
+    <name>dfs.datanode.peer.stats.enabled</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>dfs.encrypt.data.transfer</name>
+    <value>false</value>
+  </property>
+  <property>
+    <name>dfs.encrypt.data.transfer.algorithm</name>
+    <value>rc4</value>
+  </property>
+  <property>
+    <name>dfs.ha.automatic-failover.enabled</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>dfs.ha.fencing.methods</name>
+    <value>shell(/usr/bin/true)</value>
+  </property>
+  <property>
+    <name>dfs.journalnode.edits.dir</name>
+    <value>${env.JOURNALNODE_DATA_DIR}</value>
+  </property>
+  <property>
+    <name>dfs.journalnode.http-address</name>
+    <value>0.0.0.0:8480</value>
+  </property>
+  <property>
+    <name>dfs.journalnode.https-address</name>
+    <value>0.0.0.0:8481</value>
+  </property>
+  <property>
+    <name>dfs.journalnode.rpc-address</name>
+    <value>0.0.0.0:8485</value>
+  </property>
+  <property>
+    <name>dfs.namenode.handler.count</name>
+    <value>64</value>
+  </property>
+  <!--
+  <property>
+    <name>dfs.namenode.hosts.provider.classname</name>
+    <value>org.apache.hadoop.hdfs.server.blockmanagement.CombinedHostFileManager</value>
+  </property>
+  <property>
+    <name>dfs.hosts</name>
+    <value>/etc/hadoop/hosts.json</value>
+  </property>
+  -->
+  <!--The 0.0.0.0 bind-host settings below are needed when you port-forward to http/https on the container-->
+  <property>
+    <name>dfs.namenode.http-bind-host</name>
+    <value>0.0.0.0</value>
+  </property>
+  <property>
+    <name>dfs.namenode.https-bind-host</name>
+    <value>0.0.0.0</value>
+  </property>
+  <property>
+    <name>dfs.namenode.name.dir</name>
+    <value>${env.NAMENODE_DATA_DIR}</value>
+  </property>
+  <property>
+    <name>dfs.namenode.replication.max-streams</name>
+    <value>20</value>
+  </property>
+  <property>
+    <name>dfs.namenode.replication.max-streams-hard-limit</name>
+    <value>40</value>
+  </property>
+  <property>
+    <name>dfs.namenode.replication.min</name>
+    <value>${env.DFS_REPLICATION}</value>
+  </property>
+  <property>
+    <name>dfs.namenode.replication.work.multiplier.per.iteration</name>
+    <value>10</value>
+  </property>
+  <property>
+    <name>dfs.namenode.safemode.threshold-pct</name>
+    <value>0.9</value>
+  </property>
+  <property>
+    <name>dfs.namenode.service.handler.count</name>
+    <value>64</value>
+  </property>
+  <property>
+    <name>dfs.nameservices</name>
+    <value>${env.HADOOP_SERVICE}</value>
+  </property>
+  <property>
+    <name>dfs.reformat.disabled</name>
+    <value>false</value>
+  </property>
+  <property>
+    <name>dfs.replication</name>
+    <value>${env.DFS_REPLICATION}</value>
+  </property>
+  <property>
+    <name>dfs.replication.max</name>
+    <value>512</value>
+  </property>
+  <property>
+    <name>ipc.8020.callqueue.impl</name>
+    <value>org.apache.hadoop.ipc.FairCallQueue</value>
+  </property>
+  <property>
+    <name>ipc.8020.scheduler.impl</name>
+    <value>org.apache.hadoop.ipc.DecayRpcScheduler</value>
+  </property>
+  <property>
+    <name>zk-dt-secret-manager.zkAuthType</name>
+    <value>digest</value>
+  </property>
+  <property>
+    <name>zk-dt-secret-manager.digest.auth</name>
+    <value>@/etc/hadoop/zookeeper/auth/zk-auth.txt</value>
+  </property>
+  <property>
+    <name>zk-dt-secret-manager.zkConnectionString</name>
+    <value>TODO</value>
+  </property>
+  <property>
+    <name>zk-dt-secret-manager.znodeWorkingPath</name>
+    <value>TODO</value>
+  </property>
+  <property>
+    <name>dfs.client.failover.proxy.provider.hadoop</name>
+    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
+  </property>
+  <!--Below we supply the full name of the pod running the namenode.
+  -->
+  <property>
+    <name>dfs.ha.namenodes.hadoop</name>
+    <value>namenode-0</value>
+  </property>
+  <property>
+    <name>dfs.namenode.http-address.hadoop.namenode-0</name>
+    <value>namenode-0.${env.HADOOP_SERVICE}.${env.POD_NAMESPACE}.${env.DOMAIN}:9870</value>
+  </property>
+  <property>
+    <name>dfs.namenode.https-address.hadoop.namenode-0</name>
+    <value>namenode-0.${env.HADOOP_SERVICE}.${env.POD_NAMESPACE}.${env.DOMAIN}:9871</value>
+  </property>
+  <property>
+    <name>dfs.namenode.rpc-address.hadoop.namenode-0</name>
+    <value>namenode-0.${env.HADOOP_SERVICE}.${env.POD_NAMESPACE}.${env.DOMAIN}:8020</value>
+  </property>
+  <property>
+    <name>dfs.namenode.servicerpc-address.hadoop.namenode-0</name>
+    <value>namenode-0.${env.HADOOP_SERVICE}.${env.POD_NAMESPACE}.${env.DOMAIN}:8022</value>
+  </property>
+  <property>
+    <name>dfs.namenode.lifeline.rpc-address.hadoop.namenode-0</name>
+    <value>namenode-0.${env.HADOOP_SERVICE}.${env.POD_NAMESPACE}.${env.DOMAIN}:8050</value>
+  </property>
+  <property>
+    <name>dfs.client.https.keystore.resource</name>
+    <value>ssl-client.xml</value>
+  </property>
+  <property>
+    <name>dfs.client.https.need-auth</name>
+    <value>false</value>
+  </property>
+  <property>
+    <name>dfs.http.policy</name>
+    <value>${env.HTTP_POLICY}</value>
+  </property>
+  <property>
+    <name>dfs.https.enable</name>
+    <value>${env.DFS_HTTPS_ENABLE}</value>
+  </property>
+  <property>
+    <name>dfs.https.server.keystore.resource</name>
+    <value>ssl-server.xml</value>
+  </property>
+  <property>
+    <name>dfs.namenode.acls.enabled</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>dfs.datanode.use.datanode.hostname</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>dfs.client.use.datanode.hostname</name>
+    <value>true</value>
+  </property>
+  <property>
+    <!--https://log.rowanto.com/posts/why-datanode-is-denied-communication-with-namenode/-->
+    <name>dfs.namenode.datanode.registration.ip-hostname-check</name>
+    <value>false</value>
+  </property>
+  <property>
+    <name>dfs.blockreport.intervalMsec</name>
+    <value>900000</value>
+    <description>Determines the block reporting interval in milliseconds.
+    Report frequently; otherwise, during recovery storms the NN becomes
+    convinced there is no block space left because of reserved 'scheduled
+    space'.
+    </description>
+  </property>
+</configuration>
diff --git a/hbase-kubernetes-deployment/overlays/hdfs/kustomization.yaml b/hbase-kubernetes-deployment/overlays/hdfs/kustomization.yaml
new file mode 100644
index 0000000..6bd3abe
--- /dev/null
+++ b/hbase-kubernetes-deployment/overlays/hdfs/kustomization.yaml
@@ -0,0 +1,36 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+commonLabels:
+  app: hadoop
+
+configMapGenerator:
+- name: hadoop-configuration
+  # Add in single-instance namenode and datanode hdfs-site and core-site.
+  behavior: merge
+  files:
+  - hdfs-site.xml
+  - core-site.xml
+
+resources:
+- nn-statefulset.yaml
+- nn-service.yaml
+- dn-statefulset.yaml
+- dn-service.yaml
+- ../../base
diff --git a/hbase-kubernetes-deployment/overlays/hdfs/nn-service.yaml b/hbase-kubernetes-deployment/overlays/hdfs/nn-service.yaml
new file mode 100644
index 0000000..66ac266
--- /dev/null
+++ b/hbase-kubernetes-deployment/overlays/hdfs/nn-service.yaml
@@ -0,0 +1,29 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+kind: Service
+apiVersion: v1
+metadata:
+  name: namenode
+  labels:
+    jmxexporter: enabled
+spec:
+  selector:
+    role: namenode
+  clusterIP: None
+  ports:
+  - name: jmxexporter
+    port: 8000
diff --git a/hbase-kubernetes-deployment/overlays/hdfs/nn-statefulset.yaml b/hbase-kubernetes-deployment/overlays/hdfs/nn-statefulset.yaml
new file mode 100644
index 0000000..d36a61f
--- /dev/null
+++ b/hbase-kubernetes-deployment/overlays/hdfs/nn-statefulset.yaml
@@ -0,0 +1,325 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+  name: namenode
+spec:
+  minAvailable: 1
+  selector:
+    matchLabels:
+      role: namenode
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: namenode
+spec:
+  podManagementPolicy: Parallel
+  replicas: 1
+  selector:
+    matchLabels:
+      role: namenode
+  serviceName: hadoop
+  template:
+    metadata:
+      labels:
+        role: namenode
+    spec:
+      affinity:
+        podAntiAffinity:
+          preferredDuringSchedulingIgnoredDuringExecution:
+          - podAffinityTerm:
+              labelSelector:
+                matchLabels:
+                  role: namenode
+              topologyKey: kubernetes.io/hostname
+            weight: 30
+      containers:
+      - image: hadoop
+        name: namenode
+        imagePullPolicy: IfNotPresent
+        command:
+          - /bin/bash
+          - -c
+          - |-
+            # Shell context so we can pull in the environment variables set in the container and
+            # via the env and envFrom.
+            # See https://stackoverflow.com/questions/57885828/netty-cannot-access-class-jdk-internal-misc-unsafe
+            HADOOP_LOGFILE="hdfs-${HOSTNAME}.log" \
+            HDFS_NAMENODE_OPTS=" \
+              -XX:MaxRAMPercentage=${JVM_HEAP_PERCENTAGE_OF_RESOURCE_LIMIT} \
+              -XX:InitialRAMPercentage=${JVM_HEAP_PERCENTAGE_OF_RESOURCE_LIMIT} \
+              -Djava.security.properties=/tmp/scratch/java.security \
+              -javaagent:${JMX_PROMETHEUS_JAR}=8000:/tmp/scratch/jmxexporter.yaml \
+              -Djava.library.path=${HADOOP_HOME}/lib/native \
+              --add-opens java.base/jdk.internal.misc=ALL-UNNAMED \
+              -Dio.netty.tryReflectionSetAccessible=true \
+              -Xlog:gc:/var/log/hadoop/gc.log:time,uptime:filecount=10,filesize=100M" \
+            hdfs namenode
+        # For now, just fetch local /jmx
+        # Kubelet only exposes probe failures, not successes: https://stackoverflow.com/questions/34455040/kubernetes-liveness-probe-logging
+        livenessProbe:
+          httpGet:
+            path: /jmx?qry=java.lang:type=OperatingSystem
+            # 9871 if HTTPS
+            port: 9870
+          initialDelaySeconds: 1
+          failureThreshold: 6
+          periodSeconds: 10
+        readinessProbe:
+          httpGet:
+            path: /jmx?qry=java.lang:type=OperatingSystem
+            # 9871 if HTTPS
+            port: 9870
+          initialDelaySeconds: 10
+          failureThreshold: 3
+          periodSeconds: 10
+        startupProbe:
+          httpGet:
+            path: /jmx?qry=java.lang:type=OperatingSystem
+            # 9871 if HTTPS
+            port: 9870
+          initialDelaySeconds: 10
+          failureThreshold: 30
+          periodSeconds: 10
+        resources:
+          requests:
+            cpu: '0.4'
+            memory: 2Gi
+          limits:
+            cpu: '1'
+            memory: 3Gi
+        envFrom:
+        - configMapRef:
+            name: environment
+        env:
+        # The Kubernetes node this container is running on, not the HDFS namenode.
+        - name: NODE_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: spec.nodeName
+        - name: POD_IP
+          valueFrom:
+            fieldRef:
+              fieldPath: status.podIP
+        - name: POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        - name: POD_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        ports:
+        - name: http
+          containerPort: 9870
+        - name: https
+          containerPort: 9871
+        - name: jmx
+          containerPort: 9870
+        - name: rpc
+          containerPort: 8020
+        - name: servicerpc
+          containerPort: 8022
+        - name: lifelinerpc
+          containerPort: 8050
+        volumeMounts:
+        - mountPath: /etc/hadoop
+          name: hadoop-configuration
+        - mountPath: /var/log/hadoop
+          name: hadoop-logs
+        - mountPath: /tmp/scratch
+          name: scratch
+        - mountPath: /tmp/scripts
+          name: scripts
+        - mountPath: /data00
+          name: data00
+      initContainers:
+      - image: hadoop
+        name: bootstrapper
+        imagePullPolicy: IfNotPresent
+        command:
+        # This container runs as root so it can set permissions.
+        - /bin/bash
+        - -c
+        - |-
+          set -xe
+          if [ -n "${QJOURNAL}" ]; then
+            # If QJOURNAL, then HA and journalnodes are in the mix. Wait on them to come up.
+            /tmp/scripts/jmxping.sh journalnode ${HADOOP_SERVICE}
+          fi
+          # Copy over the files under global-files so they are in place for the runtime container.
+          cp /tmp/global-files/* /tmp/scratch/
+          # Set perms
+          chown -R ${USER} ${HADOOP_LOG_DIR}
+          # If format-hdfs configmap present, format.
+          find ${NAMENODE_DATA_DIR} || :
+          ! /tmp/scripts/exists_configmap.sh format-hdfs || (
+            rm -rf ${NAMENODE_DATA_DIR}
+          )
+          chmod 777 /data00
+        securityContext:
+          # Run bootstrapper as root so it can set ${USER} as owner of the data volume
+          allowPrivilegeEscalation: false
+          runAsUser: 0
+        resources:
+          requests:
+            cpu: '0.2'
+            memory: 256Mi
+          limits:
+            cpu: '0.5'
+            memory: 512Mi
+        envFrom:
+        - configMapRef:
+            name: environment
+        env:
+        - name: NODE_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: spec.nodeName
+        - name: POD_IP
+          valueFrom:
+            fieldRef:
+              fieldPath: status.podIP
+        - name: POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        - name: POD_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        volumeMounts:
+        - mountPath: /etc/hadoop
+          name: hadoop-configuration
+        - mountPath: /var/log/hadoop
+          name: hadoop-logs
+        - mountPath: /data00
+          name: data00
+        - mountPath: /etc/hadoop/zookeeper/auth
+          name: zookeeper-credentials
+          readOnly: true
+        - mountPath: /tmp/scripts
+          name: scripts
+        # Scratch dir is a location where init containers place items for later use
+        # by the main containers when they run.
+        - mountPath: /tmp/scratch
+          name: scratch
+        - mountPath: /tmp/global-files
+          name: global-files
+      - image: hadoop
+        name: format-hdfs
+        imagePullPolicy: IfNotPresent
+        command:
+        # Runs as the image/hdfs user.
+        - /bin/bash
+        - -c
+        - |-
+          set -xe
+          find /data00 || echo $?
+          # Run format if no nn dir.
+          if [ ! -d "${NAMENODE_DATA_DIR}" ]; then
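+            # Extract the StatefulSet ordinal from the pod name ("namenode-0" -> "0").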
+            ordinal=$(echo $POD_NAME | sed -e 's/^[^-]*-\(.*\)/\1/')
+            case $ordinal in
+              0)
+                hdfs namenode -format -nonInteractive || (
+                  # Perhaps another nn is active? If so, we should do bootstrap here instead.
+                  hdfs namenode -bootstrapStandby -nonInteractive
+                )
+                ;;
+              *)
+                hdfs namenode -bootstrapStandby -nonInteractive
+                ;;
+            esac
+          fi
+        resources:
+          requests:
+            cpu: '0.2'
+            memory: 256Mi
+          limits:
+            cpu: '0.5'
+            memory: 512Mi
+        envFrom:
+        - configMapRef:
+            name: environment
+        env:
+        - name: NODE_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: spec.nodeName
+        - name: POD_IP
+          valueFrom:
+            fieldRef:
+              fieldPath: status.podIP
+        - name: POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        - name: POD_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        volumeMounts:
+        - mountPath: /etc/hadoop
+          name: hadoop-configuration
+        - mountPath: /var/log/hadoop
+          name: hadoop-logs
+        - mountPath: /data00
+          name: data00
+        - mountPath: /etc/hadoop/zookeeper/auth
+          name: zookeeper-credentials
+          readOnly: true
+        - mountPath: /tmp/scripts
+          name: scripts
+        # Scratch dir is a location where init containers place items for later use
+        # by the main containers when they run.
+        - mountPath: /tmp/scratch
+          name: scratch
+      serviceAccountName: hadoop
+      volumes:
+      - configMap:
+          name: hadoop-configuration
+        name: hadoop-configuration
+      - configMap:
+          name: scripts
+          defaultMode: 0555
+        name: scripts
+      - configMap:
+          name: global-files
+        name: global-files
+      - emptyDir: {}
+        name: hadoop-logs
+      # Scratch dir is a location where init containers place items for later use
+      # by the main containers when they run.
+      - emptyDir: {}
+        name: scratch
+      - secret:
+          secretName: zookeeper-credentials
+          defaultMode: 0400
+          optional: true
+        name: zookeeper-credentials
+  updateStrategy:
+    type: RollingUpdate
+  volumeClaimTemplates:
+  - metadata:
+      name: data00
+    spec:
+      accessModes: ["ReadWriteOnce"]
+      resources:
+        requests:
+          storage: 2Gi
diff --git a/hbase-kubernetes-deployment/tests/bin/kustomize_into_tmpdir.sh b/hbase-kubernetes-deployment/tests/bin/kustomize_into_tmpdir.sh
new file mode 100755
index 0000000..6b8debe
--- /dev/null
+++ b/hbase-kubernetes-deployment/tests/bin/kustomize_into_tmpdir.sh
@@ -0,0 +1,86 @@
+#!/usr/bin/env bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Materialize a kustomize directory for a kuttl test.
+#
+# Kustomize is clunky for automated testing. It's pretty opinionated in that it will only evaluate
+# a directory off of disk -- you cannot generate a kustomization and pass it in via stdin.
+# In order to use kuttl generated namespaces within the kustomization, we have to modify the
+# kustomization.yaml before applying it. If we modify that file in the source tree, we end up with
+# the test namespace appended to the file under source control. So, this script creates a temp
+# directory, copies all the resources into that directory, and modifies the kustomization.yaml as
+# necessary. It then runs `kubectl apply -k` against that temporary directory.
+#
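+# For example, with NAMESPACE=kuttl-test-xyz, a local kustomization.yaml entry such as
+#   resources: [ ../test_base ]
+# is copied into /tmp/kuttl-test-xyz.XXXXXXXXXX/ and rewritten to
+#   namespace: kuttl-test-xyz
+#   resources: [ '../../<absolute path to>/test_base' ]
+# so that the relative reference still resolves from the temporary directory.
+#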
+
+declare DEBUG="${DEBUG:-false}"
+if [ "${DEBUG}" = 'true' ] ; then
+  set -x
+fi
+
+set -eou pipefail
+
+declare NAMESPACE
+declare NEW_RESOURCES='[]'
+declare NEW_COMPONENTS='[]'
+declare kustomize_dir
+declare -a rewritten_resources=()
+declare -a rewritten_components=()
+
+kustomize_dir="$(mktemp -d -p /tmp "${NAMESPACE}.XXXXXXXXXX")"
+trap '[ -d "${kustomize_dir}" ] && rm -rf "${kustomize_dir}"' EXIT
+
+cp -r ./* "${kustomize_dir}/"
+
+for r in $(yq '.resources[]' kustomization.yaml) ; do
+  if [[ "${r}" =~ ^\.\./.* ]] ; then
+    # resolve the new relative location for any resource path that is not in the local directory
+    canonized="$(cd "${r}" ; pwd)"
+    r="../..${canonized}"
+  fi
+  rewritten_resources+=("'${r}'")
+done
+if [ "${#rewritten_resources[@]}" -gt 0 ] ; then
+    NEW_RESOURCES="[ $(printf '%s,' "${rewritten_resources[@]}") ]"
+fi
+
+for r in $(yq '.components[]' kustomization.yaml) ; do
+  if [[ "${r}" =~ ^\.\./.* ]] ; then
+    # resolve the new relative location for any resource path that is not in the local directory
+    canonized="$(cd "${r}" ; pwd)"
+    r="../..${canonized}"
+  fi
+  rewritten_components+=("'${r}'")
+done
+if [ "${#rewritten_components[@]}" -gt 0 ] ; then
+    NEW_COMPONENTS="[ $(printf '%s,' "${rewritten_components[@]}") ]"
+fi
+
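+# Rewrite the copied kustomization.yaml in place: pin the kuttl-generated namespace and
+# swap in the recomputed resource and component lists.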
+env NAMESPACE="${NAMESPACE}" \
+    NEW_RESOURCES="${NEW_RESOURCES}" \
+    NEW_COMPONENTS="${NEW_COMPONENTS}" \
+    yq -i '
+  .namespace = strenv(NAMESPACE) |
+  .resources = env(NEW_RESOURCES) |
+  .components = env(NEW_COMPONENTS)
+' "${kustomize_dir}/kustomization.yaml"
+
+if [ "${DEBUG}" = 'true' ] ; then
+  cat "${kustomize_dir}/kustomization.yaml"
+fi
+
+kubectl apply -k "${kustomize_dir}"
diff --git a/hbase-kubernetes-deployment/tests/integration/README.md b/hbase-kubernetes-deployment/tests/integration/README.md
new file mode 100644
index 0000000..69a91c2
--- /dev/null
+++ b/hbase-kubernetes-deployment/tests/integration/README.md
@@ -0,0 +1,159 @@
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+# Kubernetes Deployment Testing Using `kuttl`
+
+Defines a set of tests that are suitable for running against a target cluster -- they are not too
+resource intensive and do not require any vendor-specific extensions. It should be possible to run
+these tests against a multi-node KinD cluster; below are some notes to help a developer run
+them locally.
+
+## Run the tests locally
+
+Assumes Docker Desktop or some other docker-in-docker type of environment. First, prepare your
+cluster connection details such that they can be passed into the container context. Next, launch
+the test runner in a container:
+
+```shell
+$ docker container run \
+  --env KUBECONFIG=/workspace/your-kubeconfig \
+  --mount type=bind,source=$(pwd),target=/workspace \
+  -v /var/run/docker.sock:/var/run/docker.sock \
+  --workdir /workspace \
+  ${USER}/hbase/operator-tools/kuttl:latest \
+  --config tests/kuttl-test-integration.yaml \
+  --parallel 1
+```
+
+## Run the tests in AWS EKS
+
+It is possible to run these tests in AWS EKS. This requires configuring RBAC on your target
+cluster that maps to an IAM profile. Next, define a profile in your AWS configuration. When you
+launch the container, pass the configuration and profile selection through to the running
+container.
+
+Building on the previous example,
+
+```shell
+$ docker container run \
+  --env AWS_PROFILE="your-profile" \
+  --env KUBECONFIG=/workspace/your-kubeconfig \
+  --mount type=bind,source=$(pwd),target=/workspace \
+  -v /var/run/docker.sock:/var/run/docker.sock \
+  -v ~/.aws:/root/.aws \
+  --workdir /workspace \
+  ${USER}/hbase/operator-tools/kuttl:latest \
+  --config tests/kuttl-test-integration.yaml
+```
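+
+For reference, a hypothetical `~/.aws/config` profile for this purpose might look like the
+sketch below; the profile name, role ARN, and region are placeholders, not values required
+by these tests:
+
+```
+[profile your-profile]
+region = us-east-1
+role_arn = arn:aws:iam::111122223333:role/your-eks-access-role
+source_profile = default
+```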
+
+## Prepare a KinD cluster
+
+Ask KinD to create a cluster (and docker network), and export a kubeconfig oriented as if from
+inside the cluster. Start by creating a kind-config.yaml and configuring it for multiple nodes.
+See https://kind.sigs.k8s.io/docs/user/quick-start/#configuring-your-kind-cluster
+
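+A minimal multi-node `kind-config.yaml` might look like the following sketch (two workers
+is an illustrative choice, not a requirement of these tests):
+
+```yaml
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+nodes:
+- role: control-plane
+- role: worker
+- role: worker
+```
+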
+```shell
+$ kind create cluster --config kind-config.yaml
+...
+You can now use your cluster with:
+
+kubectl cluster-info --context kind --kubeconfig kubeconfig
+$ kind export kubeconfig --name kind --internal --kubeconfig kubeconfig-internal
+```
+
+## Local KinD Hacks
+
+Preparing and staging the large container images into the kind nodes is slow. Speed up the process
+a bit by creating a single-node KinD cluster and letting `kuttl` populate the images you need.
+
+First, find all the images used in your tests,
+
+```shell
+$ find tests/kind -type f -iname '*kustomization.yaml' \
+    -exec yq '.images[] | .newName + ":" + .newTag' {} + \
+  | sort -u
+hadoop:...
+hbase:...
+zookeeper:...
+```
+
+Pull those images locally.
+
+```shell
+$ docker image pull hadoop:...
+$ docker image pull hbase:...
+$ docker image pull zookeeper:...
+```
+
+Now make sure kuttl is using a docker volume for the containerd directory on each container, and
+populate those images into your kuttl configuration using this config snippet:
+
+```yaml
+kindNodeCache:
+  # Have kuttl create and mount volumes for a container image cache to each kind pod. Kuttl will
+  # reuse these mounts across runs, so we can save time the next time the tests run.
+  true
+kindContainers:
+  # pre-populate the kind containers with these images pulled from the host registry. They'll be
+  # cached via `kindNodeCache`.
+- hadoop:...
+- hbase:...
+- zookeeper:...
+```
+
+When you run `kuttl` with this config, you'll see that it has mounted a volume for each container.
+It'll take a while, but `kuttl` will report its progress copying these container images.
+
+```
+== RUN   kuttl
+...
+    harness.go:202: node mount point /var/lib/docker/volumes/kind-0/_data
+...
+    harness.go:155: Starting KIND cluster
+    kind.go:66: Adding Containers to KIND...
+    kind.go:75: Add image zookeeper:... to node control-plane
+...
+```
+
+Once the images are copied into one volume, create all the additional volumes you'll need and
+clone the original into each. Repeat this for every worker node you'd like in your cluster.
+
+```shell
+$ docker volume create --name kind-1
+$ docker container run --rm -it \
+  -v kind-0:/from \
+  -v kind-1:/to \
+  alpine ash -c "cd /from ; cp -a . /to"
+```
+
+In `kind-config.yaml`, specify the mount points for each of your KinD nodes.
+
+```yaml
+nodes:
+- role: control-plane
+  extraMounts:
+  - &extra-mounts
+    hostPath: /var/lib/docker/volumes/kind-0/_data
+    containerPath: /var/lib/containerd
+    readOnly: false
+    propagation: HostToContainer
+- role: worker
+  extraMounts:
+  - <<: *extra-mounts
+    hostPath: /var/lib/docker/volumes/kind-1/_data
+...
+```
diff --git a/hbase-kubernetes-deployment/tests/integration/overlays_hdfs/00-assert.yaml b/hbase-kubernetes-deployment/tests/integration/overlays_hdfs/00-assert.yaml
new file mode 100644
index 0000000..899c485
--- /dev/null
+++ b/hbase-kubernetes-deployment/tests/integration/overlays_hdfs/00-assert.yaml
@@ -0,0 +1,31 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+# assert that there is a `StatefulSet` named "namenode" that has one live instance
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: namenode
+status:
+  availableReplicas: 1
+---
+# assert that there is a `StatefulSet` named "datanode" that has one live instance
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: datanode
+status:
+  availableReplicas: 1
diff --git a/hbase-kubernetes-deployment/tests/integration/overlays_hdfs/00-kustomize.yaml b/hbase-kubernetes-deployment/tests/integration/overlays_hdfs/00-kustomize.yaml
new file mode 100644
index 0000000..3947f48
--- /dev/null
+++ b/hbase-kubernetes-deployment/tests/integration/overlays_hdfs/00-kustomize.yaml
@@ -0,0 +1,20 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+- script: ../../bin/kustomize_into_tmpdir.sh
diff --git a/hbase-kubernetes-deployment/tests/integration/overlays_hdfs/kustomization.yaml b/hbase-kubernetes-deployment/tests/integration/overlays_hdfs/kustomization.yaml
new file mode 100644
index 0000000..ba9a7bf
--- /dev/null
+++ b/hbase-kubernetes-deployment/tests/integration/overlays_hdfs/kustomization.yaml
@@ -0,0 +1,22 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+- ../test_base
+- ../../../overlays/hdfs
diff --git a/hbase-kubernetes-deployment/tests/integration/test_base/kustomization.yaml b/hbase-kubernetes-deployment/tests/integration/test_base/kustomization.yaml
new file mode 100644
index 0000000..387cf31
--- /dev/null
+++ b/hbase-kubernetes-deployment/tests/integration/test_base/kustomization.yaml
@@ -0,0 +1,29 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+commonLabels:
+  # Common labels and images must be repeated in each overlay; they cannot be inherited because each overlay is kept independent.
+  # https://github.com/kubernetes-sigs/kustomize/issues/915
+  # This label is used to open up calico network acls
+  app: hadoop
+
+resources:
+# When inter-pod networking is limited, apply this policy to open communications between pods that
+# bear the "hadoop" label.
+- networkpolicy.yaml
diff --git a/hbase-kubernetes-deployment/tests/integration/test_base/networkpolicy.yaml b/hbase-kubernetes-deployment/tests/integration/test_base/networkpolicy.yaml
new file mode 100644
index 0000000..8da0c53
--- /dev/null
+++ b/hbase-kubernetes-deployment/tests/integration/test_base/networkpolicy.yaml
@@ -0,0 +1,29 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+# Explicitly permit all traffic between Hadoop-related pods in our namespace
+kind: NetworkPolicy
+apiVersion: networking.k8s.io/v1
+metadata:
+  name: allow-all
+spec:
+  podSelector:
+    matchLabels:
+      app: hadoop
+  ingress:
+    - {}
+  egress:
+    - {}
diff --git a/hbase-kubernetes-deployment/tests/kuttl-test-integration.yaml b/hbase-kubernetes-deployment/tests/kuttl-test-integration.yaml
new file mode 100644
index 0000000..e0b49b3
--- /dev/null
+++ b/hbase-kubernetes-deployment/tests/kuttl-test-integration.yaml
@@ -0,0 +1,33 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Test runner using KUTTL against a target cluster.
+# https://kuttl.dev
+# https://kind.sigs.k8s.io
+---
+# Does not use Kuttl's built-in KIND support -- it doesn't quite work correctly with a VM-based
+# (Docker Desktop) style of runtime. Instead, assumes the cluster is established outside of kuttl
+# and configuration is provided via `--env`.
+apiVersion: kuttl.dev/v1beta1
+kind: TestSuite
+testDirs:
+- ./tests/integration
+timeout:
+  # These tests allocate several pods with dependencies between them; allow some time for
+  # everything to launch and settle.
+  300
+reportName: kuttl-report-integration
+reportFormat: xml
diff --git a/hbase-kubernetes-deployment/tests/kuttl-test-unit.yaml b/hbase-kubernetes-deployment/tests/kuttl-test-unit.yaml
new file mode 100644
index 0000000..04d1b51
--- /dev/null
+++ b/hbase-kubernetes-deployment/tests/kuttl-test-unit.yaml
@@ -0,0 +1,25 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Test runner using https://kuttl.dev
+---
+apiVersion: kuttl.dev/v1beta1
+kind: TestSuite
+startControlPlane: true
+testDirs:
+- ./tests/unit
+reportName: kuttl-report-unit
+reportFormat: xml
diff --git a/hbase-kubernetes-deployment/tests/unit/base/00-assert.yaml b/hbase-kubernetes-deployment/tests/unit/base/00-assert.yaml
new file mode 100644
index 0000000..1e24de4
--- /dev/null
+++ b/hbase-kubernetes-deployment/tests/unit/base/00-assert.yaml
@@ -0,0 +1,72 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+# assert that there is a `ConfigMap` named "environment"
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: environment
+---
+# assert that there is a `ConfigMap` named "global-files"
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: global-files
+# TODO: kuttl has no means to express `any` value, so cannot assert on data keys.
+#data:
+#  java.security: ...
+#  jmxexporter.yaml: ...
+#  ssl-client.xml: ...
+#  ssl-server.xml: ...
+---
+# assert that there is a `ConfigMap` named "hadoop-configuration-XXX"
+# TODO: kuttl does not support generated names
+#apiVersion: v1
+#kind: ConfigMap
+#metadata:
+#  name: hadoop-configuration-c94h8k249d
+# TODO: kuttl has no means to express `any` value, so cannot assert on data keys.
+#data:
+#  log4j.properties: ...
+---
+# assert that there is a `ConfigMap` named "scripts"
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: scripts
+# TODO: kuttl has no means to express `any` value, so cannot assert on data keys.
+#data:
+#  apiserver_access.sh: ...
+#  ...
+---
+# assert that there is a `Secret` named "keystore-password"
+apiVersion: v1
+kind: Secret
+metadata:
+  name: keystore-password
+type: Opaque
+---
+# assert that there is a `Service` named "hadoop"
+apiVersion: v1
+kind: Service
+metadata:
+  name: hadoop
+---
+# assert that there is a `Job` named "delete-format-hdfs-configmap"
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: delete-format-hdfs-configmap
diff --git a/hbase-kubernetes-deployment/tests/unit/base/00-kustomize.yaml b/hbase-kubernetes-deployment/tests/unit/base/00-kustomize.yaml
new file mode 100644
index 0000000..3947f48
--- /dev/null
+++ b/hbase-kubernetes-deployment/tests/unit/base/00-kustomize.yaml
@@ -0,0 +1,20 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+- script: ../../bin/kustomize_into_tmpdir.sh
diff --git a/hbase-kubernetes-deployment/tests/unit/base/README.md b/hbase-kubernetes-deployment/tests/unit/base/README.md
new file mode 100644
index 0000000..e6a4aef
--- /dev/null
+++ b/hbase-kubernetes-deployment/tests/unit/base/README.md
@@ -0,0 +1,24 @@
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+# tests/unit/base
+
+A collection of asserts on the resources generated by `hbase-kubernetes-deployment/base` that
+are not explicitly covered by a more specific test case.
+
+Hopefully the scope of this test case shrinks over time.
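+
+As a rough sketch, assuming the `kuttl` kubectl plugin is installed, this suite can be run
+from the repository root with something like:
+
+```sh
+# Run the unit test suite; kuttl discovers this test case via the config file.
+kubectl kuttl test --config hbase-kubernetes-deployment/tests/kuttl-test-unit.yaml
+```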
diff --git a/hbase-kubernetes-deployment/tests/unit/base/kustomization.yaml b/hbase-kubernetes-deployment/tests/unit/base/kustomization.yaml
new file mode 100644
index 0000000..e0ed1f4
--- /dev/null
+++ b/hbase-kubernetes-deployment/tests/unit/base/kustomization.yaml
@@ -0,0 +1,20 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+  - ../../../base
diff --git a/hbase-kubernetes-deployment/tests/unit/overlays_hdfs/00-assert.yaml b/hbase-kubernetes-deployment/tests/unit/overlays_hdfs/00-assert.yaml
new file mode 100644
index 0000000..cf36710
--- /dev/null
+++ b/hbase-kubernetes-deployment/tests/unit/overlays_hdfs/00-assert.yaml
@@ -0,0 +1,73 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+# assert that there is a `ConfigMap` named "hadoop-configuration-XXX"
+# TODO: kuttl does not support generated names
+#apiVersion: v1
+#kind: ConfigMap
+#metadata:
+#  name: hadoop-configuration-c94h8k249d
+# TODO: kuttl has no means to express `any` value, so we cannot assert on data keys.
+#data:
+#  log4j.properties: ...
+#  hdfs-site.xml: ...
+#  core-site.xml: ...
+---
+# assert that there is a `PodDisruptionBudget` named "namenode"
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+  name: namenode
+---
+# assert that there is a `StatefulSet` named "namenode" that it provides pods labeled role:namenode
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: namenode
+spec:
+  template:
+    metadata:
+      labels:
+        role: namenode
+---
+# assert that there is a `Service` named "namenode" pointing to pods labeled role:namenode
+apiVersion: v1
+kind: Service
+metadata:
+  name: namenode
+spec:
+  selector:
+    role: namenode
+---
+# assert that there is a `StatefulSet` named "datanode" that it provides pods labeled role:datanode
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: datanode
+spec:
+  template:
+    metadata:
+      labels:
+        role: datanode
+---
+# assert that there is a `Service` named "datanode" pointing to pods labeled role:datanode
+apiVersion: v1
+kind: Service
+metadata:
+  name: datanode
+spec:
+  selector:
+    role: datanode
diff --git a/hbase-kubernetes-deployment/tests/unit/overlays_hdfs/00-kustomize.yaml b/hbase-kubernetes-deployment/tests/unit/overlays_hdfs/00-kustomize.yaml
new file mode 100644
index 0000000..3947f48
--- /dev/null
+++ b/hbase-kubernetes-deployment/tests/unit/overlays_hdfs/00-kustomize.yaml
@@ -0,0 +1,20 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+- script: ../../bin/kustomize_into_tmpdir.sh
diff --git a/hbase-kubernetes-deployment/tests/unit/overlays_hdfs/kustomization.yaml b/hbase-kubernetes-deployment/tests/unit/overlays_hdfs/kustomization.yaml
new file mode 100644
index 0000000..cd111fc
--- /dev/null
+++ b/hbase-kubernetes-deployment/tests/unit/overlays_hdfs/kustomization.yaml
@@ -0,0 +1,21 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+- ../../../overlays/hdfs
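+# To inspect the manifests this test case exercises outside of kuttl, one can
+# run, for example:
+#   kustomize build hbase-kubernetes-deployment/tests/unit/overlays_hdfs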