You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@openwhisk.apache.org by da...@apache.org on 2018/07/04 02:01:27 UTC

[incubator-openwhisk-deploy-kube] branch master updated: Helm support for clustered zookeeper (#236)

This is an automated email from the ASF dual-hosted git repository.

daisyguo pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk-deploy-kube.git


The following commit(s) were added to refs/heads/master by this push:
     new d76388b  Helm support for clustered zookeeper (#236)
d76388b is described below

commit d76388b24fea0068aaaa9277f94801fcf7a55202
Author: David Grove <dg...@users.noreply.github.com>
AuthorDate: Tue Jul 3 22:01:25 2018 -0400

    Helm support for clustered zookeeper (#236)
    
    Allow zookeeper.replicaCount to be set to values > 1 by
    properly configuring the zookeeper cluster and adjusting other
    parts of the Helm chart so that dependent services can
    properly communicate with the cluster.
    
    Also document which of the deployments do not yet support
    multiple replicas in docs/configurationChoices.md and
    via comments in values.yaml.
---
 docs/configurationChoices.md   |  10 ++++
 helm/templates/_helpers.tpl    |  13 +++--
 helm/templates/_readiness.tpl  |   4 +-
 helm/templates/controller.yaml |   1 +
 helm/templates/invoker.yaml    |   3 +-
 helm/templates/kafka.yaml      |   3 +-
 helm/templates/zookeeper.yaml  | 113 ++++++++++++++++++++++-------------------
 helm/values.yaml               |  14 +++++
 8 files changed, 101 insertions(+), 60 deletions(-)

diff --git a/docs/configurationChoices.md b/docs/configurationChoices.md
index 4545228..db8bfe4 100644
--- a/docs/configurationChoices.md
+++ b/docs/configurationChoices.md
@@ -36,6 +36,16 @@ controller:
   replicaCount: 2
 ```
 
+NOTE: The Helm-based deployment does not yet support setting the replicaCount
+to be greater than 1 for the following components:
+- apigateway
+- couchdb
+- kafka
+- kafkaprovider
+- nginx
+- redis
+We are actively working on reducing this list and would welcome PRs to help.
+
 ### Using an external database
 
 You may want to use an external CouchDB or Cloudant instance instead
diff --git a/helm/templates/_helpers.tpl b/helm/templates/_helpers.tpl
index 5faa4b3..d950380 100644
--- a/helm/templates/_helpers.tpl
+++ b/helm/templates/_helpers.tpl
@@ -30,9 +30,16 @@
 {{ .Values.redis.name }}.{{ .Release.Namespace }}.svc.cluster.local
 {{- end -}}
 
-{{/* hostname for zookeeper */}}
-{{- define "zookeeper_host" -}}
-{{ .Values.zookeeper.name }}.{{ .Release.Namespace }}.svc.cluster.local
+{{/* client connection string for zookeeper cluster (server1:port server2:port ... serverN:port)*/}}
+{{- define "zookeeper_connect" -}}
+{{- $zkname := .Values.zookeeper.name }}
+{{- $zkport := .Values.zookeeper.port }}
+{{- range $i, $e := until (int .Values.zookeeper.replicaCount) -}}{{ if ne $i 0 }},{{ end }}{{ $zkname }}-{{ . }}.{{ $zkname }}.{{ $.Release.Namespace }}.svc.cluster.local:{{ $zkport }}{{ end }}
+{{- end -}}
+
+{{/* host name for server.0 in zookeeper cluster */}}
+{{- define "zookeeper_zero_host" -}}
+{{ .Values.zookeeper.name }}-0.{{ .Values.zookeeper.name }}.{{ $.Release.Namespace }}.svc.cluster.local
 {{- end -}}
 
 {{/* Runtimes manifest */}}
diff --git a/helm/templates/_readiness.tpl b/helm/templates/_readiness.tpl
index fea7a24..3e06fdb 100644
--- a/helm/templates/_readiness.tpl
+++ b/helm/templates/_readiness.tpl
@@ -22,7 +22,7 @@
   image: "busybox"
   imagePullPolicy: "IfNotPresent"
   # TODO: I haven't found an easy external test to determine that kafka is up, so as a hack we wait for zookeeper and then sleep for 10 seconds and cross our fingers!
-  command: ["sh", "-c", 'result=1; until [ $result -eq 0 ]; do OK=$(echo ruok | nc -w 1 {{ include "zookeeper_host" . }} {{ .Values.zookeeper.port }}); if [ "$OK" == "imok" ]; then result=0; fi; echo waiting for zookeeper to be ready; sleep 1; done; echo zookeeper is up, sleeping for 10 seconds; sleep 10;']
+  command: ["sh", "-c", 'result=1; until [ $result -eq 0 ]; do OK=$(echo ruok | nc -w 1 {{ include "zookeeper_zero_host" . }} {{ .Values.zookeeper.port }}); if [ "$OK" == "imok" ]; then result=0; echo "zookeeper returned imok!"; fi; echo waiting for zookeeper to be ready; sleep 1; done; echo zookeeper is up, sleeping for 10 seconds; sleep 10;']
 {{- end -}}
 
 {{/* Init container that waits for zookeeper to be ready */}}
@@ -30,7 +30,7 @@
 - name: "wait-for-zookeeper"
   image: "busybox"
   imagePullPolicy: "IfNotPresent"
-  command: ["sh", "-c", 'result=1; until [ $result -eq 0 ]; do OK=$(echo ruok | nc -w 1 {{ include "zookeeper_host" . }} {{ .Values.zookeeper.port }}); if [ "$OK" == "imok" ]; then result=0; fi; echo waiting for zookeeper to be ready; sleep 1; done']
+  command: ["sh", "-c", 'result=1; until [ $result -eq 0 ]; do OK=$(echo ruok | nc -w 1 {{ include "zookeeper_zero_host" . }} {{ .Values.zookeeper.port }}); if [ "$OK" == "imok" ]; then result=0; echo "zookeeper returned imok!"; fi; echo waiting for zookeeper to be ready; sleep 1; done;']
 {{- end -}}
 
 {{/* Init container that waits for controller to be ready */}}
diff --git a/helm/templates/controller.yaml b/helm/templates/controller.yaml
index cda8d45..fc037ad 100644
--- a/helm/templates/controller.yaml
+++ b/helm/templates/controller.yaml
@@ -24,6 +24,7 @@ metadata:
   labels:
     name: {{ .Values.controller.name | quote }}
 spec:
+  podManagementPolicy: "Parallel"
   replicas: {{ .Values.controller.replicaCount }}
   name: {{ .Values.controller.name | quote }}
   template:
diff --git a/helm/templates/invoker.yaml b/helm/templates/invoker.yaml
index ee286e4..9a3ab11 100644
--- a/helm/templates/invoker.yaml
+++ b/helm/templates/invoker.yaml
@@ -16,6 +16,7 @@ metadata:
     name: {{ .Values.invoker.name | quote }}
 spec:
 {{- if eq .Values.invoker.containerFactory.impl "kubernetes" }}
+  podManagementPolicy: "Parallel"
   replicas: {{ .Values.invoker.containerFactory.kubernetes.replicaCount }}
 {{- end }}
   template:
@@ -136,7 +137,7 @@ spec:
 
           # properties for zookeeper connection
           - name: "ZOOKEEPER_HOSTS"
-            value: "{{ include "zookeeper_host" . }}:{{ .Values.zookeeper.port }}"
+            value: "{{ include "zookeeper_connect" . }}"
 
         ports:
         - name: invoker
diff --git a/helm/templates/kafka.yaml b/helm/templates/kafka.yaml
index d8b33cf..aed0f92 100644
--- a/helm/templates/kafka.yaml
+++ b/helm/templates/kafka.yaml
@@ -20,6 +20,7 @@ metadata:
   name: {{ .Values.kafka.name | quote }}
   namespace: {{ .Release.Namespace | quote }}
 spec:
+  podManagementPolicy: "Parallel"
   name: {{ .Values.kafka.name | quote }}
   replicas: {{ .Values.kafka.replicaCount }}
   template:
@@ -73,7 +74,7 @@ spec:
 
         # zookeeper info
         - name: "KAFKA_ZOOKEEPER_CONNECT"
-          value: "{{ include "zookeeper_host" . }}:{{ .Values.zookeeper.port }}"
+          value: "{{ include "zookeeper_connect" . }}"
 
 {{- if .Values.kafka.persistence.enabled }}
 ---
diff --git a/helm/templates/zookeeper.yaml b/helm/templates/zookeeper.yaml
index dc4d7f3..24069eb 100644
--- a/helm/templates/zookeeper.yaml
+++ b/helm/templates/zookeeper.yaml
@@ -11,6 +11,7 @@ metadata:
 spec:
   selector:
     name: {{ .Values.zookeeper.name | quote }}
+  clusterIP: None
   ports:
     - port: {{ .Values.zookeeper.port }}
       name: "zookeeper"
@@ -20,14 +21,35 @@ spec:
       name: "leader-election"
 
 ---
-apiVersion: extensions/v1beta1
-kind: Deployment
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ .Values.zookeeper.name | quote }}
+  namespace: {{ .Release.Namespace | quote }}
+data:
+  zoo.cfg: |
+    tickTime={{ .Values.zookeeper.config.tickTime }}
+    clientPort={{ .Values.zookeeper.port }}
+    initLimit={{ .Values.zookeeper.config.initLimit }}
+    syncLimit={{ .Values.zookeeper.config.syncLimit }}
+    dataDir={{ .Values.zookeeper.config.dataDir }}
+    dataLogDir={{ .Values.zookeeper.config.dataLogDir }}
+{{- $zkname := .Values.zookeeper.name }}
+{{- $zkserverport := .Values.zookeeper.serverPort }}
+{{- $zkelectionport := .Values.zookeeper.leaderElectionPort }}
+{{- range $i, $e := until (int .Values.zookeeper.replicaCount) }}
+    server.{{ . }}={{ $zkname }}-{{ . }}.{{ $zkname }}.{{ $.Release.Namespace }}.svc.cluster.local:{{ $zkserverport }}:{{ $zkelectionport }}
+{{- end }}
+
+---
+apiVersion: apps/v1beta1
+kind: StatefulSet
 metadata:
   name: {{ .Values.zookeeper.name | quote }}
   namespace: {{ .Release.Namespace | quote}}
-  labels:
-    name: {{ .Values.zookeeper.name | quote }}
 spec:
+  serviceName: {{ .Values.zookeeper.name | quote }}
+  podManagementPolicy: "Parallel"
   replicas: {{ .Values.zookeeper.replicaCount }}
   template:
     metadata:
@@ -36,26 +58,22 @@ spec:
     spec:
       restartPolicy: {{ .Values.zookeeper.restartPolicy | quote }}
 
-{{- if .Values.zookeeper.persistence.enabled }}
-      volumes:
-      - name: zk-data
-        persistentVolumeClaim:
-          claimName: "{{- .Values.zookeeper.persistence.pvcName -}}-data"
-      - name: zk-datalog
-        persistentVolumeClaim:
-          claimName: "{{- .Values.zookeeper.persistence.pvcName -}}-datalog"
-{{- end }}
-
-      {{- if .Values.affinity.enabled }}
+{{- if .Values.affinity.enabled }}
       affinity:
 {{ include "affinity.core" . | indent 8 }}
 {{ include "affinity.selfAntiAffinity" ( .Values.zookeeper.name | quote ) | indent 8 }}
-      {{- end }}
+{{- end }}
+
+      volumes:
+        - name: zk-config
+          configMap:
+            name: {{ .Values.zookeeper.name | quote }}
 
       containers:
       - name: {{ .Values.zookeeper.name | quote }}
         image: {{ .Values.zookeeper.image | quote }}
         imagePullPolicy: {{ .Values.zookeeper.imagePullPolicy | quote }}
+        command: ["/bin/bash", "-c", "hostname -s | cut -d'-' -f2 > {{ .Values.zookeeper.config.dataDir }}/myid; cat {{ .Values.zookeeper.config.dataDir }}/myid; cat /conf/zoo.cfg; zkServer.sh start-foreground"]
         ports:
         - name: zookeeper
           containerPort: {{ .Values.zookeeper.port }}
@@ -63,46 +81,35 @@ spec:
           containerPort: {{ .Values.zookeeper.serverPort }}
         - name: leader-election
           containerPort: {{ .Values.zookeeper.leaderElectionPort }}
-{{- if .Values.zookeeper.persistence.enabled }}
+
         volumeMounts:
-        - mountPath: /data
-          name: zk-data
-        - mountPath: /datalog
-          name: zk-datalog
-{{- end }}
-        env:
+        - mountPath: /conf
+          name: zk-config
 {{- if .Values.zookeeper.persistence.enabled }}
-        - name: "ZOO_DATA_DIR"
-          value: /data
-        - name: "ZOO_DATA_LOG_DIR"
-          value: /datalog
+        - mountPath: {{ .Values.zookeeper.config.dataDir }}
+          name: "{{- .Values.zookeeper.persistence.pvcName -}}-data"
+        - mountPath: {{ .Values.zookeeper.config.dataLogDir }}
+          name: "{{- .Values.zookeeper.persistence.pvcName -}}-datalog"
 {{- end }}
 
 {{- if .Values.zookeeper.persistence.enabled }}
----
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: "{{- .Values.zookeeper.persistence.pvcName -}}-data"
-  namespace: {{ .Release.Namespace | quote }}
-spec:
-  storageClassName: {{ .Values.zookeeper.persistence.storageClass }}
-  accessModes:
-    - {{ .Values.zookeeper.persistence.accessMode }}
-  resources:
-    requests:
-      storage: {{ .Values.zookeeper.persistence.size }}
----
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: "{{- .Values.zookeeper.persistence.pvcName -}}-datalog"
-  namespace: {{ .Release.Namespace | quote }}
-spec:
-  storageClassName: {{ .Values.zookeeper.persistence.storageClass }}
-  accessModes:
-    - {{ .Values.zookeeper.persistence.accessMode }}
-  resources:
-    requests:
-      storage: {{ .Values.zookeeper.persistence.size }}
+  volumeClaimTemplates:
+  - metadata:
+      name: "{{- .Values.zookeeper.persistence.pvcName -}}-data"
+    spec:
+      storageClassName: {{ .Values.zookeeper.persistence.storageClass }}
+      accessModes:
+        - {{ .Values.zookeeper.persistence.accessMode }}
+      resources:
+        requests:
+          storage: {{ .Values.zookeeper.persistence.size }}
+  - metadata:
+      name: "{{- .Values.zookeeper.persistence.pvcName -}}-datalog"
+    spec:
+      storageClassName: {{ .Values.zookeeper.persistence.storageClass }}
+      accessModes:
+        - {{ .Values.zookeeper.persistence.accessMode }}
+      resources:
+        requests:
+          storage: {{ .Values.zookeeper.persistence.size }}
 {{- end }}
diff --git a/helm/values.yaml b/helm/values.yaml
index 21d3068..9b86d11 100644
--- a/helm/values.yaml
+++ b/helm/values.yaml
@@ -51,6 +51,7 @@ docker:
 zookeeper:
   name: "zookeeper"
   image: "zookeeper:3.4"
+  # Note: Zookeeper's quorum protocol is designed to have an odd number of replicas.
   replicaCount: 1
   imagePullPolicy: "IfNotPresent"
   restartPolicy: "Always"
@@ -63,11 +64,19 @@ zookeeper:
     size: 2Gi
     storageClass: default
     accessMode: ReadWriteOnce
+  # Default values for entries in zoo.cfg (see Apache Zookeeper documentation for semantics)
+  config:
+    tickTime: 2000
+    initLimit: 5
+    syncLimit: 2
+    dataDir: "/data"
+    dataLogDir: "/datalog"
 
 # kafka configurations
 kafka:
   name: "kafka"
   image: "wurstmeister/kafka:0.11.0.1"
+  # NOTE: setting replicaCount > 1 will not work...actively being worked on.
   replicaCount: 1
   imagePullPolicy: "IfNotPresent"
   restartPolicy: "Always"
@@ -84,6 +93,7 @@ db:
   external: false
   name: "couchdb"
   image: "openwhisk/kube-couchdb:latest"
+  # NOTE: setting replicaCount > 1 will not work.
   replicaCount: 1
   imagePullPolicy: "IfNotPresent"
   restartPolicy: "Always"
@@ -110,6 +120,7 @@ db:
 nginx:
   name: "nginx"
   image: "nginx:1.11"
+  # NOTE: setting replicaCount > 1 is not tested and may not work
   replicaCount: 1
   imagePullPolicy: "IfNotPresent"
   restartPolicy: "Always"
@@ -166,6 +177,7 @@ invoker:
 apigw:
   name: "apigateway"
   image: "openwhisk/apigateway:latest"
+  # NOTE: setting replicaCount > 1 is not tested and may not work
   replicaCount: 1
   imagePullPolicy: "IfNotPresent"
   restartPolicy: "Always"
@@ -176,6 +188,7 @@ apigw:
 redis:
   name: "redis"
   image: "redis:3.2"
+  # NOTE: setting replicaCount > 1 will not work; need to add redis cluster configuration
   replicaCount: 1
   imagePullPolicy: "IfNotPresent"
   restartPolicy: "Always"
@@ -202,6 +215,7 @@ affinity:
 kafkaprovider:
   name: "kafkaprovider"
   image: "openwhisk/kafkaprovider:latest"
+  # NOTE: setting replicaCount > 1 has not been tested and may not work
   replicaCount: 1
   imagePullPolicy: "IfNotPresent"
   restartPolicy: "Always"