Posted to commits@openwhisk.apache.org by ho...@apache.org on 2017/07/26 19:35:17 UTC

[incubator-openwhisk-deploy-kube] branch master updated: Deploy Kafka manually (#44)

This is an automated email from the ASF dual-hosted git repository.

houshengbo pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk-deploy-kube.git


The following commit(s) were added to refs/heads/master by this push:
     new 7865b25  Deploy Kafka manually (#44)
7865b25 is described below

commit 7865b250637e583e3da1eab6fdac74eb2214818d
Author: Dan Lavine <dl...@us.ibm.com>
AuthorDate: Wed Jul 26 19:35:15 2017 +0000

    Deploy Kafka manually (#44)
    
    * The current deployment uses a custom-built Docker image to
      create the Kafka topics when Kafka is deployed. Another option
      would be to have the Invoker/Controller images start a companion
      container alongside them that creates the Kafka topics. That way
      you are able to use commands like `kubectl scale ...`
    * Updated Travis
    * Updated docs
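
The appeal of the companion-container option is that topic creation would
travel with each scaled pod. For reference, a minimal sketch of the command
it would unlock, assuming the invokers run as a Deployment named `invoker`
in the `openwhisk` namespace (the name and resource kind are illustrative):

```
# scale the invokers directly; with per-pod topic creation, no separate
# Kafka redeploy would be needed to back the new invokerN topics
kubectl -n openwhisk scale deployment invoker --replicas=3
```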
---
 .travis/build.sh                                   | 14 ++++
 README.md                                          |  2 +
 .../environments/kube/files/kafka-service.yml      | 15 ----
 ansible-kube/environments/kube/files/kafka.yml     | 29 --------
 .../environments/kube/files/zookeeper-service.yml  | 21 ------
 ansible-kube/openwhisk.yml                         |  2 -
 ansible-kube/roles/kafka/tasks/deploy.yml          | 55 --------------
 configure/configure.sh                             |  5 --
 kubernetes/controller/README.md                    | 20 +++--
 kubernetes/kafka/README.md                         | 40 ++++++++++
 kubernetes/kafka/docker/Dockerfile                 |  5 ++
 kubernetes/kafka/docker/init.sh                    | 61 +++++++++++++++
 kubernetes/kafka/kafka.yml                         | 86 ++++++++++++++++++++++
 kubernetes/nginx/README.md                         | 15 +++-
 kubernetes/zookeeper/README.md                     | 10 +++
 .../files => kubernetes/zookeeper}/zookeeper.yml   | 22 ++++++
 16 files changed, 263 insertions(+), 139 deletions(-)

diff --git a/.travis/build.sh b/.travis/build.sh
index 731f287..cfc0ab0 100755
--- a/.travis/build.sh
+++ b/.travis/build.sh
@@ -106,6 +106,20 @@ statefulsetHealthCheck () {
 
 }
 
+# setup zookeeper
+pushd kubernetes/zookeeper
+  kubectl apply -f zookeeper.yml
+
+  deploymentHealthCheck "zookeeper"
+popd
+
+# setup kafka
+pushd kubernetes/kafka
+  kubectl apply -f kafka.yml
+
+  deploymentHealthCheck "kafka"
+popd
+
 # setup the controller
 pushd kubernetes/controller
   kubectl apply -f controller.yml
diff --git a/README.md b/README.md
index cd4f89a..e9a05a7 100644
--- a/README.md
+++ b/README.md
@@ -98,6 +98,8 @@ kubectl -n openwhisk logs configure-openwhisk-XXXXX
 
 Once the configuration job successfully finishes, you will need to
 manually deploy the rest of the OpenWhisk components.
+* [Zookeeper](kubernetes/zookeeper/README.md)
+* [Kafka](kubernetes/kafka/README.md)
 * [Controller](kubernetes/controller/README.md)
 * [Invoker](kubernetes/invoker/README.md)
 * [Nginx](kubernetes/nginx/README.md)
diff --git a/ansible-kube/environments/kube/files/kafka-service.yml b/ansible-kube/environments/kube/files/kafka-service.yml
deleted file mode 100644
index 093ed76..0000000
--- a/ansible-kube/environments/kube/files/kafka-service.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-apiVersion: v1
-kind: Service
-metadata:
-  name: kafka
-  namespace: openwhisk
-  labels:
-    name: kafka
-spec:
-  selector:
-    name: kafka
-  ports:
-    - port: 9092
-      targetPort: 9092
-      name: kafka
diff --git a/ansible-kube/environments/kube/files/kafka.yml b/ansible-kube/environments/kube/files/kafka.yml
deleted file mode 100644
index 02785ec..0000000
--- a/ansible-kube/environments/kube/files/kafka.yml
+++ /dev/null
@@ -1,29 +0,0 @@
----
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
-  name: kafka
-  namespace: openwhisk
-  labels:
-    name: kafka
-spec:
-  replicas: 1
-  template:
-    metadata:
-      labels:
-        name: kafka
-    spec:
-      restartPolicy: Always
-
-      containers:
-      - name: kafka
-        image: ches/kafka:0.10.0.1
-        imagePullPolicy: IfNotPresent
-        env:
-        - name: "KAFKA_ADVERTISED_HOST_NAME"
-          value: kafka.openwhisk
-        - name: "KAFKA_PORT"
-          value: "9092"
-        ports:
-        - name: kafka
-          containerPort: 9092
diff --git a/ansible-kube/environments/kube/files/zookeeper-service.yml b/ansible-kube/environments/kube/files/zookeeper-service.yml
deleted file mode 100644
index f48773c..0000000
--- a/ansible-kube/environments/kube/files/zookeeper-service.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-apiVersion: v1
-kind: Service
-metadata:
-  name: zookeeper
-  namespace: openwhisk
-  labels:
-    name: zookeeper
-spec:
-  selector:
-    name: zookeeper
-  ports:
-    - port: 2181
-      targetPort: 2181
-      name: zookeeper
-    - port: 2888
-      targetPort: 2888
-      name: server
-    - port: 3888
-      targetPort: 3888
-      name: leader-election
diff --git a/ansible-kube/openwhisk.yml b/ansible-kube/openwhisk.yml
index f9ac5cb..760c28e 100644
--- a/ansible-kube/openwhisk.yml
+++ b/ansible-kube/openwhisk.yml
@@ -3,6 +3,4 @@
 # It assumes you have already set up your database with the respective db provider playbook (currently cloudant.yml or couchdb.yml)
 # It assumes that wipe.yml has been deployed at least once
 
-- include: kafka.yml
-
 #- include: routemgmt.yml
diff --git a/ansible-kube/roles/kafka/tasks/deploy.yml b/ansible-kube/roles/kafka/tasks/deploy.yml
deleted file mode 100644
index 9db4f65..0000000
--- a/ansible-kube/roles/kafka/tasks/deploy.yml
+++ /dev/null
@@ -1,55 +0,0 @@
----
-# This role will install Kafka with Zookeeper in group 'kafka' in the environment inventory
-- name: create zookeeper deployment
-  shell: "kubectl apply -f {{kube_pod_dir}}/zookeeper.yml"
-
-- name: create kafka deployment
-  shell: "kubectl apply -f {{kube_pod_dir}}/kafka.yml"
-
-- name: get zookeeper pods
-  shell: "kubectl -n openwhisk get pods --show-all | grep zookeeper | awk '{print $1}'"
-  register: zookeeperPods
-  until: zookeeperPods.stdout != ""
-  retries: 5
-  delay: 2
-
-- name: set zookeeper pods
-  set_fact:
-    zookeeper_pods: "{{ zookeeperPods.stdout_lines }}"
-
-- name: get kafka pods
-  shell: "kubectl -n openwhisk get pods --show-all | grep kafka | awk '{print $1}'"
-  register: kafkaPods
-  until: kafkaPods.stdout != ""
-  retries: 5
-  delay: 2
-
-- name: set kafka pods
-  set_fact:
-    kafka_pods: "{{ kafkaPods.stdout_lines }}"
-
-- name: wait until the Zookeeper in this host is up and running
-  shell: "kubectl -n openwhisk exec {{ item[0] }} -c zookeeper -- bash -c 'echo ruok | nc -w 3 0.0.0.0:{{ zookeeper.port }}'"
-  register: result
-  until: (result.rc == 0) and (result.stdout == 'imok')
-  retries: 36
-  delay: 5
-  with_nested:
-    - ["{{ zookeeper_pods }}"]
-
-- name: wait until the kafka server started up
-  shell: "kubectl -n openwhisk logs {{ item[0] }} -c kafka"
-  register: result
-  until: ('[Kafka Server 0], started' in result.stdout)
-  retries: 36
-  delay: 5
-  with_nested:
-    - ["{{ kafka_pods }}"]
-
-- name: create the active-ack and health topic
-  shell: "kubectl exec {{ item[0] }} -c kafka -- bash -c 'unset JMX_PORT; kafka-topics.sh --create --topic {{ item[1] }} --replication-factor 1 --partitions 1 --zookeeper {{ zookeeper_host }}:{{ zookeeper.port }}'"
-  register: command_result
-  failed_when: "not ('Created topic' in command_result.stdout or 'already exists' in command_result.stdout)"
-  with_nested:
-  - "{{ kafka_pods }}"
-  - [ 'command', 'health' ]
diff --git a/configure/configure.sh b/configure/configure.sh
index 7e71bfe..a305b05 100755
--- a/configure/configure.sh
+++ b/configure/configure.sh
@@ -37,8 +37,6 @@ pushd /incubator-openwhisk-deploy-kube/ansible
 
   # Create all of the necessary services
   kubectl apply -f environments/kube/files/db-service.yml
-  kubectl apply -f environments/kube/files/zookeeper-service.yml
-  kubectl apply -f environments/kube/files/kafka-service.yml
 
   if deployCouchDB; then
     # Create and configure the CouchDB deployment
@@ -46,7 +44,4 @@ pushd /incubator-openwhisk-deploy-kube/ansible
     ansible-playbook -i environments/kube initdb.yml
     ansible-playbook -i environments/kube wipe.yml
   fi
-
-  # Run through the openwhisk deployment
-  ansible-playbook -i environments/kube openwhisk.yml
 popd
diff --git a/kubernetes/controller/README.md b/kubernetes/controller/README.md
index fee25c5..7a2f66f 100644
--- a/kubernetes/controller/README.md
+++ b/kubernetes/controller/README.md
@@ -16,16 +16,14 @@ kubectl apply -f invoker.yml
 ## Increase Controller Count
 
 If you want to increase the number of controllers deployed,
-then you will also need to update part of the Nginx configuration.
-First, you will need to update the replication count for the
-Controllers [here](https://github.com/apache/incubator-openwhisk-deploy-kube/tree/master/kubernetes/controller/controller.yml#L10).
-
-After updating the controller count, you will need to update
-the available routes for Nginx. This is because the controllers
-are not yet purely HA, but are in a failover mode. To update Nginx
-with the proper routes, take a look at
-[these properties](https://github.com/apache/incubator-openwhisk-deploy-kube/tree/master/kubernetes/nginx/nginx.conf#L15-L20).
-The routes for the controllers and how they are determined can
-be found in the [StatefulSet][StatefulSet] docs.
+you will need to update a number of properties for Kafka, the Controller, and Nginx.
+
+* Kafka: Look at the Kafka [README](https://github.com/apache/incubator-openwhisk-deploy-kube/blob/master/kubernetes/kafka/README.md)
+
+* Controller: You will need to update the replication count for the
+  Controllers [here](https://github.com/apache/incubator-openwhisk-deploy-kube/tree/master/kubernetes/controller/controller.yml#L10)
+  and redeploy.
+
+* Nginx: Take a look at the Nginx [README](https://github.com/apache/incubator-openwhisk-deploy-kube/blob/master/kubernetes/nginx/README.md#increase-controller-count)
 
 [StatefulSet]: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/
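
Taken together, the three bullets above amount to a redeploy in a fixed
order. A rough sketch of that flow, assuming it is run from
`kubernetes/controller` after the replica and count values have been edited
by hand (and the Nginx ConfigMap updated per its README):

```
kubectl apply -f ../kafka/kafka.yml   # 1. redeploy Kafka with the new CONTROLLER_COUNT
kubectl apply -f controller.yml       # 2. redeploy the controllers with the new replica count
kubectl apply -f ../nginx/nginx.yml   # 3. redeploy Nginx with the updated routes
```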
diff --git a/kubernetes/kafka/README.md b/kubernetes/kafka/README.md
new file mode 100644
index 0000000..954058e
--- /dev/null
+++ b/kubernetes/kafka/README.md
@@ -0,0 +1,40 @@
+Kafka
+-----
+
+# Deploying
+
+To deploy Kafka, you will need to make sure that [Zookeeper](../zookeeper/README.md)
+is deployed first. Otherwise Kafka will keep crashing, since
+it cannot connect to a Zookeeper cluster. To actually deploy Kafka,
+just run:
+
+```
+kubectl apply -f kafka.yml
+```
+
+# Deployment Changes
+## Increase Invoker Pods
+
+When updating the invoker pod count, you will need to update some Kafka
+and Invoker properties.
+
+* Kafka: The ["INVOKER_COUNT"](https://github.com/apache/incubator-openwhisk-deploy-kube/blob/master/kubernetes/kafka/kafka.yml#L73)
+  property will need to equal the number of Invokers being deployed
+  and then you need to redeploy Kafka so that the new `invokerN`
+  topics are created.
+
+* Invoker: See the Invoker [README](https://github.com/apache/incubator-openwhisk-deploy-kube/blob/master/kubernetes/invoker/README.md)
+
+## Increase Controller Pods
+
+When updating the Controller pod count, you will need to update the
+Kafka, Controller and Nginx deployments.
+
+* Kafka: The ["CONTROLLER_COUNT"](https://github.com/apache/incubator-openwhisk-deploy-kube/blob/master/kubernetes/kafka/kafka.yml#L63)
+  property will need to equal the number of Controllers being deployed
+  and then you need to redeploy Kafka so that the new `completedN`
+  topics are created.
+
+* Controller: See the Controller [README](https://github.com/apache/incubator-openwhisk-deploy-kube/blob/master/kubernetes/controller/README.md)
+
+* Nginx: See the Nginx [README](https://github.com/apache/incubator-openwhisk-deploy-kube/blob/master/kubernetes/nginx/README.md#increase-controller-count)
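
To make the invoker case above concrete: `INVOKER_COUNT` is a plain
environment value in `kafka.yml`, so the redeploy is just a re-apply. A
minimal sketch, assuming the count has been edited by hand first:

```
# after raising INVOKER_COUNT in kafka.yml (e.g. "1" -> "2"):
kubectl apply -f kafka.yml

# the restarted pod's init script creates the new invokerN topics;
# check the logs (the pods carry the label name=kafka) to confirm
kubectl -n openwhisk logs -l name=kafka
```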
diff --git a/kubernetes/kafka/docker/Dockerfile b/kubernetes/kafka/docker/Dockerfile
new file mode 100644
index 0000000..fbdfab2
--- /dev/null
+++ b/kubernetes/kafka/docker/Dockerfile
@@ -0,0 +1,5 @@
+FROM ches/kafka:0.10.0.1
+
+COPY init.sh /init.sh
+
+CMD ["/init.sh"]
diff --git a/kubernetes/kafka/docker/init.sh b/kubernetes/kafka/docker/init.sh
new file mode 100755
index 0000000..4d9e9b0
--- /dev/null
+++ b/kubernetes/kafka/docker/init.sh
@@ -0,0 +1,61 @@
+#!/bin/bash
+
+set -m
+/start.sh &
+
+TIMEOUT=0
+PASSED=false
+echo "wait for Kafka to be up and running"
+until [ $TIMEOUT -eq 25 ]; do
+  echo "waiting for kafka to be available"
+
+  nc -z 127.0.0.1 $KAFKA_PORT
+  if [ $? -eq 0 ]; then
+    echo "kafka is up and running"
+    PASSED=true
+    break
+  fi
+
+  sleep 0.2
+  let TIMEOUT=TIMEOUT+1
+done
+
+if [ $PASSED = false ]; then
+  echo "failed to setup and reach kafka"
+  exit 1
+fi
+
+unset JMX_PORT
+
+set -x
+
+echo "Create health topic"
+OUTPUT=$(kafka-topics.sh --create --topic health --replication-factor $REPLICATION_FACTOR --partitions $PARTITIONS --zookeeper ${ZOOKEEPER_HOST}:${ZOOKEEPER_PORT} --config retention.bytes=$KAFKA_TOPICS_HEALTH_RETENTIONBYTES --config retention.ms=$KAFKA_TOPICS_HEALTH_RETENTIONMS --config segment.bytes=$KAFKA_TOPICS_HEALTH_SEGMENTBYTES)
+if ! ([[ "$OUTPUT" == *"already exists"* ]] || [[ "$OUTPUT" == *"Created topic"* ]]); then
+  echo "Failed to create heath topic"
+  exit 1
+fi
+
+echo "Create completed topics"
+CONTROLLER_COUNT=$((CONTROLLER_COUNT - 1))
+for i in `seq 0 $CONTROLLER_COUNT`; do
+  OUTPUT=$(kafka-topics.sh --create --topic completed$i --replication-factor $REPLICATION_FACTOR --partitions $PARTITIONS --zookeeper ${ZOOKEEPER_HOST}:${ZOOKEEPER_PORT} --config retention.bytes=$KAFKA_TOPICS_COMPLETED_RETENTIONBYTES --config retention.ms=$KAFKA_TOPICS_COMPLETED_RETENTIONMS --config segment.bytes=$KAFKA_TOPICS_COMPLETED_SEGMENTBYTES)
+
+  if ! ([[ "$OUTPUT" == *"already exists"* ]] || [[ "$OUTPUT" == *"Created topic"* ]]); then
+    echo "Failed to create completed$i topic"
+    exit 1
+  fi
+done
+
+echo "Create invoker topics"
+INVOKER_COUNT=$((INVOKER_COUNT - 1))
+for i in `seq 0 $INVOKER_COUNT`; do
+  OUTPUT=$(kafka-topics.sh --create --topic invoker$i --replication-factor $REPLICATION_FACTOR --partitions $PARTITIONS --zookeeper ${ZOOKEEPER_HOST}:${ZOOKEEPER_PORT} --config retention.bytes=$KAFKA_TOPICS_INVOKER_RETENTIONBYTES --config retention.ms=$KAFKA_TOPICS_INVOKER_RETENTIONMS --config segment.bytes=$KAFKA_TOPICS_INVOKER_SEGMENTBYTES)
+
+  if ! ([[ "$OUTPUT" == *"already exists"* ]] || [[ "$OUTPUT" == *"Created topic"* ]]); then
+    echo "Failed to create invoker$i topic"
+    exit 1
+  fi
+done
+
+fg
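
Once init.sh has run, the topics it created can be listed by hand using the
same pattern the removed ansible role used (the pod name below is a
placeholder; note that `JMX_PORT` must be unset first, just as in the script):

```
kubectl -n openwhisk exec <kafka-pod> -c kafka -- \
  bash -c 'unset JMX_PORT; kafka-topics.sh --list --zookeeper zookeeper.openwhisk:2181'
```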
diff --git a/kubernetes/kafka/kafka.yml b/kubernetes/kafka/kafka.yml
new file mode 100644
index 0000000..5844486
--- /dev/null
+++ b/kubernetes/kafka/kafka.yml
@@ -0,0 +1,86 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: kafka
+  namespace: openwhisk
+  labels:
+    name: kafka
+spec:
+  selector:
+    name: kafka
+  ports:
+    - port: 9092
+      targetPort: 9092
+      name: kafka
+
+---
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: kafka
+  namespace: openwhisk
+  labels:
+    name: kafka
+spec:
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        name: kafka
+    spec:
+      restartPolicy: Always
+
+      containers:
+      - name: kafka
+        imagePullPolicy: IfNotPresent
+        # custom kafka image. Not sure what we want to do with this.
+        image: danlavine/whisk_kafka
+        ports:
+        - name: kafka
+          containerPort: 9092
+        env:
+        - name: "KAFKA_ADVERTISED_HOST_NAME"
+          value: kafka.openwhisk
+        - name: "KAFKA_PORT"
+          value: "9092"
+
+        # message settings
+        - name: "REPLICATION_FACTOR"
+          value: "1"
+        - name: "PARTITIONS"
+          value: "1"
+
+        # health topic settings
+        - name: "KAFKA_TOPICS_HEALTH_RETENTIONBYTES"
+          value: "536870912"
+        - name: "KAFKA_TOPICS_HEALTH_RETENTIONMS"
+          value: "1073741824"
+        - name: "KAFKA_TOPICS_HEALTH_SEGMENTBYTES"
+          value: "3600000"
+
+        # complete topic settings
+        - name: "CONTROLLER_COUNT"
+          value: "2"
+        - name: "KAFKA_TOPICS_COMPLETED_RETENTIONBYTES"
+          value: "536870912"
+        - name: "KAFKA_TOPICS_COMPLETED_RETENTIONMS"
+          value: "1073741824"
+        - name: "KAFKA_TOPICS_COMPLETED_SEGMENTBYTES"
+          value: "3600000"
+
+        # invoker topic settings
+        - name: "INVOKER_COUNT"
+          value: "1"
+        - name: "KAFKA_TOPICS_INVOKER_RETENTIONBYTES"
+          value: "536870912"
+        - name: "KAFKA_TOPICS_INVOKER_RETENTIONMS"
+          value: "1073741824"
+        - name: "KAFKA_TOPICS_INVOKER_SEGMENTBYTES"
+          value: "3600000"
+
+        # zookeeper info
+        - name: "ZOOKEEPER_HOST"
+          value: "zookeeper.openwhisk"
+        - name: "ZOOKEEPER_PORT"
+          value: "2181"
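
The broker advertises itself as `kafka.openwhisk`, which only works if the
Service above is resolvable in-cluster. A quick smoke test, assuming a
busybox image can be pulled (the pod name is illustrative):

```
# resolve the kafka Service DNS record from inside the cluster
kubectl -n openwhisk run dns-check --rm -it --image=busybox --restart=Never -- \
  nslookup kafka.openwhisk
```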
diff --git a/kubernetes/nginx/README.md b/kubernetes/nginx/README.md
index 98402e0..a425927 100644
--- a/kubernetes/nginx/README.md
+++ b/kubernetes/nginx/README.md
@@ -1,6 +1,8 @@
 Nginx
 -----
 
+# Deploy Nginx
+
 The Nginx Pod needs to be configured with custom certificates
 and an nginx configuration file. To achieve this, you will need
 to create a Kube ConfigMap for the `nginx.conf` file and a
@@ -39,7 +41,7 @@ namespace run the following command:
 kubectl -n openwhisk create secret tls nginx --cert=certs/cert.pem --key=certs/key.pem
 ```
 
-## Deploy Nginx
+## Deploying Nginx
 
 After successfully [creating the nginx ConfigMap](#create-nginx-configmap)
 and [creating the Secrets](#create-nginx-secrets)
@@ -49,6 +51,7 @@ you will be able to create the Nginx Service and Deployment.
 kubectl apply -f nginx.yml
 ```
 
+# Deployment Changes
 ## Update Nginx ConfigMap
 
 To update the nginx ConfigMap:
@@ -90,3 +93,13 @@ kubectl replace -f nginx_secrets.yml
 Kubernetes will then go through and update any deployed Nginx
 instances, updating all of the keys defined in the nginx
 Secrets.
+
+## Increase Controller Count
+
+If you are updating the number of controllers being deployed with OpenWhisk
+from the default 2, you will need to make a few changes. The Nginx conf
+file has routes for Controller [StatefulSet][StatefulSet] addresses.
+Specifically, [these lines](https://github.com/apache/incubator-openwhisk-deploy-kube/tree/master/kubernetes/nginx/nginx.conf#L15-L20)
+will need to be updated with a list of all available routes.
+
+[StatefulSet]: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/
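
For orientation, the routes being edited follow the StatefulSet DNS
convention of `<pod>-<ordinal>.<service>`. A purely hypothetical sketch of
what adding a third controller might look like (the upstream name, host
names, and port are assumptions, not the repo's actual nginx.conf):

```
upstream controllers {
    server controller-0.controller.openwhisk:10001 fail_timeout=60s;
    server controller-1.controller.openwhisk:10001 backup;
    server controller-2.controller.openwhisk:10001 backup;  # newly added route
}
```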
diff --git a/kubernetes/zookeeper/README.md b/kubernetes/zookeeper/README.md
new file mode 100644
index 0000000..8d57c4b
--- /dev/null
+++ b/kubernetes/zookeeper/README.md
@@ -0,0 +1,10 @@
+Zookeeper
+-----
+
+# Deploying
+
+To deploy Zookeeper, you just need to run:
+
+```
+kubectl apply -f zookeeper.yml
+```
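
To confirm Zookeeper is actually healthy rather than merely scheduled, the
four-letter `ruok` probe that the removed ansible health check used still
works by hand (the pod name below is a placeholder):

```
kubectl -n openwhisk exec <zookeeper-pod> -c zookeeper -- \
  bash -c 'echo ruok | nc -w 3 0.0.0.0:2181'
# a healthy server answers: imok
```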
diff --git a/ansible-kube/environments/kube/files/zookeeper.yml b/kubernetes/zookeeper/zookeeper.yml
similarity index 61%
rename from ansible-kube/environments/kube/files/zookeeper.yml
rename to kubernetes/zookeeper/zookeeper.yml
index cd292c5..50123ed 100644
--- a/ansible-kube/environments/kube/files/zookeeper.yml
+++ b/kubernetes/zookeeper/zookeeper.yml
@@ -1,4 +1,26 @@
 ---
+apiVersion: v1
+kind: Service
+metadata:
+  name: zookeeper
+  namespace: openwhisk
+  labels:
+    name: zookeeper
+spec:
+  selector:
+    name: zookeeper
+  ports:
+    - port: 2181
+      targetPort: 2181
+      name: zookeeper
+    - port: 2888
+      targetPort: 2888
+      name: server
+    - port: 3888
+      targetPort: 3888
+      name: leader-election
+
+---
 apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:

-- 
To stop receiving notification emails like this one, please contact
['"commits@openwhisk.apache.org" <co...@openwhisk.apache.org>'].