You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@openwhisk.apache.org by cs...@apache.org on 2018/04/17 12:42:54 UTC
[incubator-openwhisk-deploy-kube] branch master updated: Add
support for persistent storage. (#153)
This is an automated email from the ASF dual-hosted git repository.
csantanapr pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk-deploy-kube.git
The following commit(s) were added to refs/heads/master by this push:
new 37fbcb7 Add support for persistent storage. (#153)
37fbcb7 is described below
commit 37fbcb7b32a8ee69790e31ca89e948bbadacfdff
Author: David Grove <dg...@users.noreply.github.com>
AuthorDate: Tue Apr 17 08:42:50 2018 -0400
Add support for persistent storage. (#153)
---
README.md | 9 +-
docker/couchdb/init.sh | 143 ++++++++++++----------
kubernetes/apigateway/README.md | 6 -
kubernetes/apigateway/apigateway.yml | 8 ++
kubernetes/cluster-setup/README.md | 19 +++
kubernetes/cluster-setup/persistent-volumes.yml | 154 ++++++++++++++++++++++++
kubernetes/couchdb/README.md | 19 ++-
kubernetes/couchdb/couchdb.yml | 8 ++
kubernetes/kafka/kafka.yml | 7 ++
kubernetes/zookeeper/zookeeper.yml | 17 +++
tools/travis/build.sh | 1 +
11 files changed, 306 insertions(+), 85 deletions(-)
diff --git a/README.md b/README.md
index a9f4501..d89c762 100644
--- a/README.md
+++ b/README.md
@@ -62,12 +62,13 @@ use to deploy OpenWhisk on Kubernetes for our Travis CI testing.
Do one of the following:
* For development and testing purposes, this repo includes a configuration
- for deploying a [simple non-persistent CouchDB instance](kubernetes/couchdb)
+ for deploying a [non-replicated CouchDB instance](kubernetes/couchdb)
within the Kubernetes cluster.
* For a production level CouchDB instance, take a look at the main
OpenWhisk [documentation for configuring CouchDB](https://github.com/apache/incubator-openwhisk/blob/master/tools/db/README.md).
You will need to define the db.auth secret and db.config configmap as described in the [CouchDB README.md](kubernetes/couchdb/README.md)
- to match your database deployment.
+ to match your database deployment and create a CouchDB service instance
+ that forwards connections to your external database.
## Deploy Remaining Components
@@ -101,10 +102,12 @@ Note: if you installed self-signed certificates when you configured Nginx, you w
At some point there might be a need to cleanup the Kubernetes environment.
For this, we want to delete all the OpenWhisk deployments, services, jobs
and whatever else might be there. This is easily accomplished by
-deleting the `openwhisk` namespace:
+deleting the `openwhisk` namespace and all persistent volumes labeled with
+pv-owner=openwhisk:
```
kubectl delete namespace openwhisk
+kubectl delete persistentvolume -lpv-owner=openwhisk
```
# Issues
diff --git a/docker/couchdb/init.sh b/docker/couchdb/init.sh
index e042057..a78b309 100755
--- a/docker/couchdb/init.sh
+++ b/docker/couchdb/init.sh
@@ -1,79 +1,90 @@
#!/bin/bash
set -ex
-# Always clone the latest version of OpenWhisk
-git clone https://github.com/apache/incubator-openwhisk /openwhisk
-
-pushd /openwhisk
- # if auth guest overwrite file
- if [ -n "$AUTH_GUEST" ]; then
- echo "$AUTH_GUEST" > /openwhisk/ansible/files/auth.guest
- fi
-
- # if auth whisk system overwrite file
- if [ -n "$AUTH_WHISK_SYSTEM" ]; then
- echo "$AUTH_WHISK_SYSTEM" > /openwhisk/ansible/files/auth.whisk.system
- fi
-
- # start couchdb with a background process
- /docker-entrypoint.sh /opt/couchdb/bin/couchdb &
-
- # wait for couchdb to be up and running
- TIMEOUT=0
- echo "wait for CouchDB to be up and running"
- until $( curl --output /dev/null --silent http://$DB_HOST:$DB_PORT/_utils ) || [ $TIMEOUT -eq 30 ]; do
+# start couchdb as a background process
+/docker-entrypoint.sh /opt/couchdb/bin/couchdb &
+
+# wait for couchdb to be up and running
+TIMEOUT=0
+echo "wait for CouchDB to be up and running"
+until $( curl --output /dev/null --silent http://$DB_HOST:$DB_PORT/_utils ) || [ $TIMEOUT -eq 30 ]; do
echo "waiting for CouchDB to be available"
sleep 2
let TIMEOUT=TIMEOUT+1
- done
+done
- if [ $TIMEOUT -eq 30 ]; then
+if [ $TIMEOUT -eq 30 ]; then
echo "failed to setup CouchDB"
exit 1
- fi
-
-
- # setup and initialize DB
- pushd ansible
- ansible-playbook -i environments/local setup.yml
- ansible-playbook -i environments/local couchdb.yml --tags ini \
- -e db_prefix=$DB_PREFIX \
- -e db_host=$DB_HOST \
- -e db_username=$COUCHDB_USER \
- -e db_password=$COUCHDB_PASSWORD \
- -e db_port=$DB_PORT \
- -e openwhisk_home=/openwhisk
- popd
-
- # disable reduce limits on views
- curl -X PUT http://$COUCHDB_USER:$COUCHDB_PASSWORD@$DB_HOST:$DB_PORT/_node/couchdb@$NODENAME/_config/query_server_config/reduce_limit -d '"false"'
-
- # create the couchdb system databases
- curl -X PUT http://$COUCHDB_USER:$COUCHDB_PASSWORD@$DB_HOST:$DB_PORT/_users
- curl -X PUT http://$COUCHDB_USER:$COUCHDB_PASSWORD@$DB_HOST:$DB_PORT/_replicator
- curl -X PUT http://$COUCHDB_USER:$COUCHDB_PASSWORD@$DB_HOST:$DB_PORT/_global_changes
-
- pushd ansible
- # initialize the DB
- ansible-playbook -i environments/local initdb.yml \
- -e db_prefix=$DB_PREFIX \
- -e db_host=$DB_HOST \
- -e db_username=$COUCHDB_USER \
- -e db_password=$COUCHDB_PASSWORD \
- -e db_port=$DB_PORT \
- -e openwhisk_home=/openwhisk
-
- # wipe the DB
- ansible-playbook -i environments/local wipe.yml \
- -e db_prefix=$DB_PREFIX \
- -e db_host=$DB_HOST \
- -e db_username=$COUCHDB_USER \
- -e db_password=$COUCHDB_PASSWORD \
- -e db_port=$DB_PORT \
- -e openwhisk_home=/openwhisk
- popd
-popd
+fi
+
+
+if [[ -f /opt/couchdb/data/_openwhisk_initialized.stamp ]]; then
+ # If we mounted an existing database from the persistent volume then use it.
+ echo "_openwhisk_initialized.stamp exists; using existing database"
+else
+ # No existing database; we will have to initialize it.
+
+ # Always clone the latest version of OpenWhisk
+ git clone https://github.com/apache/incubator-openwhisk /openwhisk
+
+ pushd /openwhisk
+ # if auth guest overwrite file
+ if [ -n "$AUTH_GUEST" ]; then
+ echo "$AUTH_GUEST" > /openwhisk/ansible/files/auth.guest
+ fi
+
+ # if auth whisk system overwrite file
+ if [ -n "$AUTH_WHISK_SYSTEM" ]; then
+ echo "$AUTH_WHISK_SYSTEM" > /openwhisk/ansible/files/auth.whisk.system
+ fi
+
+ # setup and initialize DB
+ pushd ansible
+ ansible-playbook -i environments/local setup.yml
+ ansible-playbook -i environments/local couchdb.yml --tags ini \
+ -e db_prefix=$DB_PREFIX \
+ -e db_host=$DB_HOST \
+ -e db_username=$COUCHDB_USER \
+ -e db_password=$COUCHDB_PASSWORD \
+ -e db_port=$DB_PORT \
+ -e openwhisk_home=/openwhisk
+ popd
+
+ # disable reduce limits on views
+ curl -X PUT http://$COUCHDB_USER:$COUCHDB_PASSWORD@$DB_HOST:$DB_PORT/_node/couchdb@$NODENAME/_config/query_server_config/reduce_limit -d '"false"'
+
+ # create the couchdb system databases
+ curl -X PUT http://$COUCHDB_USER:$COUCHDB_PASSWORD@$DB_HOST:$DB_PORT/_users
+ curl -X PUT http://$COUCHDB_USER:$COUCHDB_PASSWORD@$DB_HOST:$DB_PORT/_replicator
+ curl -X PUT http://$COUCHDB_USER:$COUCHDB_PASSWORD@$DB_HOST:$DB_PORT/_global_changes
+
+ pushd ansible
+ # initialize the DB
+ ansible-playbook -i environments/local initdb.yml \
+ -e db_prefix=$DB_PREFIX \
+ -e db_host=$DB_HOST \
+ -e db_username=$COUCHDB_USER \
+ -e db_password=$COUCHDB_PASSWORD \
+ -e db_port=$DB_PORT \
+ -e openwhisk_home=/openwhisk
+
+ # wipe the DB
+ ansible-playbook -i environments/local wipe.yml \
+ -e db_prefix=$DB_PREFIX \
+ -e db_host=$DB_HOST \
+ -e db_username=$COUCHDB_USER \
+ -e db_password=$COUCHDB_PASSWORD \
+ -e db_port=$DB_PORT \
+ -e openwhisk_home=/openwhisk
+ popd
+
+ # stamp that we successfully initialized the database
+ date > /opt/couchdb/data/_openwhisk_initialized.stamp
+
+ popd
+fi
echo "successfully setup and configured CouchDB for OpenWhisk"
diff --git a/kubernetes/apigateway/README.md b/kubernetes/apigateway/README.md
index 672016a..719f485 100644
--- a/kubernetes/apigateway/README.md
+++ b/kubernetes/apigateway/README.md
@@ -9,9 +9,3 @@ To deploy the ApiGateway, you only need to run the following command:
kubectl apply -f apigateway.yml
```
-Note: The URL returned from `wsk api create` may contain a spurious
-:8080 due to its assumption about the meaning of PUBLIC_MANAGEDURL_HOST.
-Working on a fix to the upstream incubator-openwhisk-apigateway project
-to weaken the assumption that the API URL is constructed by concatenating
-PUBLIC_MANAGEDURL_HOST:PUBLIC_MANAGEDURL_PORT as this is not always
-appropriate for kube-based deployments.
diff --git a/kubernetes/apigateway/apigateway.yml b/kubernetes/apigateway/apigateway.yml
index 70e9d33..7aa8eea 100644
--- a/kubernetes/apigateway/apigateway.yml
+++ b/kubernetes/apigateway/apigateway.yml
@@ -47,10 +47,18 @@ spec:
- apigateway
topologyKey: "kubernetes.io/hostname"
+ volumes:
+ - name: redis-data
+ persistentVolumeClaim:
+ claimName: pv-apigateway-01
+
containers:
- name: redis
imagePullPolicy: IfNotPresent
image: redis:3.2
+ volumeMounts:
+ - mountPath: /data
+ name: redis-data
- name: apigateway
imagePullPolicy: Always
image: openwhisk/apigateway
diff --git a/kubernetes/cluster-setup/README.md b/kubernetes/cluster-setup/README.md
index 740695d..d22309f 100644
--- a/kubernetes/cluster-setup/README.md
+++ b/kubernetes/cluster-setup/README.md
@@ -62,3 +62,22 @@ deployed OpenWhisk. If it is not defined those steps will fail.
kubectl -n openwhisk create secret generic whisk.auth --from-file=system=auth.whisk.system --from-file=guest=auth.guest
```
+
+### Create persistent volumes
+
+Several of the OpenWhisk implementation components you will deploy in
+subsequent steps require persistent storage to maintain their state
+across crashes and restarts. The general mechanism in Kubernetes for
+specifying storage needs and binding available storage to pods is
+to match Persistent Volumes to Persistent Volume Claims.
+
+The file persistent-volumes.yml file lists the PersistentVolume
+resources you will need to create and defines them in a manner
+appropriate for running OpenWhisk on minikube. If you are not
+deploying on minikube, you may need to edit this file to select
+PersistentVolume types provided by your cloud provider. After
+optionally editing the file, apply it with:
+
+```
+kubectl apply -f persistent-volumes.yml
+```
diff --git a/kubernetes/cluster-setup/persistent-volumes.yml b/kubernetes/cluster-setup/persistent-volumes.yml
new file mode 100644
index 0000000..89df09a
--- /dev/null
+++ b/kubernetes/cluster-setup/persistent-volumes.yml
@@ -0,0 +1,154 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: pv-apigateway-01
+ labels:
+ pv-owner: openwhisk
+ pv-usage: pv-apigateway
+spec:
+ accessModes:
+ - ReadWriteOnce
+ capacity:
+ storage: 1Gi
+ hostPath:
+ path: /data/pv-apigateway-01/
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: pv-apigateway-01
+ namespace: openwhisk
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 1Gi
+ selector:
+ matchLabels:
+ pv-usage: pv-apigateway
+
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: pv-couchdb-01
+ labels:
+ pv-owner: openwhisk
+ pv-usage: pv-couchdb
+spec:
+ accessModes:
+ - ReadWriteOnce
+ capacity:
+ storage: 2Gi
+ hostPath:
+ path: /data/pv-couchdb-01/
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: pv-couchdb-01
+ namespace: openwhisk
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 2Gi
+ selector:
+ matchLabels:
+ pv-usage: pv-couchdb
+
+
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: pv-kafka-01
+ labels:
+ pv-owner: openwhisk
+ pv-usage: pv-kafka
+spec:
+ accessModes:
+ - ReadWriteOnce
+ capacity:
+ storage: 2Gi
+ hostPath:
+ path: /data/pv-kafka-01/
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: pv-kafka-01
+ namespace: openwhisk
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 2Gi
+ selector:
+ matchLabels:
+ pv-usage: pv-kafka
+
+
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: pv-zookeeper-data-01
+ labels:
+ pv-owner: openwhisk
+ pv-usage: pv-zookeeper-data
+spec:
+ accessModes:
+ - ReadWriteOnce
+ capacity:
+ storage: 1Gi
+ hostPath:
+ path: /data/pv-zookeeper-data-01/
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: pv-zookeeper-data-01
+ namespace: openwhisk
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 1Gi
+ selector:
+ matchLabels:
+ pv-usage: pv-zookeeper-data
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: pv-zookeeper-datalog-01
+ labels:
+ pv-owner: openwhisk
+ pv-usage: pv-zookeeper-datalog
+spec:
+ accessModes:
+ - ReadWriteOnce
+ capacity:
+ storage: 1Gi
+ hostPath:
+ path: /data/pv-zookeeper-datalog-01/
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: pv-zookeeper-datalog-01
+ namespace: openwhisk
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 1Gi
+ selector:
+ matchLabels:
+ pv-usage: pv-zookeeper-datalog
diff --git a/kubernetes/couchdb/README.md b/kubernetes/couchdb/README.md
index dda82c9..c3c764f 100644
--- a/kubernetes/couchdb/README.md
+++ b/kubernetes/couchdb/README.md
@@ -31,13 +31,17 @@ Pod. This can be done by running:
kubectl apply -f couchdb.yml
```
-This pod goes through the process of pulling the OpenWhisk
-repo and running through some of the ansible playbooks for
-configuring CouchDB.
+If the persistent volume mounted as /opt/couchdb/data in the
+pod already has been initialized with an OpenWhisk CouchDB
+database, then the pod will simply use it. If an initialized
+database is not found, then the pod will go through the
+process of pulling the OpenWhisk git repo and running some of the
+ansible playbooks for configuring CouchDB.
**NOTE** the pod will say running as soon as the start command runs,
but that does not mean that CouchDB is really running and ready to
-use. It typically takes about a minute until setup has completed and
+use. If a new database actually needs to be created and initialized,
+it typically takes about a minute until setup has completed and
the database is actually usable. Examine the pods logs with
```
@@ -47,13 +51,8 @@ kubectl -n openwhisk logs -lname=couchdb
and look for the line:
```
-successfully setup and configured CouchDB
+successfully setup and configured CouchDB for OpenWhisk
```
This indicates that the CouchDB instance is fully configured and ready to use.
-## Persistence
-
-To create a persistent CouchDB instance, you will need
-to create a [persistent volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/)
-for the [couchdb.yml](couchdb.yml).
diff --git a/kubernetes/couchdb/couchdb.yml b/kubernetes/couchdb/couchdb.yml
index 5efc719..2b9dd81 100644
--- a/kubernetes/couchdb/couchdb.yml
+++ b/kubernetes/couchdb/couchdb.yml
@@ -65,6 +65,11 @@ spec:
- couchdb
topologyKey: "kubernetes.io/hostname"
+ volumes:
+ - name: couchdb-data
+ persistentVolumeClaim:
+ claimName: pv-couchdb-01
+
containers:
- name: couchdb
imagePullPolicy: Always
@@ -73,6 +78,9 @@ spec:
ports:
- name: couchdb
containerPort: 5984
+ volumeMounts:
+ - mountPath: /opt/couchdb/data
+ name: couchdb-data
env:
- name: "DB_PREFIX"
valueFrom:
diff --git a/kubernetes/kafka/kafka.yml b/kubernetes/kafka/kafka.yml
index 3cdf980..621ba5e 100644
--- a/kubernetes/kafka/kafka.yml
+++ b/kubernetes/kafka/kafka.yml
@@ -14,6 +14,10 @@ spec:
name: kafka
spec:
restartPolicy: Always
+ volumes:
+ - name: kafka-data
+ persistentVolumeClaim:
+ claimName: pv-kafka-01
affinity:
# prefer to not run on an invoker node (only prefer because of single node clusters)
@@ -63,6 +67,9 @@ spec:
- name: kafka
imagePullPolicy: IfNotPresent
image: wurstmeister/kafka:0.11.0.1
+ volumeMounts:
+ - mountPath: /kafka
+ name: kafka-data
ports:
- name: kafka
containerPort: 9092
diff --git a/kubernetes/zookeeper/zookeeper.yml b/kubernetes/zookeeper/zookeeper.yml
index 41cdd1d..e513bd2 100644
--- a/kubernetes/zookeeper/zookeeper.yml
+++ b/kubernetes/zookeeper/zookeeper.yml
@@ -13,6 +13,13 @@ spec:
name: zookeeper
spec:
restartPolicy: Always
+ volumes:
+ - name: zk-data
+ persistentVolumeClaim:
+ claimName: pv-zookeeper-data-01
+ - name: zk-datalog
+ persistentVolumeClaim:
+ claimName: pv-zookeeper-datalog-01
affinity:
# prefer to not run on an invoker node (only prefer because of single node clusters)
@@ -57,6 +64,16 @@ spec:
containerPort: 2888
- name: leader-election
containerPort: 3888
+ volumeMounts:
+ - mountPath: /data
+ name: zk-data
+ - mountPath: /datalog
+ name: zk-datalog
+ env:
+ - name: "ZOO_DATA_DIR"
+ value: /data
+ - name: "ZOO_DATA_LOG_DIR"
+ value: /datalog
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
diff --git a/tools/travis/build.sh b/tools/travis/build.sh
index 28f1d89..d023c38 100755
--- a/tools/travis/build.sh
+++ b/tools/travis/build.sh
@@ -162,6 +162,7 @@ pushd kubernetes/cluster-setup
fi
kubectl -n openwhisk create cm whisk.limits --from-env-file=limits.env
kubectl -n openwhisk create secret generic whisk.auth --from-file=system=auth.whisk.system --from-file=guest=auth.guest
+ kubectl apply -f persistent-volumes.yml
popd
# configure Ingress and wsk CLI
--
To stop receiving notification emails like this one, please contact
csantanapr@apache.org.