You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@servicecomb.apache.org by GitBox <gi...@apache.org> on 2018/08/23 01:57:33 UTC

[GitHub] little-cui closed pull request #423: SCB-857 Provider rule of consumer can not be removed

little-cui closed pull request #423: SCB-857 Provider rule of consumer can not be removed
URL: https://github.com/apache/incubator-servicecomb-service-center/pull/423
 
 
   

This is a PR merged from a forked repository.
As GitHub hides the original diff on merge, it is displayed below for
the sake of provenance:

As this is a foreign pull request (from a fork), the diff is supplied
below (as it won't show otherwise due to GitHub magic):

diff --git a/docs/README.md b/docs/README.md
index eb144af0..ba6aef1a 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -22,7 +22,7 @@
 
 - [In Kubernetes Cluster](/integration/k8s)
 
-- [In VM](/docs/sc-cluster.md)
+- [In VMs](/docs/sc-cluster.md)
 
 - [Deploy with TLS](/docs/security-tls.md)
 
diff --git a/integration/k8s/service-center/charts/etcd/charts/etcd-operator/.helmignore b/integration/k8s/service-center/charts/etcd/charts/etcd-operator/.helmignore
new file mode 100644
index 00000000..f0c13194
--- /dev/null
+++ b/integration/k8s/service-center/charts/etcd/charts/etcd-operator/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/integration/k8s/service-center/charts/etcd/charts/etcd-operator/Chart.yaml b/integration/k8s/service-center/charts/etcd/charts/etcd-operator/Chart.yaml
new file mode 100644
index 00000000..36f89243
--- /dev/null
+++ b/integration/k8s/service-center/charts/etcd/charts/etcd-operator/Chart.yaml
@@ -0,0 +1,16 @@
+apiVersion: v1
+appVersion: 0.9.2
+description: CoreOS etcd-operator Helm chart for Kubernetes
+home: https://github.com/coreos/etcd-operator
+icon: https://raw.githubusercontent.com/coreos/etcd/master/logos/etcd-horizontal-color.png
+maintainers:
+- email: chance.zibolski@coreos.com
+  name: chancez
+- email: lachlan@deis.com
+  name: lachie83
+- email: jaescobar.cell@gmail.com
+  name: alejandroEsc
+name: etcd-operator
+sources:
+- https://github.com/coreos/etcd-operator
+version: 0.8.0
diff --git a/integration/k8s/service-center/charts/etcd/charts/etcd-operator/OWNERS b/integration/k8s/service-center/charts/etcd/charts/etcd-operator/OWNERS
new file mode 100644
index 00000000..e7cf8709
--- /dev/null
+++ b/integration/k8s/service-center/charts/etcd/charts/etcd-operator/OWNERS
@@ -0,0 +1,8 @@
+approvers:
+- lachie83
+- chancez
+- alejandroEsc
+reviewers:
+- lachie83
+- chancez
+- alejandroEsc
diff --git a/integration/k8s/service-center/charts/etcd/charts/etcd-operator/README.md b/integration/k8s/service-center/charts/etcd/charts/etcd-operator/README.md
new file mode 100644
index 00000000..746d73d7
--- /dev/null
+++ b/integration/k8s/service-center/charts/etcd/charts/etcd-operator/README.md
@@ -0,0 +1,158 @@
+# CoreOS etcd-operator
+
+[etcd-operator](https://coreos.com/blog/introducing-the-etcd-operator.html) Simplify etcd cluster
+configuration and management.
+
+__DISCLAIMER:__ While this chart has been well-tested, the etcd-operator is still currently in beta.
+Current project status is available [here](https://github.com/coreos/etcd-operator).
+
+## Introduction
+
+This chart bootstraps an etcd-operator and allows the deployment of etcd-cluster(s).
+
+## Official Documentation
+
+Official project documentation found [here](https://github.com/coreos/etcd-operator)
+
+## Prerequisites
+
+- Kubernetes 1.4+ with Beta APIs enabled
+- __Suggested:__ PV provisioner support in the underlying infrastructure to support backups
+
+## Installing the Chart
+
+To install the chart with the release name `my-release`:
+
+```bash
+$ helm install stable/etcd-operator --name my-release
+```
+
+__Note__: If you set `cluster.enabled` on install, it will have no effect.
+Before you create an etcd cluster, the TPR must be installed by the operator, so this option is ignored during helm installs, but can be used in upgrades.
+
+## Uninstalling the Chart
+
+To uninstall/delete the `my-release` deployment:
+
+```bash
+$ helm delete my-release
+```
+
+The command removes all the Kubernetes components EXCEPT the persistent volume.
+
+## Updating
+Updating the TPR resource will not result in the cluster being updated until `kubectl apply` for
+TPRs is fixed; see [kubernetes/issues/29542](https://github.com/kubernetes/kubernetes/issues/29542)
+Work around options are documented [here](https://github.com/coreos/etcd-operator#resize-an-etcd-cluster)
+
+## Configuration
+
+The following table lists the configurable parameters of the etcd-operator chart and their default values.
+
+| Parameter                                         | Description                                                          | Default                                        |
+| ------------------------------------------------- | -------------------------------------------------------------------- | ---------------------------------------------- |
+| `rbac.create`                                     | install required RBAC service account, roles and rolebindings        | `true`                                         |
+| `rbac.apiVersion`                                 | RBAC api version `v1alpha1|v1beta1`                                  | `v1beta1`                                      |
+| `rbac.etcdOperatorServiceAccountName`             | Name of the service account resource when RBAC is enabled            | `etcd-operator-sa`                                      |
+| `rbac.backupOperatorServiceAccountName`           | Name of the service account resource when RBAC is enabled            | `etcd-backup-operator-sa`                                      |
+| `rbac.restoreOperatorServiceAccountName`          | Name of the service account resource when RBAC is enabled            | `etcd-restore-operator-sa`                                      |
+| `deployments.etcdOperator`                        | Deploy the etcd cluster operator                                     | `true`                                         |
+| `deployments.backupOperator`                      | Deploy the etcd backup operator                                      | `true`                                         |
+| `deployments.restoreOperator`                     | Deploy the etcd restore operator                                     | `true`                                         |
+| `customResources.createEtcdClusterCRD`            | Create a custom resource: EtcdCluster                                | `false`                                        |
+| `customResources.createBackupCRD`                 | Create a custom resource: EtcdBackup                                 |                                                | `false`                                        |
+| `customResources.createRestoreCRD`                | Create a custom resource: EtcdRestore                                |                                                | `false`                                        |
+| `etcdOperator.name`                               | Etcd Operator name                                                   | `etcd-operator`                                |
+| `etcdOperator.replicaCount`                       | Number of operator replicas to create (only 1 is supported)          | `1`                                            |
+| `etcdOperator.image.repository`                   | etcd-operator container image                                        | `quay.io/coreos/etcd-operator`                 |
+| `etcdOperator.image.tag`                          | etcd-operator container image tag                                    | `v0.7.0`                                       |
+| `etcdOperator.image.pullpolicy`                   | etcd-operator container image pull policy                            | `Always`                                       |
+| `etcdOperator.resources.cpu`                      | CPU limit per etcd-operator pod                                      | `100m`                                         |
+| `etcdOperator.resources.memory`                   | Memory limit per etcd-operator pod                                   | `128Mi`                                        |
+| `etcdOperator.nodeSelector`                       | Node labels for etcd operator pod assignment                         | `{}`                                           |
+| `etcdOperator.commandArgs`                        | Additional command arguments                                         | `{}`                                           |
+| `backupOperator.name`                             | Backup operator name                                                 | `etcd-backup-operator`                         |
+| `backupOperator.replicaCount`                     | Number of operator replicas to create (only 1 is supported)          | `1`                                            |
+| `backupOperator.image.repository`                 | Operator container image                                             | `quay.io/coreos/etcd-operator`                 |
+| `backupOperator.image.tag`                        | Operator container image tag                                         | `v0.7.0`                                       |
+| `backupOperator.image.pullpolicy`                 | Operator container image pull policy                                 | `Always`                                       |
+| `backupOperator.resources.cpu`                    | CPU limit per etcd-operator pod                                      | `100m`                                         |
+| `backupOperator.resources.memory`                 | Memory limit per etcd-operator pod                                   | `128Mi`                                        |
+| `backupOperator.spec.storageType`                 | Storage to use for backup file, currently only S3 supported          | `S3`                                           |
+| `backupOperator.spec.s3.s3Bucket`                 | Bucket in S3 to store backup file                                    |                                                |
+| `backupOperator.spec.s3.awsSecret`                | Name of kubernetes secret containing aws credentials                |                                                |
+| `backupOperator.nodeSelector`                     | Node labels for etcd operator pod assignment                         | `{}`                                           |
+| `backupOperator.commandArgs`                      | Additional command arguments                                         | `{}`                                           |
+| `restoreOperator.name`                            | Restore operator name                                                | `etcd-backup-operator`                         |
+| `restoreOperator.replicaCount`                    | Number of operator replicas to create (only 1 is supported)          | `1`                                            |
+| `restoreOperator.image.repository`                | Operator container image                                             | `quay.io/coreos/etcd-operator`                 |
+| `restoreOperator.image.tag`                       | Operator container image tag                                         | `v0.7.0`                                       |
+| `restoreOperator.image.pullpolicy`                | Operator container image pull policy                                 | `Always`                                       |
+| `restoreOperator.resources.cpu`                   | CPU limit per etcd-operator pod                                      | `100m`                                         |
+| `restoreOperator.resources.memory`                | Memory limit per etcd-operator pod                                   | `128Mi`                                        |
+| `restoreOperator.spec.s3.path`                    | Path in S3 bucket containing the backup file                         |                                                |
+| `restoreOperator.spec.s3.awsSecret`               | Name of kubernetes secret containing aws credentials                |                                                |
+| `restoreOperator.nodeSelector`                    | Node labels for etcd operator pod assignment                         | `{}`                                           |
+| `restoreOperator.commandArgs`                     | Additional command arguments                                         | `{}`                                           |
+| `etcdCluster.name`                                | etcd cluster name                                                    | `etcd-cluster`                                 |
+| `etcdCluster.size`                                | etcd cluster size                                                    | `3`                                            |
+| `etcdCluster.version`                             | etcd cluster version                                                 | `3.2.10`                                       |
+| `etcdCluster.image.repository`                    | etcd container image                                                 | `quay.io/coreos/etcd-operator`                 |
+| `etcdCluster.image.tag`                           | etcd container image tag                                             | `v3.2.10`                                      |
+| `etcdCluster.image.pullPolicy`                    | etcd container image pull policy                                     | `Always`                                       |
+| `etcdCluster.enableTLS`                           | Enable use of TLS                                                    | `false`                                        |
+| `etcdCluster.tls.static.member.peerSecret`        | Kubernetes secret containing TLS peer certs                          | `etcd-peer-tls`                                |
+| `etcdCluster.tls.static.member.serverSecret`      | Kubernetes secret containing TLS server certs                        | `etcd-server-tls`                              |
+| `etcdCluster.tls.static.operatorSecret`           | Kubernetes secret containing TLS client certs                        | `etcd-client-tls`                              |
+| `etcdCluster.pod.antiAffinity`                    | Whether etcd cluster pods should have an antiAffinity                | `false`                                        |
+| `etcdCluster.pod.resources.limits.cpu`            | CPU limit per etcd cluster pod                                       | `100m`                                         |
+| `etcdCluster.pod.resources.limits.memory`         | Memory limit per etcd cluster pod                                    | `128Mi`                                        |
+| `etcdCluster.pod.resources.requests.cpu`          | CPU request per etcd cluster pod                                     | `100m`                                         |
+| `etcdCluster.pod.resources.requests.memory`       | Memory request per etcd cluster pod                                  | `128Mi`                                        |
+| `etcdCluster.pod.nodeSelector`                    | node labels for etcd cluster pod assignment                          | `{}`                                           |
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example:
+
+```bash
+$ helm install --name my-release --set image.tag=v0.2.1 stable/etcd-operator
+```
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while
+installing the chart. For example:
+
+```bash
+$ helm install --name my-release --values values.yaml stable/etcd-operator
+```
+
+## RBAC
+By default the chart will install the recommended RBAC roles and rolebindings.
+
+To determine if your cluster supports this running the following:
+
+```console
+$ kubectl api-versions | grep rbac
+```
+
+You also need to have the following parameter on the api server. See the following document for how to enable [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)
+
+```
+--authorization-mode=RBAC
+```
+
+If the output contains "beta" or both "alpha" and "beta" you may install RBAC by default; if not, you may turn RBAC off as described below.
+
+### RBAC role/rolebinding creation
+
+RBAC resources are enabled by default. To disable RBAC do the following:
+
+```console
+$ helm install --name my-release stable/etcd-operator --set rbac.create=false
+```
+
+### Changing RBAC manifest apiVersion
+
+By default the RBAC resources are generated with the "v1beta1" apiVersion. To use "v1alpha1" do the following:
+
+```console
+$ helm install --name my-release stable/etcd-operator --set rbac.install=true,rbac.apiVersion=v1alpha1
+```
diff --git a/integration/k8s/service-center/charts/etcd/charts/etcd-operator/templates/NOTES.txt b/integration/k8s/service-center/charts/etcd/charts/etcd-operator/templates/NOTES.txt
new file mode 100644
index 00000000..c33ee014
--- /dev/null
+++ b/integration/k8s/service-center/charts/etcd/charts/etcd-operator/templates/NOTES.txt
@@ -0,0 +1,33 @@
+{{- $clusterEnabled := (and (not .Release.IsInstall) .Values.customResources.createEtcdClusterCRD) -}}
+{{- if and .Release.IsInstall .Values.customResources.createEtcdClusterCRD -}}
+Not enabling cluster, the ThirdPartyResource must be installed before you can create a Cluster. Continuing rest of normal deployment.
+
+{{ end -}}
+
+{{- if $clusterEnabled -}}
+1. Watch etcd cluster start
+  kubectl get pods -l etcd_cluster={{ .Values.etcdCluster.name }} --namespace {{ .Release.Namespace }} -w
+
+2. Confirm etcd cluster is healthy
+  $ kubectl run --rm -i --tty --env="ETCDCTL_API=3" --env="ETCDCTL_ENDPOINTS=http://{{ .Values.etcdCluster.name }}-client:2379" --namespace {{ .Release.Namespace }} etcd-test --image quay.io/coreos/etcd --restart=Never -- /bin/sh -c 'watch -n1 "etcdctl  member list"'
+
+3. Interact with the cluster!
+  $ kubectl run --rm -i --tty --env ETCDCTL_API=3 --namespace {{ .Release.Namespace }} etcd-test --image quay.io/coreos/etcd --restart=Never -- /bin/sh
+  / # etcdctl --endpoints http://{{ .Values.etcdCluster.name }}-client:2379 put foo bar
+  / # etcdctl --endpoints http://{{ .Values.etcdCluster.name }}-client:2379 get foo
+  OK
+  (ctrl-D to exit)
+  
+4. Optional
+  Check the etcd-operator logs
+  export POD=$(kubectl get pods -l app={{ template "etcd-operator.fullname" . }} --namespace {{ .Release.Namespace }} --output name)
+  kubectl logs $POD --namespace={{ .Release.Namespace }}
+
+{{- else -}}
+1. etcd-operator deployed.
+  If you would like to deploy an etcd-cluster set cluster.enabled to true in values.yaml
+  Check the etcd-operator logs
+    export POD=$(kubectl get pods -l app={{ template "etcd-operator.fullname" . }} --namespace {{ .Release.Namespace }} --output name)
+    kubectl logs $POD --namespace={{ .Release.Namespace }}
+
+{{- end -}}
diff --git a/integration/k8s/service-center/charts/etcd/charts/etcd-operator/templates/_helpers.tpl b/integration/k8s/service-center/charts/etcd/charts/etcd-operator/templates/_helpers.tpl
new file mode 100644
index 00000000..03f9a26b
--- /dev/null
+++ b/integration/k8s/service-center/charts/etcd/charts/etcd-operator/templates/_helpers.tpl
@@ -0,0 +1,75 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "etcd-operator.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "etcd-operator.fullname" -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- printf "%s-%s-%s" .Release.Name $name .Values.etcdOperator.name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{- define "etcd-backup-operator.name" -}}
+{{- default .Chart.Name .Values.backupOperator.name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "etcd-backup-operator.fullname" -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- printf "%s-%s-%s" .Release.Name $name .Values.backupOperator.name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{- define "etcd-restore-operator.name" -}}
+{{- default .Chart.Name .Values.restoreOperator.name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "etcd-restore-operator.fullname" -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- printf "%s-%s-%s" .Release.Name $name .Values.restoreOperator.name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create the name of the etcd-operator service account to use
+*/}}
+{{- define "etcd-operator.serviceAccountName" -}}
+{{- if .Values.serviceAccount.etcdOperatorServiceAccount.create -}}
+    {{ default (include "etcd-operator.fullname" .) .Values.serviceAccount.etcdOperatorServiceAccount.name }}
+{{- else -}}
+    {{ default "default" .Values.serviceAccount.etcdOperatorServiceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create the name of the backup-operator service account to use 
+*/}}
+{{- define "etcd-backup-operator.serviceAccountName" -}}
+{{- if .Values.serviceAccount.backupOperatorServiceAccount.create -}}
+    {{ default (include "etcd-backup-operator.fullname" .) .Values.serviceAccount.backupOperatorServiceAccount.name }}
+{{- else -}}
+    {{ default "default" .Values.serviceAccount.backupOperatorServiceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create the name of the restore-operator service account to use 
+*/}}
+{{- define "etcd-restore-operator.serviceAccountName" -}}
+{{- if .Values.serviceAccount.restoreOperatorServiceAccount.create -}}
+    {{ default (include "etcd-restore-operator.fullname" .) .Values.serviceAccount.restoreOperatorServiceAccount.name }}
+{{- else -}}
+    {{ default "default" .Values.serviceAccount.restoreOperatorServiceAccount.name }}
+{{- end -}}
+{{- end -}}
\ No newline at end of file
diff --git a/integration/k8s/service-center/charts/etcd/charts/etcd-operator/templates/backup-etcd-crd.yaml b/integration/k8s/service-center/charts/etcd/charts/etcd-operator/templates/backup-etcd-crd.yaml
new file mode 100644
index 00000000..5528f766
--- /dev/null
+++ b/integration/k8s/service-center/charts/etcd/charts/etcd-operator/templates/backup-etcd-crd.yaml
@@ -0,0 +1,18 @@
+{{- if .Values.customResources.createBackupCRD }}
+---
+apiVersion: "etcd.database.coreos.com/v1beta2"
+kind: "EtcdBackup"
+metadata:
+  name: {{ template "etcd-backup-operator.fullname" . }}
+  labels:
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    app: {{ template "etcd-backup-operator.name" . }}
+    heritage: {{ .Release.Service }}
+    release: {{ .Release.Name }}
+  annotations:
+    "helm.sh/hook": "post-install"
+    "helm.sh/hook-delete-policy": "before-hook-creation"
+spec:
+  clusterName: {{ .Values.etcdCluster.name }}
+{{ toYaml .Values.backupOperator.spec | indent 2 }}
+{{- end}}
\ No newline at end of file
diff --git a/integration/k8s/service-center/charts/etcd/charts/etcd-operator/templates/backup-operator-clusterrole-binding.yaml b/integration/k8s/service-center/charts/etcd/charts/etcd-operator/templates/backup-operator-clusterrole-binding.yaml
new file mode 100644
index 00000000..526b2454
--- /dev/null
+++ b/integration/k8s/service-center/charts/etcd/charts/etcd-operator/templates/backup-operator-clusterrole-binding.yaml
@@ -0,0 +1,20 @@
+{{- if and .Values.rbac.create .Values.deployments.backupOperator }}
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/{{ .Values.rbac.apiVersion }}
+metadata:
+  name: {{ template "etcd-backup-operator.fullname" . }}
+  labels:
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    app: {{ template "etcd-operator.name" . }}
+    heritage: {{ .Release.Service }}
+    release: {{ .Release.Name }}
+subjects:
+- kind: ServiceAccount
+  name: {{ template "etcd-backup-operator.serviceAccountName" . }}
+  namespace: {{ .Release.Namespace }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: {{ template "etcd-operator.fullname" . }}
+{{- end }}
diff --git a/integration/k8s/service-center/charts/etcd/charts/etcd-operator/templates/backup-operator-deployment.yaml b/integration/k8s/service-center/charts/etcd/charts/etcd-operator/templates/backup-operator-deployment.yaml
new file mode 100644
index 00000000..d5c421c1
--- /dev/null
+++ b/integration/k8s/service-center/charts/etcd/charts/etcd-operator/templates/backup-operator-deployment.yaml
@@ -0,0 +1,59 @@
+{{- if .Values.deployments.backupOperator }}
+---
+apiVersion: apps/v1beta2
+kind: Deployment
+metadata:
+  name: {{ template "etcd-backup-operator.fullname" . }}
+  labels:
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    app: {{ template "etcd-backup-operator.name" . }}
+    heritage: {{ .Release.Service }}
+    release: {{ .Release.Name }}
+spec:
+  selector:
+    matchLabels:
+      app: {{ template "etcd-backup-operator.fullname" . }}
+      release: {{ .Release.Name }}
+  replicas: {{ .Values.backupOperator.replicaCount }}
+  template:
+    metadata:
+      name: {{ template "etcd-backup-operator.fullname" . }}
+      labels:
+        app: {{ template "etcd-backup-operator.fullname" . }}
+        release: {{ .Release.Name }}
+    spec:
+      serviceAccountName: {{ template "etcd-backup-operator.serviceAccountName" . }}
+      containers:
+      - name: {{ .Values.backupOperator.name }}
+        image: "{{ .Values.backupOperator.image.repository }}:{{ .Values.backupOperator.image.tag }}"
+        imagePullPolicy: {{ .Values.backupOperator.image.pullPolicy }}
+        command:
+        - etcd-backup-operator
+{{- range $key, $value := .Values.backupOperator.commandArgs }}
+        - "--{{ $key }}={{ $value }}"
+{{- end }}
+        env:
+        - name: MY_POD_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        - name: MY_POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        resources:
+          limits:
+            cpu: {{ .Values.backupOperator.resources.cpu }}
+            memory: {{ .Values.backupOperator.resources.memory }}
+          requests:
+            cpu: {{ .Values.backupOperator.resources.cpu }}
+            memory: {{ .Values.backupOperator.resources.memory }}
+    {{- if .Values.backupOperator.nodeSelector }}
+      nodeSelector:
+{{ toYaml .Values.backupOperator.nodeSelector | indent 8 }}
+    {{- end }}
+    {{- if .Values.backupOperator.tolerations }}
+      tolerations:
+{{ toYaml .Values.backupOperator.tolerations | indent 8 }}
+    {{- end }}
+{{- end }}
diff --git a/integration/k8s/service-center/charts/etcd/charts/etcd-operator/templates/backup-operator-service-account.yaml b/integration/k8s/service-center/charts/etcd/charts/etcd-operator/templates/backup-operator-service-account.yaml
new file mode 100644
index 00000000..06aec3df
--- /dev/null
+++ b/integration/k8s/service-center/charts/etcd/charts/etcd-operator/templates/backup-operator-service-account.yaml
@@ -0,0 +1,12 @@
+{{- if and .Values.serviceAccount.backupOperatorServiceAccount.create .Values.deployments.backupOperator }}
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ template "etcd-backup-operator.serviceAccountName" . }}
+  labels:
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    app: {{ template "etcd-backup-operator.name" . }}
+    heritage: {{ .Release.Service }}
+    release: {{ .Release.Name }}
+{{- end }}
\ No newline at end of file
diff --git a/integration/k8s/service-center/charts/etcd/charts/etcd-operator/templates/etcd-cluster-crd.yaml b/integration/k8s/service-center/charts/etcd/charts/etcd-operator/templates/etcd-cluster-crd.yaml
new file mode 100644
index 00000000..0d385d8f
--- /dev/null
+++ b/integration/k8s/service-center/charts/etcd/charts/etcd-operator/templates/etcd-cluster-crd.yaml
@@ -0,0 +1,25 @@
+{{- if .Values.customResources.createEtcdClusterCRD }}
+---
+apiVersion: "etcd.database.coreos.com/v1beta2"
+kind: "EtcdCluster"
+metadata:
+  name: {{ .Values.etcdCluster.name }}
+  labels:
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    app: {{ template "etcd-operator.name" . }}
+    heritage: {{ .Release.Service }}
+    release: {{ .Release.Name }}
+  annotations:
+    "helm.sh/hook": "post-install"
+    "helm.sh/hook-delete-policy": "before-hook-creation"
+spec:
+  size: {{ .Values.etcdCluster.size }}
+  version: "{{ .Values.etcdCluster.version }}"
+  pod:
+{{ toYaml .Values.etcdCluster.pod | indent 4 }}
+  {{- if .Values.etcdCluster.enableTLS }}
+  TLS:
+{{ toYaml .Values.etcdCluster.tls | indent 4 }}
+  {{- end }}
+{{- end }}
+
diff --git a/integration/k8s/service-center/charts/etcd/charts/etcd-operator/templates/operator-cluster-role.yaml b/integration/k8s/service-center/charts/etcd/charts/etcd-operator/templates/operator-cluster-role.yaml
new file mode 100644
index 00000000..62085978
--- /dev/null
+++ b/integration/k8s/service-center/charts/etcd/charts/etcd-operator/templates/operator-cluster-role.yaml
@@ -0,0 +1,49 @@
+{{- if .Values.rbac.create }}
+---
+apiVersion: rbac.authorization.k8s.io/{{ .Values.rbac.apiVersion }}
+kind: ClusterRole
+metadata:
+  name: {{ template "etcd-operator.fullname" . }}
+  labels:
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    app: {{ template "etcd-operator.name" . }}
+    heritage: {{ .Release.Service }}
+    release: {{ .Release.Name }}
+rules:
+- apiGroups:
+  - etcd.database.coreos.com
+  resources:
+  - etcdclusters
+  - etcdbackups
+  - etcdrestores
+  verbs:
+  - "*"
+- apiGroups:
+  - apiextensions.k8s.io
+  resources:
+  - customresourcedefinitions
+  verbs:
+  - "*"
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  - services
+  - endpoints
+  - persistentvolumeclaims
+  - events
+  verbs:
+  - "*"
+- apiGroups:
+  - apps
+  resources:
+  - deployments
+  verbs:
+  - "*"
+- apiGroups:
+  - ""
+  resources:
+  - secrets
+  verbs:
+  - get
+{{- end }}
diff --git a/integration/k8s/service-center/charts/etcd/charts/etcd-operator/templates/operator-clusterrole-binding.yaml b/integration/k8s/service-center/charts/etcd/charts/etcd-operator/templates/operator-clusterrole-binding.yaml
new file mode 100644
index 00000000..09594ccc
--- /dev/null
+++ b/integration/k8s/service-center/charts/etcd/charts/etcd-operator/templates/operator-clusterrole-binding.yaml
@@ -0,0 +1,20 @@
+{{- if and .Values.rbac.create .Values.deployments.etcdOperator }}
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/{{ required "A valid .Values.rbac.apiVersion entry required!" .Values.rbac.apiVersion }}
+metadata:
+  name: {{ template "etcd-operator.fullname" . }}
+  labels:
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    app: {{ template "etcd-operator.name" . }}
+    heritage: {{ .Release.Service }}
+    release: {{ .Release.Name }}
+subjects:
+- kind: ServiceAccount
+  name: {{ template "etcd-operator.serviceAccountName" . }}
+  namespace: {{ .Release.Namespace }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: {{ template "etcd-operator.fullname" . }}
+{{- end }}
diff --git a/integration/k8s/service-center/charts/etcd/charts/etcd-operator/templates/operator-deployment.yaml b/integration/k8s/service-center/charts/etcd/charts/etcd-operator/templates/operator-deployment.yaml
new file mode 100644
index 00000000..bb6b1a75
--- /dev/null
+++ b/integration/k8s/service-center/charts/etcd/charts/etcd-operator/templates/operator-deployment.yaml
@@ -0,0 +1,81 @@
+{{- if .Values.deployments.etcdOperator }}
+---
+apiVersion: apps/v1beta2
+kind: Deployment
+metadata:
+  name: {{ template "etcd-operator.fullname" . }}
+  labels:
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    app: {{ template "etcd-operator.name" . }}
+    heritage: {{ .Release.Service }}
+    release: {{ .Release.Name }}
+spec:
+  selector:
+    matchLabels:
+      app: {{ template "etcd-operator.fullname" . }}
+      release: {{ .Release.Name }}
+  replicas: {{ .Values.etcdOperator.replicaCount }}
+  template:
+    metadata:
+      name: {{ template "etcd-operator.fullname" . }}
+      labels:
+        app: {{ template "etcd-operator.fullname" . }}
+        release: {{ .Release.Name }}
+    spec:
+      serviceAccountName: {{ template "etcd-operator.serviceAccountName" . }}
+      containers:
+      - name: {{ template "etcd-operator.fullname" . }}
+        image: "{{ .Values.etcdOperator.image.repository }}:{{ .Values.etcdOperator.image.tag }}"
+        imagePullPolicy: {{ .Values.etcdOperator.image.pullPolicy }}
+        command:
+        - etcd-operator
+{{- range $key, $value := .Values.etcdOperator.commandArgs }}
+        - "--{{ $key }}={{ $value }}"
+{{- end }}
+        env:
+        - name: MY_POD_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        - name: MY_POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        resources:
+          limits:
+            cpu: {{ .Values.etcdOperator.resources.cpu }}
+            memory: {{ .Values.etcdOperator.resources.memory }}
+          requests:
+            cpu: {{ .Values.etcdOperator.resources.cpu }}
+            memory: {{ .Values.etcdOperator.resources.memory }}
+        {{- if .Values.etcdOperator.livenessProbe.enabled }}
+        livenessProbe:
+          httpGet:
+            path: /readyz
+            port: 8080
+          initialDelaySeconds: {{ .Values.etcdOperator.livenessProbe.initialDelaySeconds }}
+          periodSeconds: {{ .Values.etcdOperator.livenessProbe.periodSeconds }}
+          timeoutSeconds: {{ .Values.etcdOperator.livenessProbe.timeoutSeconds }}
+          successThreshold: {{ .Values.etcdOperator.livenessProbe.successThreshold }}
+          failureThreshold: {{ .Values.etcdOperator.livenessProbe.failureThreshold }}
+        {{- end}}
+        {{- if .Values.etcdOperator.readinessProbe.enabled }}
+        readinessProbe:
+          httpGet:
+            path: /readyz
+            port: 8080
+          initialDelaySeconds: {{ .Values.etcdOperator.readinessProbe.initialDelaySeconds }}
+          periodSeconds: {{ .Values.etcdOperator.readinessProbe.periodSeconds }}
+          timeoutSeconds: {{ .Values.etcdOperator.readinessProbe.timeoutSeconds }}
+          successThreshold: {{ .Values.etcdOperator.readinessProbe.successThreshold }}
+          failureThreshold: {{ .Values.etcdOperator.readinessProbe.failureThreshold }}
+        {{- end }}
+    {{- if .Values.etcdOperator.nodeSelector }}
+      nodeSelector:
+{{ toYaml .Values.etcdOperator.nodeSelector | indent 8 }}
+    {{- end }}
+    {{- if .Values.etcdOperator.tolerations }}
+      tolerations:
+{{ toYaml .Values.etcdOperator.tolerations | indent 8 }}
+    {{- end }}
+{{- end }}
diff --git a/integration/k8s/service-center/charts/etcd/charts/etcd-operator/templates/operator-service-account.yaml b/integration/k8s/service-center/charts/etcd/charts/etcd-operator/templates/operator-service-account.yaml
new file mode 100644
index 00000000..2faba8af
--- /dev/null
+++ b/integration/k8s/service-center/charts/etcd/charts/etcd-operator/templates/operator-service-account.yaml
@@ -0,0 +1,12 @@
+{{- if and .Values.serviceAccount.etcdOperatorServiceAccount.create .Values.deployments.etcdOperator }}
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ template "etcd-operator.serviceAccountName" . }}
+  labels:
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    app: {{ template "etcd-operator.name" . }}
+    heritage: {{ .Release.Service }}
+    release: {{ .Release.Name }}
+{{- end }}
\ No newline at end of file
diff --git a/integration/k8s/service-center/charts/etcd/charts/etcd-operator/templates/restore-etcd-crd.yaml b/integration/k8s/service-center/charts/etcd/charts/etcd-operator/templates/restore-etcd-crd.yaml
new file mode 100644
index 00000000..73faaab8
--- /dev/null
+++ b/integration/k8s/service-center/charts/etcd/charts/etcd-operator/templates/restore-etcd-crd.yaml
@@ -0,0 +1,28 @@
+{{- if .Values.customResources.createRestoreCRD }}
+---
+apiVersion: "etcd.database.coreos.com/v1beta2"
+kind: "EtcdRestore"
+metadata:
+  # An EtcdCluster with the same name will be created
+  name: {{ .Values.etcdCluster.name }}
+  labels:
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    app: {{ template "etcd-restore-operator.name" . }}
+    heritage: {{ .Release.Service }}
+    release: {{ .Release.Name }}
+  annotations:
+    "helm.sh/hook": "post-install"
+    "helm.sh/hook-delete-policy": "before-hook-creation"
+spec:
+  clusterSpec:
+    size: {{ .Values.etcdCluster.size }}
+    baseImage: "{{ .Values.etcdCluster.image.repository }}"
+    version: {{ .Values.etcdCluster.image.tag }}
+    pod:
+{{ toYaml .Values.etcdCluster.pod | indent 6 }}
+    {{- if .Values.etcdCluster.enableTLS }}
+    TLS:
+{{ toYaml .Values.etcdCluster.tls | indent 6 }}
+    {{- end }}
+{{ toYaml .Values.restoreOperator.spec | indent 2 }}
+{{- end}}
\ No newline at end of file
diff --git a/integration/k8s/service-center/charts/etcd/charts/etcd-operator/templates/restore-operator-clusterrole-binding.yaml b/integration/k8s/service-center/charts/etcd/charts/etcd-operator/templates/restore-operator-clusterrole-binding.yaml
new file mode 100644
index 00000000..9a6696ef
--- /dev/null
+++ b/integration/k8s/service-center/charts/etcd/charts/etcd-operator/templates/restore-operator-clusterrole-binding.yaml
@@ -0,0 +1,20 @@
+{{- if and .Values.rbac.create .Values.deployments.restoreOperator }}
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/{{ .Values.rbac.apiVersion }}
+metadata:
+  name: {{ template "etcd-restore-operator.fullname" . }}
+  labels:
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    app: {{ template "etcd-restore-operator.name" . }}
+    heritage: {{ .Release.Service }}
+    release: {{ .Release.Name }}
+subjects:
+- kind: ServiceAccount
+  name: {{ template "etcd-restore-operator.serviceAccountName" . }}
+  namespace: {{ .Release.Namespace }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: {{ template "etcd-operator.fullname" . }}
+{{- end }}
diff --git a/integration/k8s/service-center/charts/etcd/charts/etcd-operator/templates/restore-operator-deployment.yaml b/integration/k8s/service-center/charts/etcd/charts/etcd-operator/templates/restore-operator-deployment.yaml
new file mode 100644
index 00000000..5c4784de
--- /dev/null
+++ b/integration/k8s/service-center/charts/etcd/charts/etcd-operator/templates/restore-operator-deployment.yaml
@@ -0,0 +1,63 @@
+{{- if .Values.deployments.restoreOperator }}
+---
+apiVersion: apps/v1beta2
+kind: Deployment
+metadata:
+  name: {{ template "etcd-restore-operator.fullname" . }}
+  labels:
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    app: {{ template "etcd-restore-operator.name" . }}
+    heritage: {{ .Release.Service }}
+    release: {{ .Release.Name }}
+spec:
+  selector:
+    matchLabels:
+      app: {{ template "etcd-restore-operator.name" . }}
+      release: {{ .Release.Name }}
+  replicas: {{ .Values.restoreOperator.replicaCount }}
+  template:
+    metadata:
+      name: {{ template "etcd-restore-operator.fullname" . }}
+      labels:
+        app: {{ template "etcd-restore-operator.name" . }}
+        release: {{ .Release.Name }}
+    spec:
+      serviceAccountName: {{ template "etcd-restore-operator.serviceAccountName" . }}
+      containers:
+      - name: {{ .Values.restoreOperator.name }}
+        image: "{{ .Values.restoreOperator.image.repository }}:{{ .Values.restoreOperator.image.tag }}"
+        imagePullPolicy: {{ .Values.restoreOperator.image.pullPolicy }}
+        ports:
+        - containerPort: {{ .Values.restoreOperator.port }}
+        command:
+        - etcd-restore-operator
+{{- range $key, $value := .Values.restoreOperator.commandArgs }}
+        - "--{{ $key }}={{ $value }}"
+{{- end }}
+        env:
+        - name: MY_POD_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        - name: MY_POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        - name: SERVICE_ADDR
+          value: "{{ .Values.restoreOperator.name }}:{{ .Values.restoreOperator.port }}"
+        resources:
+          limits:
+            cpu: {{ .Values.restoreOperator.resources.cpu }}
+            memory: {{ .Values.restoreOperator.resources.memory }}
+          requests:
+            cpu: {{ .Values.restoreOperator.resources.cpu }}
+            memory: {{ .Values.restoreOperator.resources.memory }}
+    {{- if .Values.restoreOperator.nodeSelector }}
+      nodeSelector:
+{{ toYaml .Values.restoreOperator.nodeSelector | indent 8 }}
+    {{- end }}
+    {{- if .Values.restoreOperator.tolerations }}
+      tolerations:
+{{ toYaml .Values.restoreOperator.tolerations | indent 8 }}
+    {{- end }}
+{{- end }}
diff --git a/integration/k8s/service-center/charts/etcd/charts/etcd-operator/templates/restore-operator-service-account.yaml b/integration/k8s/service-center/charts/etcd/charts/etcd-operator/templates/restore-operator-service-account.yaml
new file mode 100644
index 00000000..595cee92
--- /dev/null
+++ b/integration/k8s/service-center/charts/etcd/charts/etcd-operator/templates/restore-operator-service-account.yaml
@@ -0,0 +1,12 @@
+{{- if and .Values.serviceAccount.restoreOperatorServiceAccount.create .Values.deployments.restoreOperator }}
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ template "etcd-restore-operator.serviceAccountName" . }}
+  labels:
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    app: {{ template "etcd-restore-operator.name" . }}
+    heritage: {{ .Release.Service }}
+    release: {{ .Release.Name }}
+{{- end }}
\ No newline at end of file
diff --git a/integration/k8s/service-center/charts/etcd/charts/etcd-operator/templates/restore-operator-service.yaml b/integration/k8s/service-center/charts/etcd/charts/etcd-operator/templates/restore-operator-service.yaml
new file mode 100644
index 00000000..052be364
--- /dev/null
+++ b/integration/k8s/service-center/charts/etcd/charts/etcd-operator/templates/restore-operator-service.yaml
@@ -0,0 +1,20 @@
+{{- if .Values.deployments.restoreOperator }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ .Values.restoreOperator.name }}
+  labels:
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    app: {{ template "etcd-restore-operator.name" . }}
+    heritage: {{ .Release.Service }}
+    release: {{ .Release.Name }}
+spec:
+  ports:
+  - protocol: TCP
+    name: http-etcd-restore-port
+    port: {{ .Values.restoreOperator.port }}
+  selector:
+    app: {{ template "etcd-restore-operator.name" . }}
+    release: {{ .Release.Name }}
+{{- end }}
diff --git a/integration/k8s/service-center/charts/etcd/charts/etcd-operator/values.yaml b/integration/k8s/service-center/charts/etcd/charts/etcd-operator/values.yaml
new file mode 100644
index 00000000..91a530f3
--- /dev/null
+++ b/integration/k8s/service-center/charts/etcd/charts/etcd-operator/values.yaml
@@ -0,0 +1,152 @@
+# Default values for etcd-operator.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+## Install Default RBAC roles and bindings
+rbac:
+  create: true
+  apiVersion: v1beta1
+
+## Service account names and whether to create them
+serviceAccount:
+  etcdOperatorServiceAccount:
+    create: true
+    name:
+  backupOperatorServiceAccount:
+    create: true
+    name:
+  restoreOperatorServiceAccount:
+    create: true
+    name:
+
+# Select what to deploy
+deployments:
+  etcdOperator: true
+  # one time deployment, delete once completed,
+  # Ref: https://github.com/coreos/etcd-operator/blob/master/doc/user/walkthrough/backup-operator.md
+  backupOperator: true
+  # one time deployment, delete once completed
+  # Ref: https://github.com/coreos/etcd-operator/blob/master/doc/user/walkthrough/restore-operator.md
+  restoreOperator: true
+
+# creates custom resources, not all required,
+# you could use `helm template --values <values.yaml> --name release_name ... `
+# and create the resources yourself to deploy on your cluster later
+customResources:
+  createEtcdClusterCRD: false
+  createBackupCRD: false
+  createRestoreCRD: false
+
+# etcdOperator
+etcdOperator:
+  name: etcd-operator
+  replicaCount: 1
+  image:
+    repository: quay.io/coreos/etcd-operator
+    tag: v0.9.2
+    pullPolicy: Always
+  resources:
+    cpu: 100m
+    memory: 128Mi
+  ## Node labels for etcd-operator pod assignment
+  ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+  nodeSelector: {}
+  ## additional command arguments go here; will be translated to `--key=value` form
+  ## e.g., analytics: true
+  commandArgs: {}
+  ## Configurable health checks against the /readyz endpoint that etcd-operator exposes
+  readinessProbe:
+    enabled: false
+    initialDelaySeconds: 0
+    periodSeconds: 10
+    timeoutSeconds: 1
+    successThreshold: 1
+    failureThreshold: 3
+  livenessProbe:
+    enabled: false
+    initialDelaySeconds: 0
+    periodSeconds: 10
+    timeoutSeconds: 1
+    successThreshold: 1
+    failureThreshold: 3
+# backup spec
+backupOperator:
+  name: etcd-backup-operator
+  replicaCount: 1
+  image:
+    repository: quay.io/coreos/etcd-operator
+    tag: v0.9.2
+    pullPolicy: Always
+  resources:
+    cpu: 100m
+    memory: 128Mi
+  spec:
+    storageType: S3
+    s3:
+      s3Bucket:
+      awsSecret:
+  ## Node labels for etcd pod assignment
+  ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+  nodeSelector: {}
+  ## additional command arguments go here; will be translated to `--key=value` form
+  ## e.g., analytics: true
+  commandArgs: {}
+
+# restore spec
+restoreOperator:
+  name: etcd-restore-operator
+  replicaCount: 1
+  image:
+    repository: quay.io/coreos/etcd-operator
+    tag: v0.9.2
+    pullPolicy: Always
+  port: 19999
+  resources:
+    cpu: 100m
+    memory: 128Mi
+  spec:
+    s3:
+      # The format of "path" must be: "<s3-bucket-name>/<path-to-backup-file>"
+      # e.g: "etcd-snapshot-bucket/v1/default/example-etcd-cluster/3.2.10_0000000000000001_etcd.backup"
+      path:
+      awsSecret:
+  ## Node labels for etcd pod assignment
+  ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+  nodeSelector: {}
+  ## additional command arguments go here; will be translated to `--key=value` form
+  ## e.g., analytics: true
+  commandArgs: {}
+
+## etcd-cluster specific values
+etcdCluster:
+  name: etcd-cluster
+  size: 3
+  version: 3.2.13
+  image:
+    repository: quay.io/coreos/etcd
+    tag: v3.2.13
+    pullPolicy: Always
+  enableTLS: false
+  # TLS configs
+  tls:
+    static:
+      member:
+        peerSecret: etcd-peer-tls
+        serverSecret: etcd-server-tls
+      operatorSecret: etcd-client-tls
+  ## etcd cluster pod specific values
+  ## Ref: https://github.com/coreos/etcd-operator/blob/master/doc/user/spec_examples.md#three-members-cluster-with-resource-requirement
+  pod:
+    ## Antiaffinity for etcd pod assignment
+    ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+    antiAffinity: false
+    resources:
+      limits:
+        cpu: 100m
+        memory: 128Mi
+      requests:
+        cpu: 100m
+        memory: 128Mi
+    ## Node labels for etcd pod assignment
+    ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+    nodeSelector: {}
diff --git a/pkg/util/util.go b/pkg/util/util.go
index 42d34f05..c89f9d5c 100644
--- a/pkg/util/util.go
+++ b/pkg/util/util.go
@@ -71,6 +71,8 @@ func StringJoin(args []string, sep string) string {
 		return ""
 	case 1:
 		return args[0]
+	case 2:
+		return args[0] + sep + args[1]
 	default:
 		n := len(sep) * (l - 1)
 		for i := 0; i < l; i++ {
diff --git a/pkg/util/util_test.go b/pkg/util/util_test.go
index 38c02e8e..6bee8fc2 100644
--- a/pkg/util/util_test.go
+++ b/pkg/util/util_test.go
@@ -107,6 +107,21 @@ func TestStringToBytesWithNoCopy(t *testing.T) {
 	}
 }
 
+func TestListToMap(t *testing.T) {
+	m := ListToMap(nil)
+	if m == nil || len(m) > 0 {
+		t.Fatalf("TestListToMap failed")
+	}
+	m = ListToMap([]string{})
+	if m == nil || len(m) > 0 {
+		t.Fatalf("TestListToMap failed")
+	}
+	m = ListToMap([]string{"a"})
+	if m == nil || len(m) != 1 || m["a"] != struct{}{} {
+		t.Fatalf("TestListToMap failed")
+	}
+}
+
 func TestSafeCloseChan(t *testing.T) {
 	var ch chan struct{}
 	SafeCloseChan(ch)
diff --git a/server/core/backend/parser.go b/server/core/backend/parser.go
index 26b291d6..74403cba 100644
--- a/server/core/backend/parser.go
+++ b/server/core/backend/parser.go
@@ -200,7 +200,7 @@ func GetInfoFromSchemaKV(kv *KeyValue) (schemaId string) {
 	return keys[l-1]
 }
 
-func GetInfoFromDependencyQueueKV(kv *KeyValue) (consumerId, domainProject string) {
+func GetInfoFromDependencyQueueKV(kv *KeyValue) (consumerId, domainProject, uuid string) {
 	keys := KvToResponse(kv)
 	l := len(keys)
 	if l < 4 {
@@ -208,5 +208,29 @@ func GetInfoFromDependencyQueueKV(kv *KeyValue) (consumerId, domainProject strin
 	}
 	consumerId = keys[l-2]
 	domainProject = fmt.Sprintf("%s/%s", keys[l-4], keys[l-3])
+	uuid = keys[l-1]
 	return
 }
+
+func GetInfoFromDependencyRuleKV(kv *KeyValue) (key *pb.MicroServiceKey) {
+	keys := KvToResponse(kv)
+	l := len(keys)
+	if l < 5 {
+		return
+	}
+	if keys[l-1] == "*" {
+		return &pb.MicroServiceKey{
+			Tenant:      fmt.Sprintf("%s/%s", keys[l-5], keys[l-4]),
+			Environment: keys[l-2],
+			ServiceName: keys[l-1],
+		}
+	}
+
+	return &pb.MicroServiceKey{
+		Tenant:      fmt.Sprintf("%s/%s", keys[l-7], keys[l-6]),
+		Environment: keys[l-4],
+		AppId:       keys[l-3],
+		ServiceName: keys[l-2],
+		Version:     keys[l-1],
+	}
+}
diff --git a/server/core/backend/parser_test.go b/server/core/backend/parser_test.go
index 77f34cc8..a4b82111 100644
--- a/server/core/backend/parser_test.go
+++ b/server/core/backend/parser_test.go
@@ -183,12 +183,37 @@ func TestGetInfoFromKV(t *testing.T) {
 		t.Fatalf("TestGetInfoFromKV failed")
 	}
 
-	s, d = GetInfoFromDependencyQueueKV(&KeyValue{Key: []byte(core.GenerateConsumerDependencyQueueKey("a/b", "c", "d"))})
-	if s != "c" || d != "a/b" {
+	u := ""
+	s, d, u = GetInfoFromDependencyQueueKV(&KeyValue{Key: []byte(core.GenerateConsumerDependencyQueueKey("a/b", "c", "d"))})
+	if s != "c" || d != "a/b" || u != "d" {
 		t.Fatalf("TestGetInfoFromKV failed")
 	}
-	s, d = GetInfoFromDependencyQueueKV(&KeyValue{Key: []byte("sdf")})
-	if s != "" || d != "" {
+	s, d, u = GetInfoFromDependencyQueueKV(&KeyValue{Key: []byte("sdf")})
+	if s != "" || d != "" || u != "" {
+		t.Fatalf("TestGetInfoFromKV failed")
+	}
+
+	k := GetInfoFromDependencyRuleKV(&KeyValue{Key: []byte(core.GenerateProviderDependencyRuleKey("a/b", &proto.MicroServiceKey{
+		Tenant:      "a/b",
+		AppId:       "c",
+		ServiceName: "*",
+	}))})
+	if k == nil || k.AppId != "" || k.ServiceName != "*" {
+		t.Fatalf("TestGetInfoFromKV failed")
+	}
+
+	k = GetInfoFromDependencyRuleKV(&KeyValue{Key: []byte(core.GenerateProviderDependencyRuleKey("a/b", &proto.MicroServiceKey{
+		Tenant:      "a/b",
+		AppId:       "c",
+		ServiceName: "d",
+		Version:     "e",
+	}))})
+	if k == nil || k.AppId != "c" || k.ServiceName != "d" {
+		t.Fatalf("TestGetInfoFromKV failed")
+	}
+
+	k = GetInfoFromDependencyRuleKV(&KeyValue{Key: []byte("abc")})
+	if k != nil {
 		t.Fatalf("TestGetInfoFromKV failed")
 	}
 }
diff --git a/server/core/key_generator.go b/server/core/key_generator.go
index 884e0010..99129179 100644
--- a/server/core/key_generator.go
+++ b/server/core/key_generator.go
@@ -42,6 +42,9 @@ const (
 	REGISTRY_DEPS_RULE_KEY      = "dep-rules"
 	REGISTRY_DEPS_QUEUE_KEY     = "dep-queue"
 	REGISTRY_METRICS_KEY        = "metrics"
+	DEPS_QUEUE_UUID             = "0"
+	DEPS_CONSUMER               = "c"
+	DEPS_PROVIDER               = "p"
 )
 
 func GetRootKey() string {
@@ -230,6 +233,12 @@ func GenerateInstanceLeaseKey(domainProject string, serviceId string, instanceId
 }
 
 func GenerateServiceDependencyRuleKey(serviceType string, domainProject string, in *pb.MicroServiceKey) string {
+	if in == nil {
+		return util.StringJoin([]string{
+			GetServiceDependencyRuleRootKey(domainProject),
+			serviceType,
+		}, SPLIT)
+	}
 	if in.ServiceName == "*" {
 		return util.StringJoin([]string{
 			GetServiceDependencyRuleRootKey(domainProject),
@@ -249,11 +258,11 @@ func GenerateServiceDependencyRuleKey(serviceType string, domainProject string,
 }
 
 func GenerateConsumerDependencyRuleKey(domainProject string, in *pb.MicroServiceKey) string {
-	return GenerateServiceDependencyRuleKey("c", domainProject, in)
+	return GenerateServiceDependencyRuleKey(DEPS_CONSUMER, domainProject, in)
 }
 
 func GenerateProviderDependencyRuleKey(domainProject string, in *pb.MicroServiceKey) string {
-	return GenerateServiceDependencyRuleKey("p", domainProject, in)
+	return GenerateServiceDependencyRuleKey(DEPS_PROVIDER, domainProject, in)
 }
 
 func GetServiceDependencyRuleRootKey(domainProject string) string {
@@ -274,23 +283,6 @@ func GetServiceDependencyQueueRootKey(domainProject string) string {
 	}, SPLIT)
 }
 
-func GenerateConsumerDependencyKey(domainProject string, consumerId string, providerId string) string {
-	return GenerateServiceDependencyKey("c", domainProject, consumerId, providerId)
-}
-
-func GenerateServiceDependencyKey(serviceType string, domainProject string, serviceId1 string, serviceId2 string) string {
-	return util.StringJoin([]string{
-		GetServiceDependencyRootKey(domainProject),
-		serviceType,
-		serviceId1,
-		serviceId2,
-	}, SPLIT)
-}
-
-func GenerateProviderDependencyKey(domainProject string, providerId string, consumerId string) string {
-	return GenerateServiceDependencyKey("p", domainProject, providerId, consumerId)
-}
-
 func GenerateConsumerDependencyQueueKey(domainProject, consumerId, uuid string) string {
 	return util.StringJoin([]string{
 		GetServiceDependencyQueueRootKey(domainProject),
diff --git a/server/core/key_generator_test.go b/server/core/key_generator_test.go
new file mode 100644
index 00000000..875cd674
--- /dev/null
+++ b/server/core/key_generator_test.go
@@ -0,0 +1,71 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package core
+
+import (
+	"github.com/apache/incubator-servicecomb-service-center/server/core/proto"
+	"testing"
+)
+
+func TestGenerateDependencyRuleKey(t *testing.T) {
+	// consumer
+	k := GenerateConsumerDependencyRuleKey("a", nil)
+	if k != "/cse-sr/ms/dep-rules/a/c" {
+		t.Fatalf("TestGenerateDependencyRuleKey failed")
+	}
+	k = GenerateConsumerDependencyRuleKey("a", &proto.MicroServiceKey{
+		Environment: "1",
+		AppId:       "2",
+		ServiceName: "3",
+		Version:     "4",
+	})
+	if k != "/cse-sr/ms/dep-rules/a/c/1/2/3/4" {
+		t.Fatalf("TestGenerateDependencyRuleKey failed")
+	}
+	k = GenerateConsumerDependencyRuleKey("a", &proto.MicroServiceKey{
+		Environment: "1",
+		AppId:       "2",
+		ServiceName: "*",
+		Version:     "4",
+	})
+	if k != "/cse-sr/ms/dep-rules/a/c/1/*" {
+		t.Fatalf("TestGenerateDependencyRuleKey failed")
+	}
+
+	// provider
+	k = GenerateProviderDependencyRuleKey("a", nil)
+	if k != "/cse-sr/ms/dep-rules/a/p" {
+		t.Fatalf("TestGenerateDependencyRuleKey failed")
+	}
+	k = GenerateProviderDependencyRuleKey("a", &proto.MicroServiceKey{
+		Environment: "1",
+		AppId:       "2",
+		ServiceName: "3",
+		Version:     "4",
+	})
+	if k != "/cse-sr/ms/dep-rules/a/p/1/2/3/4" {
+		t.Fatalf("TestGenerateDependencyRuleKey failed")
+	}
+	k = GenerateProviderDependencyRuleKey("a", &proto.MicroServiceKey{
+		Environment: "1",
+		AppId:       "2",
+		ServiceName: "*",
+		Version:     "4",
+	})
+	if k != "/cse-sr/ms/dep-rules/a/p/1/*" {
+		t.Fatalf("TestGenerateDependencyRuleKey failed")
+	}
+}
diff --git a/server/core/microservice.go b/server/core/microservice.go
index aabd59e5..5e4484cb 100644
--- a/server/core/microservice.go
+++ b/server/core/microservice.go
@@ -93,7 +93,12 @@ func IsDefaultDomainProject(domainProject string) bool {
 }
 
 func SetSharedMode() {
-	sharedServiceNames = util.ListToMap(strings.Split(os.Getenv("CSE_SHARED_SERVICES"), ","))
+	sharedServiceNames = make(map[string]struct{})
+	for _, s := range strings.Split(os.Getenv("CSE_SHARED_SERVICES"), ",") {
+		if len(s) > 0 {
+			sharedServiceNames[s] = struct{}{}
+		}
+	}
 	sharedServiceNames[Service.ServiceName] = struct{}{}
 }
 
@@ -108,18 +113,9 @@ func IsShared(key *pb.MicroServiceKey) bool {
 	return ok
 }
 
-func IsSCKey(key *pb.MicroServiceKey) bool {
-	if !IsDefaultDomainProject(key.Tenant) {
-		return false
-	}
-	return key.AppId == Service.AppId && key.ServiceName == Service.ServiceName
-}
-
 func IsSCInstance(ctx context.Context) bool {
-	if ctx.Value(IS_SC_SELF) != nil && ctx.Value(IS_SC_SELF).(bool) {
-		return true
-	}
-	return false
+	b, _ := ctx.Value(IS_SC_SELF).(bool)
+	return b
 }
 
 func GetExistenceRequest() *pb.GetExistenceRequest {
diff --git a/server/core/microservice_test.go b/server/core/microservice_test.go
new file mode 100644
index 00000000..d642d641
--- /dev/null
+++ b/server/core/microservice_test.go
@@ -0,0 +1,83 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package core
+
+import (
+	"github.com/apache/incubator-servicecomb-service-center/server/core/proto"
+	"github.com/apache/incubator-servicecomb-service-center/version"
+	"golang.org/x/net/context"
+	"os"
+	"testing"
+)
+
+func TestPrepareSelfRegistration(t *testing.T) {
+	version.Ver().RunMode = "dev"
+	prepareSelfRegistration()
+	if Service == nil || Service.Environment != "development" {
+		t.Fatalf("TestPrepareSelfRegistration failed, %v", Service)
+	}
+
+	version.Ver().RunMode = "prod"
+	prepareSelfRegistration()
+	if Service == nil || Service.AppId != "default" || Service.ServiceName != "SERVICECENTER" ||
+		Service.Environment != "production" || Service.Properties["allowCrossApp"] != "true" {
+		t.Fatalf("TestPrepareSelfRegistration failed, %v", Service)
+	}
+
+	if Instance == nil || Instance.Status != "UP" {
+		t.Fatalf("TestPrepareSelfRegistration failed, %v", Instance)
+	}
+
+	if IsSCInstance(context.Background()) {
+		t.Fatalf("TestPrepareSelfRegistration failed")
+	}
+
+	exist := GetExistenceRequest()
+	if exist == nil || exist.Environment != "production" || exist.ServiceName != "SERVICECENTER" ||
+		exist.AppId != "default" {
+		t.Fatalf("TestPrepareSelfRegistration failed, %v", exist)
+	}
+
+	instance := RegisterInstanceRequest("test", []string{"a"})
+	if instance == nil || instance.Instance.HostName != "test" || instance.Instance.Endpoints[0] != "a" {
+		t.Fatalf("TestPrepareSelfRegistration failed, %v", instance)
+	}
+}
+
+func TestSetSharedMode(t *testing.T) {
+	SetSharedMode()
+	if IsShared(&proto.MicroServiceKey{}) {
+		t.Fatalf("TestSetSharedMode failed")
+	}
+	if IsShared(&proto.MicroServiceKey{Tenant: "default"}) {
+		t.Fatalf("TestSetSharedMode failed")
+	}
+	if IsShared(&proto.MicroServiceKey{Tenant: "default/default"}) {
+		t.Fatalf("TestSetSharedMode failed")
+	}
+	if IsShared(&proto.MicroServiceKey{Tenant: "default/default", AppId: "default"}) {
+		t.Fatalf("TestSetSharedMode failed")
+	}
+
+	os.Setenv("CSE_SHARED_SERVICES", "shared")
+	SetSharedMode()
+	if IsShared(&proto.MicroServiceKey{Tenant: "default/default", AppId: "default", ServiceName: "no-shared"}) {
+		t.Fatalf("TestSetSharedMode failed")
+	}
+	if !IsShared(&proto.MicroServiceKey{Tenant: "default/default", AppId: "default", ServiceName: "shared"}) {
+		t.Fatalf("TestSetSharedMode failed")
+	}
+}
diff --git a/server/service/dependency.go b/server/service/dependency.go
index 31beb926..71a8cbdc 100644
--- a/server/service/dependency.go
+++ b/server/service/dependency.go
@@ -91,7 +91,7 @@ func (s *MicroServiceService) AddOrUpdateDependencies(ctx context.Context, depen
 			return pb.CreateResponse(scerr.ErrInternal, err.Error()), err
 		}
 
-		id := "0"
+		id := apt.DEPS_QUEUE_UUID
 		if !override {
 			id = util.GenerateUuid()
 		}
diff --git a/server/service/event/dependency_event_handler.go b/server/service/event/dependency_event_handler.go
index f4552aea..731c7bde 100644
--- a/server/service/event/dependency_event_handler.go
+++ b/server/service/event/dependency_event_handler.go
@@ -45,45 +45,51 @@ func (h *DependencyEventHandler) OnEvent(evt backend.KvEvent) {
 	if action != pb.EVT_CREATE && action != pb.EVT_UPDATE && action != pb.EVT_INIT {
 		return
 	}
+	h.notify()
+}
 
+func (h *DependencyEventHandler) notify() {
 	h.signals.Put(struct{}{})
 }
 
-func (h *DependencyEventHandler) loop() {
+func (h *DependencyEventHandler) backoff(backoff func(), retries int) int {
+	if backoff != nil {
+		<-time.After(util.GetBackoff().Delay(retries))
+		backoff()
+	}
+	return retries + 1
+}
+
+func (h *DependencyEventHandler) tryWithBackoff(success func() error, backoff func(), retries int) (error, int) {
+	lock, err := mux.Try(mux.DEP_QUEUE_LOCK)
+	if err != nil {
+		log.Errorf(err, "try to lock %s failed", mux.DEP_QUEUE_LOCK)
+		return err, h.backoff(backoff, retries)
+	}
+
+	if lock == nil {
+		return nil, 0
+	}
+
+	err = success()
+	lock.Unlock()
+	if err != nil {
+		log.Errorf(err, "handle dependency event failed")
+		return err, h.backoff(backoff, retries)
+	}
+
+	return nil, 0
+}
+
+func (h *DependencyEventHandler) eventLoop() {
 	gopool.Go(func(ctx context.Context) {
 		retries := 0
-		delay := func() {
-			<-time.After(util.GetBackoff().Delay(retries))
-			retries++
-
-			h.signals.Put(struct{}{})
-		}
 		for {
 			select {
 			case <-ctx.Done():
 				return
 			case <-h.signals.Chan():
-				lock, err := mux.Try(mux.DEP_QUEUE_LOCK)
-				if err != nil {
-					log.Errorf(err, "try to lock %s failed", mux.DEP_QUEUE_LOCK)
-					delay()
-					continue
-				}
-
-				if lock == nil {
-					retries = 0
-					continue
-				}
-
-				err = h.Handle()
-				lock.Unlock()
-				if err != nil {
-					log.Errorf(err, "handle dependency event failed")
-					delay()
-					continue
-				}
-
-				retries = 0
+				_, retries = h.tryWithBackoff(h.Handle, h.notify, retries)
 			}
 		}
 	})
@@ -129,10 +135,16 @@ func (h *DependencyEventHandler) Handle() error {
 
 	dependencyTree := util.NewTree(isAddToLeft)
 
+	cleanUpDomainProjects := make(map[string]struct{})
+	defer h.CleanUp(cleanUpDomainProjects)
+
 	for _, kv := range resp.Kvs {
 		r := kv.Value.(*pb.ConsumerDependency)
 
-		_, domainProject := backend.GetInfoFromDependencyQueueKV(kv)
+		_, domainProject, uuid := backend.GetInfoFromDependencyQueueKV(kv)
+		if uuid == core.DEPS_QUEUE_UUID {
+			cleanUpDomainProjects[domainProject] = struct{}{}
+		}
 		res := NewDependencyEventHandlerResource(r, kv, domainProject)
 
 		dependencyTree.AddNode(res)
@@ -189,10 +201,18 @@ func (h *DependencyEventHandler) removeKV(ctx context.Context, kv *backend.KeyVa
 	return nil
 }
 
+func (h *DependencyEventHandler) CleanUp(domainProjects map[string]struct{}) {
+	for domainProject := range domainProjects {
+		if err := serviceUtil.CleanUpDependencyRules(context.Background(), domainProject); err != nil {
+			log.Errorf(err, "clean up '%s' dependency rules failed", domainProject)
+		}
+	}
+}
+
 func NewDependencyEventHandler() *DependencyEventHandler {
 	h := &DependencyEventHandler{
 		signals: queue.NewUniQueue(),
 	}
-	h.loop()
+	h.eventLoop()
 	return h
 }
diff --git a/server/service/event/dependency_event_handler_test.go b/server/service/event/dependency_event_handler_test.go
new file mode 100644
index 00000000..6452c7b1
--- /dev/null
+++ b/server/service/event/dependency_event_handler_test.go
@@ -0,0 +1,31 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package event
+
+import "testing"
+
+func TestDependencyEventHandler_Backoff(t *testing.T) {
+	h := &DependencyEventHandler{}
+	r := h.backoff(nil, 0)
+	if r != 1 {
+		t.Fatalf("TestDependencyEventHandler_Backoff failed, %v", r)
+	}
+	cb := 0
+	r = h.backoff(func() { cb = 1 }, 1)
+	if r != 2 || cb != 1 {
+		t.Fatalf("TestDependencyEventHandler_Backoff failed, %v", r)
+	}
+}
diff --git a/server/service/util/dependency.go b/server/service/util/dependency.go
index 2b483ff8..b8776b0b 100644
--- a/server/service/util/dependency.go
+++ b/server/service/util/dependency.go
@@ -29,14 +29,16 @@ import (
 )
 
 type Dependency struct {
-	ConsumerId                string
-	DomainProject             string
+	DomainProject string
+	// store the consumer Dependency from dep-queue object
+	Consumer      *pb.MicroServiceKey
+	ProvidersRule []*pb.MicroServiceKey
+	// store the parsed rules from Dependency object
 	removedDependencyRuleList []*pb.MicroServiceKey
-	NewDependencyRuleList     []*pb.MicroServiceKey
-	err                       chan error
-	chanNum                   int8
-	Consumer                  *pb.MicroServiceKey
-	ProvidersRule             []*pb.MicroServiceKey
+	newDependencyRuleList     []*pb.MicroServiceKey
+
+	err     chan error
+	chanNum int8
 }
 
 func (dep *Dependency) RemoveConsumerOfProviderRule() {
@@ -93,7 +95,7 @@ func (dep *Dependency) AddConsumerOfProviderRule() {
 
 func (dep *Dependency) addConsumerOfProviderRule(ctx context.Context) {
 	opts := []registry.PluginOp{}
-	for _, providerRule := range dep.NewDependencyRuleList {
+	for _, providerRule := range dep.newDependencyRuleList {
 		proProkey := apt.GenerateProviderDependencyRuleKey(providerRule.Tenant, providerRule)
 		tmpValue, err := TransferToMicroServiceDependency(ctx, proProkey)
 		if err != nil {
@@ -125,9 +127,9 @@ func (dep *Dependency) addConsumerOfProviderRule(ctx context.Context) {
 	dep.err <- nil
 }
 
-func (dep *Dependency) UpdateProvidersRuleOfConsumer(conKey string) error {
+func (dep *Dependency) UpdateProvidersRuleOfConsumer(ctx context.Context, conKey string) error {
 	if len(dep.ProvidersRule) == 0 {
-		_, err := backend.Registry().Do(context.TODO(),
+		_, err := backend.Registry().Do(ctx,
 			registry.DEL,
 			registry.WithStrKey(conKey),
 		)
@@ -146,7 +148,7 @@ func (dep *Dependency) UpdateProvidersRuleOfConsumer(conKey string) error {
 		log.Errorf(nil, "Marshal tmpValue fialed.")
 		return err
 	}
-	_, err = backend.Registry().Do(context.TODO(),
+	_, err = backend.Registry().Do(ctx,
 		registry.PUT,
 		registry.WithStrKey(conKey),
 		registry.WithValue(data))
diff --git a/server/service/util/dependency_test.go b/server/service/util/dependency_test.go
index af7b3a4b..8d4868c0 100644
--- a/server/service/util/dependency_test.go
+++ b/server/service/util/dependency_test.go
@@ -202,13 +202,13 @@ func TestDependency(t *testing.T) {
 		removedDependencyRuleList: []*proto.MicroServiceKey{
 			{ServiceName: "a", Version: "1.0.0"},
 		},
-		NewDependencyRuleList: []*proto.MicroServiceKey{
+		newDependencyRuleList: []*proto.MicroServiceKey{
 			{ServiceName: "a", Version: "1.0.0"},
 		},
 	}
 	d.RemoveConsumerOfProviderRule()
 	d.AddConsumerOfProviderRule()
-	err := d.UpdateProvidersRuleOfConsumer("")
+	err := d.UpdateProvidersRuleOfConsumer(context.Background(), "")
 	if err == nil {
 		t.Fatalf(`Dependency_UpdateProvidersRuleOfConsumer failed`)
 	}
@@ -259,6 +259,21 @@ func TestDependency(t *testing.T) {
 	if err == nil {
 		t.Fatalf(`DependencyRelation_GetDependencyProviders failed`)
 	}
+
+	err = CleanUpDependencyRules(context.Background(), "")
+	if err == nil {
+		t.Fatalf(`DependencyRelation_CleanUpDependencyRules failed`)
+	}
+
+	err = CleanUpDependencyRules(context.Background(), "a/b")
+	if err == nil {
+		t.Fatalf(`DependencyRelation_CleanUpDependencyRules failed`)
+	}
+
+	_, err = removeProviderRuleKeys(context.Background(), "a/b", nil)
+	if err == nil {
+		t.Fatalf(`DependencyRelation_removeProviderRuleKeys failed`)
+	}
 }
 
 func TestDependencyRelationFilterOpt(t *testing.T) {
diff --git a/server/service/util/dependency_util.go b/server/service/util/dependency_util.go
index d77a49e9..288aeea2 100644
--- a/server/service/util/dependency_util.go
+++ b/server/service/util/dependency_util.go
@@ -322,12 +322,12 @@ func syncDependencyRule(ctx context.Context, dep *Dependency, filter func(contex
 
 	if len(newDependencyRuleList) != 0 {
 		log.Infof("New dependency rule add for consumer %s, %v, ", consumerFlag, newDependencyRuleList)
-		dep.NewDependencyRuleList = newDependencyRuleList
+		dep.newDependencyRuleList = newDependencyRuleList
 		dep.AddConsumerOfProviderRule()
 	}
 
 	conKey := apt.GenerateConsumerDependencyRuleKey(dep.DomainProject, dep.Consumer)
-	err := dep.UpdateProvidersRuleOfConsumer(conKey)
+	err := dep.UpdateProvidersRuleOfConsumer(ctx, conKey)
 	if err != nil {
 		return err
 	}
@@ -415,7 +415,7 @@ func ParamsChecker(consumerInfo *pb.MicroServiceKey, providersInfo []*pb.MicroSe
 }
 
 func DeleteDependencyForDeleteService(domainProject string, serviceId string, service *pb.MicroServiceKey) (registry.PluginOp, error) {
-	key := apt.GenerateConsumerDependencyQueueKey(domainProject, serviceId, "0")
+	key := apt.GenerateConsumerDependencyQueueKey(domainProject, serviceId, apt.DEPS_QUEUE_UUID)
 	conDep := new(pb.ConsumerDependency)
 	conDep.Consumer = service
 	conDep.Providers = []*pb.MicroServiceKey{}
@@ -426,3 +426,109 @@ func DeleteDependencyForDeleteService(domainProject string, serviceId string, se
 	}
 	return registry.OpPut(registry.WithStrKey(key), registry.WithValue(data)), nil
 }
+
+func removeProviderRuleOfConsumer(ctx context.Context, domainProject string, cache map[string]bool) ([]registry.PluginOp, error) {
+	key := apt.GenerateConsumerDependencyRuleKey(domainProject, nil) + apt.SPLIT
+	resp, err := backend.Store().DependencyRule().Search(ctx, registry.WithStrKey(key), registry.WithPrefix())
+	if err != nil {
+		return nil, err
+	}
+
+	var ops []registry.PluginOp
+loop:
+	for _, kv := range resp.Kvs {
+		var left []*pb.MicroServiceKey
+		all := kv.Value.(*pb.MicroServiceDependency).Dependency
+		for _, key := range all {
+			if key.ServiceName == "*" {
+				continue loop
+			}
+
+			id := apt.GenerateProviderDependencyRuleKey(domainProject, key)
+			exist, ok := cache[id]
+			if !ok {
+				_, exist, err = FindServiceIds(ctx, key.Version, key)
+				if err != nil {
+					return nil, fmt.Errorf("%v, find service %s/%s/%s/%s",
+						err, domainProject, key.AppId, key.ServiceName, key.Version)
+				}
+				cache[id] = exist
+			}
+
+			if exist {
+				left = append(left, key)
+			}
+		}
+
+		if len(all) == len(left) {
+			continue
+		}
+
+		if len(left) == 0 {
+			ops = append(ops, registry.OpDel(registry.WithKey(kv.Key)))
+		} else {
+			val, err := json.Marshal(&pb.MicroServiceDependency{Dependency: left})
+			if err != nil {
+				return nil, fmt.Errorf("%v, marshal %v", err, left)
+			}
+			ops = append(ops, registry.OpPut(registry.WithKey(kv.Key), registry.WithValue(val)))
+		}
+	}
+	return ops, nil
+}
+
+func removeProviderRuleKeys(ctx context.Context, domainProject string, cache map[string]bool) ([]registry.PluginOp, error) {
+	key := apt.GenerateProviderDependencyRuleKey(domainProject, nil) + apt.SPLIT
+	resp, err := backend.Store().DependencyRule().Search(ctx, registry.WithStrKey(key), registry.WithPrefix())
+	if err != nil {
+		return nil, err
+	}
+
+	var ops []registry.PluginOp
+	for _, kv := range resp.Kvs {
+		id := util.BytesToStringWithNoCopy(kv.Key)
+		exist, ok := cache[id]
+		if !ok {
+			key := backend.GetInfoFromDependencyRuleKV(kv)
+			if key == nil || key.ServiceName == "*" {
+				continue
+			}
+
+			_, exist, err = FindServiceIds(ctx, key.Version, key)
+			if err != nil {
+				return nil, fmt.Errorf("find service %s/%s/%s/%s, %v",
+					domainProject, key.AppId, key.ServiceName, key.Version, err)
+			}
+			cache[id] = exist
+		}
+
+		if !exist {
+			ops = append(ops, registry.OpDel(registry.WithKey(kv.Key)))
+		}
+	}
+	return ops, nil
+}
+
+func CleanUpDependencyRules(ctx context.Context, domainProject string) error {
+	if len(domainProject) == 0 {
+		return errors.New("required domainProject")
+	}
+
+	cache := make(map[string]bool)
+	pOps, err := removeProviderRuleOfConsumer(ctx, domainProject, cache)
+	if err != nil {
+		return err
+	}
+
+	kOps, err := removeProviderRuleKeys(ctx, domainProject, cache)
+	if err != nil {
+		return err
+	}
+
+	ops := append(append([]registry.PluginOp(nil), pOps...), kOps...)
+	if len(ops) == 0 {
+		return nil
+	}
+
+	return backend.BatchCommit(ctx, ops)
+}


 

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services