Posted to notifications@shardingsphere.apache.org by su...@apache.org on 2023/04/06 09:57:12 UTC

[shardingsphere-on-cloud] branch main updated: refactor(operator): refactor according to golangci-lint (#296)

This is an automated email from the ASF dual-hosted git repository.

sunnianjun pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/shardingsphere-on-cloud.git


The following commit(s) were added to refs/heads/main by this push:
     new fa0e6db  refactor(operator): refactor according to golangci-lint (#296)
fa0e6db is described below

commit fa0e6dbc04c07a89a8e68018a238d136b420ba34
Author: liyao <ma...@126.com>
AuthorDate: Thu Apr 6 17:57:06 2023 +0800

    refactor(operator): refactor according to golangci-lint (#296)
    
    * chore: separate golangci-lint into base and advanced lints
    
    Signed-off-by: mlycore <ma...@126.com>
    
    * fix: introduce CondSucceed to Proxy and ComputeNode status
    
    Signed-off-by: mlycore <ma...@126.com>
    
    * chore: fix according to golangci-lint
    
    Signed-off-by: mlycore <ma...@126.com>
    
    * chore: refactor according to golangci-lint
    
    Signed-off-by: mlycore <ma...@126.com>
    
    * chore: update golangci-lint
    
    Signed-off-by: mlycore <ma...@126.com>
    
    * chore: remove loggercheck
    
    Signed-off-by: mlycore <ma...@126.com>
    
    * fix: fix unit test
    
    Signed-off-by: mlycore <ma...@126.com>
    
    * chore(ci): separate golint into operator and pitr
    
    Signed-off-by: mlycore <ma...@126.com>
    
    * chore: add lint for commit message
    
    Signed-off-by: mlycore <ma...@126.com>
    
    ---------
    
    Signed-off-by: mlycore <ma...@126.com>
---
 .github/workflows/commit-msg.yml                   |  41 +++
 .../workflows/{golint.yml => operator-golint.yml}  |  12 +-
 .github/workflows/{golint.yml => pitr-golint.yml}  |   9 +-
 shardingsphere-operator/.golangci.yml              | 300 ++++++++++++++++++++-
 .../api/v1alpha1/compute_node_types.go             |   1 +
 .../api/v1alpha1/proxy_status.go                   |   1 +
 .../pkg/controllers/compute_node_controller.go     |  88 +++---
 .../controllers/compute_node_controller_test.go    |   2 +-
 .../pkg/controllers/proxy_controller.go            |  15 +-
 .../pkg/reconcile/computenode/deployment.go        |  34 +--
 .../pkg/reconcile/computenode/service.go           |  10 +-
 .../pkg/reconcile/proxy/deployment.go              | 115 ++++----
 .../pkg/reconcile/proxy/deployment_test.go         |   9 +-
 .../pkg/reconcile/proxy/resource.go                |   6 +-
 .../pkg/reconcile/proxy/status.go                  |  10 +-
 .../pkg/reconcile/proxyconfig/configmap.go         |   4 +-
 16 files changed, 501 insertions(+), 156 deletions(-)

diff --git a/.github/workflows/commit-msg.yml b/.github/workflows/commit-msg.yml
new file mode 100644
index 0000000..1b58530
--- /dev/null
+++ b/.github/workflows/commit-msg.yml
@@ -0,0 +1,41 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+      
+
+name: lint-commit-message
+on: [pull_request]
+
+jobs:
+  commit-msg:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+      - name: Cache lint-commit-message
+        id: cache-lint-commit-message
+        uses: actions/cache@v3
+        with:
+          path: lint-commit-message
+          key: ${{ runner.os }}-lint-commit-message
+      - name: Lint commit message
+        run: |
+          ! git log --oneline ${{ github.event.pull_request.base.sha }}... \
+            | grep -vP '^\w{8} Merge ' \
+            | grep -vP '^\w{8} (feat|fix|build|chore|docs|style|refactor|perf|test|ci)(\(\w+(-\w+)?\))?:(\s*).*'
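
The grep above rejects any pull-request subject that does not start with one of the Conventional Commits types in the pattern; the leading '^\w{8} ' matches the short hash that "git log --oneline" prepends, and merge commits are filtered out first. A minimal Go sketch of the same subject rule, not part of the commit, for checking messages locally:

    package main

    import (
        "fmt"
        "regexp"
    )

    // Same subject rule as the workflow's grep, minus the eight-character
    // short-hash prefix that "git log --oneline" adds before each subject.
    var commitMsgRe = regexp.MustCompile(`^(feat|fix|build|chore|docs|style|refactor|perf|test|ci)(\(\w+(-\w+)?\))?:\s*.*`)

    func main() {
        for _, subject := range []string{
            "refactor(operator): refactor according to golangci-lint (#296)", // accepted
            "update some files",                                              // rejected
        } {
            fmt.Printf("%-66s %v\n", subject, commitMsgRe.MatchString(subject))
        }
    }
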
diff --git a/.github/workflows/golint.yml b/.github/workflows/operator-golint.yml
similarity index 84%
copy from .github/workflows/golint.yml
copy to .github/workflows/operator-golint.yml
index d7e4fdb..f2dc26c 100644
--- a/.github/workflows/golint.yml
+++ b/.github/workflows/operator-golint.yml
@@ -25,6 +25,10 @@ on:
   pull_request:
     branches:
       - main 
+    paths:
+      - 'shardingsphere-operator/**'
+      - '.github/workflows/operator-golint.yml'
+      - 'shardingsphere-operator/.golangci-lint.yml'
 
 jobs:
   changes:
@@ -48,11 +52,3 @@ jobs:
         run: |
           cd shardingsphere-operator/
           $(go env GOPATH)/bin/golangci-lint run -v --timeout 300s ./...
-      - name: Lint Pitr Cli 
-        run: |
-          cd pitr/cli
-          $(go env GOPATH)/bin/golangci-lint run -v --timeout 300s ./...
-      - name: Lint Pitr Agent
-        run: |
-          cd pitr/gent
-          $(go env GOPATH)/bin/golangci-lint run -v --timeout 300s ./...
diff --git a/.github/workflows/golint.yml b/.github/workflows/pitr-golint.yml
similarity index 90%
rename from .github/workflows/golint.yml
rename to .github/workflows/pitr-golint.yml
index d7e4fdb..8e799cf 100644
--- a/.github/workflows/golint.yml
+++ b/.github/workflows/pitr-golint.yml
@@ -25,6 +25,11 @@ on:
   pull_request:
     branches:
       - main 
+    paths:
+      - 'pitr/**'
+      - '.github/workflows/pitr-golint.yml'
+      - 'pitr/agent/.golangci-lint.yml'
+      - 'pitr/cli/.golangci-lint.yml'
 
 jobs:
   changes:
@@ -44,10 +49,6 @@ jobs:
           go-version: '1.19'
       - name: Download golangci-lint
         run: curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.49.0
-      - name: Lint ShardingSphere-Operator
-        run: |
-          cd shardingsphere-operator/
-          $(go env GOPATH)/bin/golangci-lint run -v --timeout 300s ./...
       - name: Lint Pitr Cli 
         run: |
           cd pitr/cli
diff --git a/shardingsphere-operator/.golangci.yml b/shardingsphere-operator/.golangci.yml
index 1b9b6ce..b1a4835 100644
--- a/shardingsphere-operator/.golangci.yml
+++ b/shardingsphere-operator/.golangci.yml
@@ -17,32 +17,186 @@
 
 run:
   timeout: 10m
+  skip-files:
+    - "^zz_generated.*"
+    - "_test.go"
 linters:
   disable-all: true
   enable:
+    # The base lints 
+    - errcheck
+    - gosimple
+    - govet
     - ineffassign
+    - staticcheck
     - typecheck
-    - varcheck
     - unused
-    - structcheck
-    - deadcode
-    - gosimple
+    - unused
+    - bodyclose
+    - cyclop
+    - nilerr
     - goimports
-    - errcheck
-    - staticcheck
-    - stylecheck
-    - gosec
     - asciicheck
-    - bodyclose
+    - prealloc
+    - stylecheck
     - exportloopref
     - rowserrcheck
     - makezero
     - durationcheck
-    - prealloc
+    - gosec
     - predeclared
+    # Deprecated lints 
+    - structcheck
+    - varcheck
+    - deadcode
 
+    # The advanced lints 
+    - dupl
+    - exhaustive
+    - godot
+    - misspell 
+    - varnamelen
+    - gocritic
+    #- exhaustruct
+    #- nestif
+    #- wsl
+    #- gocognit
 # Refers: https://gist.github.com/maratori/47a4d00457a92aa426dbd48a18776322
 linters-settings:
+  wsl:
+    # See https://github.com/bombsimon/wsl/blob/master/doc/configuration.md for documentation of available settings.
+    # These are the defaults for `golangci-lint`.
+
+    # Do strict checking when assigning from append (x = append(x, y)). If
+    # this is set to true - the append call must append either a variable
+    # assigned, called or used on the line above.
+    strict-append: true
+    # Allows assignments to be cuddled with variables used in calls on
+    # line above and calls to be cuddled with assignments of variables
+    # used in call on line above.
+    allow-assign-and-call: true
+    # Allows assignments to be cuddled with anything.
+    allow-assign-and-anything: false
+    # Allows cuddling to assignments even if they span over multiple lines.
+    allow-multiline-assign: true
+    # If the number of lines in a case block is equal to or larger than this
+    # number, the case *must* end with a newline.
+    force-case-trailing-whitespace: 0
+    # Allow blocks to end with comments.
+    allow-trailing-comment: false
+    # Allow multiple comments in the beginning of a block separated with newline.
+    allow-separated-leading-comment: false
+    # Allow multiple var/declaration statements to be cuddled.
+    allow-cuddle-declarations: false
+    # A list of call idents that everything can be cuddled with.
+    # Defaults to calls looking like locks.
+    allow-cuddle-with-calls: ["Lock", "RLock"]
+    # AllowCuddleWithRHS is a list of right hand side variables that is allowed
+    # to be cuddled with anything. Defaults to assignments or calls looking
+    # like unlocks.
+    allow-cuddle-with-rhs: ["Unlock", "RUnlock"]
+    # Causes an error when an If statement that checks an error variable doesn't
+    # cuddle with the assignment of that variable.
+    force-err-cuddling: false
+    # When force-err-cuddling is enabled this is a list of names
+    # used for error variables to check for in the conditional.
+    error-variable-names: ["err"]
+    # Causes an error if a short declaration (:=) cuddles with anything other than
+    # another short declaration.
+    # This logic overrides force-err-cuddling among others.
+    force-short-decl-cuddling: false
+  varnamelen:
+    # The longest distance, in source lines, that is being considered a "small scope".
+    # Variables used in at most this many lines will be ignored.
+    # Default: 5
+    max-distance: 6
+    # The minimum length of a variable's name that is considered "long".
+    # Variable names that are at least this long will be ignored.
+    # Default: 3
+    min-name-length: 2
+    # Check method receivers.
+    # Default: false
+    check-receiver: false 
+    # Check named return values.
+    # Default: false
+    check-return: true
+    # Check type parameters.
+    # Default: false
+    check-type-param: true
+    # Ignore "ok" variables that hold the bool return value of a type assertion.
+    # Default: false
+    ignore-type-assert-ok: true
+    # Ignore "ok" variables that hold the bool return value of a map index.
+    # Default: false
+    ignore-map-index-ok: true
+    # Ignore "ok" variables that hold the bool return value of a channel receive.
+    # Default: false
+    ignore-chan-recv-ok: true
+    # Optional list of variable names that should be ignored completely.
+    # Default: []
+    ignore-names:
+      - err
+    # Optional list of variable declarations that should be ignored completely.
+    # Entries must be in one of the following forms (see below for examples):
+    # - for variables, parameters, named return values, method receivers, or type parameters:
+    #   <name> <type>  (<type> can also be a pointer/slice/map/chan/...)
+    # - for constants: const <name>
+    #
+    # Default: []
+    ignore-decls:
+      - c echo.Context
+      - t testing.T
+      - f *foo.Bar
+      - e error
+      - i int
+      - const C
+      - T any
+      - m map[string]int
+  prealloc:
+    # IMPORTANT: we don't recommend using this linter before doing performance profiling.
+    # For most programs usage of prealloc will be a premature optimization.
+
+    # Report pre-allocation suggestions only on simple loops that have no returns/breaks/continues/gotos in them.
+    # Default: true
+    simple: false
+    # Report pre-allocation suggestions on range loops.
+    # Default: true
+    range-loops: false
+    # Report pre-allocation suggestions on for loops.
+    # Default: false
+    for-loops: true
+  #nestif:
+    # Minimal complexity of if statements to report.
+    # Default: 5
+  # min-complexity: 4
+  misspell:
+    # Correct spellings using locale preferences for US or UK.
+    # Setting locale to US will correct the British spelling of 'colour' to 'color'.
+    # Default is to use a neutral variety of English.
+    locale: US
+    # Default: []
+    ignore-words:
+      - someword
+  godot:
+    # Comments to be checked: `declarations`, `toplevel`, or `all`.
+    # Default: declarations
+    scope: toplevel
+    # List of regexps for excluding particular comment lines from check.
+    # Default: []
+    exclude:
+      # Exclude todo and fixme comments.
+      - "^fixme:"
+      - "^todo:"
+    # Check that each sentence ends with a period.
+    # Default: true
+    period: false
+    # Check that each sentence starts with a capital letter.
+    # Default: false
+    capital: false 
+  dupl:
+    # Tokens count to trigger issue.
+    # Default: 150
+    threshold: 100
   cyclop:
     # The maximal code complexity to report.
     # Default: 10
@@ -75,6 +229,132 @@ linters-settings:
     # Minimal code complexity to report.
     # Default: 30 (but we recommend 10-20)
     min-complexity: 20
+  gocritic:
+    # Which checks should be enabled; can't be combined with 'disabled-checks'.
+    # See https://go-critic.github.io/overview#checks-overview.
+    # To check which checks are enabled run `GL_DEBUG=gocritic golangci-lint run`.
+    # By default, list of stable checks is used.
+    enabled-checks:
+      - elseif
+      - nestingReduce
+      - unnamedResult
+    #  - ruleguard
+      - truncateCmp
+      - hugeparam
+      - rangevalcopy
+      - captlocal
+      - underef
+      - toomanyresultschecker
+      - rangeexprcopy
+    # Which checks should be disabled; can't be combined with 'enabled-checks'.
+    # Default: []
+    disabled-checks:
+      - regexpMust
+    # Enable multiple checks by tags, run `GL_DEBUG=gocritic golangci-lint run` to see all tags and checks.
+    # See https://github.com/go-critic/go-critic#usage -> section "Tags".
+    # Default: []
+    enabled-tags:
+      - diagnostic
+      - style
+      - performance
+      - experimental
+      - opinionated
+    disabled-tags:
+      - diagnostic
+      - style
+      - performance
+      - experimental
+      - opinionated
+    # Settings passed to gocritic.
+    # The settings key is the name of a supported gocritic checker.
+    # The list of supported checkers can be found in https://go-critic.github.io/overview.
+    settings:
+      # Must be valid enabled check name.
+      captLocal:
+        # Whether to restrict checker to params only.
+        # Default: true
+        paramsOnly: false
+      elseif:
+        # Whether to skip balanced if-else pairs.
+        # Default: true
+        skipBalanced: false
+      hugeParam:
+        # Size in bytes that makes the warning trigger.
+        # Default: 80
+        sizeThreshold: 70
+      nestingReduce:
+        # Min number of statements inside a branch to trigger a warning.
+        # Default: 5
+        bodyWidth: 4
+      rangeExprCopy:
+        # Size in bytes that makes the warning trigger.
+        # Default: 512
+        sizeThreshold: 516
+        # Whether to check test functions
+        # Default: true
+        skipTestFuncs: false
+      rangeValCopy:
+        # Size in bytes that makes the warning trigger.
+        # Default: 128
+        sizeThreshold: 32
+        # Whether to check test functions.
+        # Default: true
+        skipTestFuncs: false
+      ruleguard:
+        # Enable debug to identify which 'Where' condition was rejected.
+        # The value of the parameter is the name of a function in a ruleguard file.
+        #
+        # When a rule is evaluated:
+        # If:
+        #   The Match() clause is accepted; and
+        #   One of the conditions in the Where() clause is rejected,
+        # Then:
+        #   ruleguard prints the specific Where() condition that was rejected.
+        #
+        # The flag is passed to the ruleguard 'debug-group' argument.
+        # Default: ""
+        debug: 'emptyDecl'
+        # Deprecated, use 'failOn' param.
+        # If set to true, identical to failOn='all', otherwise failOn=''
+        failOnError: false
+        # Determines the behavior when an error occurs while parsing ruleguard files.
+        # If flag is not set, log error and skip rule files that contain an error.
+        # If flag is set, the value must be a comma-separated list of error conditions.
+        # - 'all':    fail on all errors.
+        # - 'import': ruleguard rule imports a package that cannot be found.
+        # - 'dsl':    gorule file does not comply with the ruleguard DSL.
+        # Default: ""
+        failOn: dsl
+        # Comma-separated list of file paths containing ruleguard rules.
+        # If a path is relative, it is relative to the directory where the golangci-lint command is executed.
+        # The special '${configDir}' variable is substituted with the absolute directory containing the golangci config file.
+        # Glob patterns such as 'rules-*.go' may be specified.
+        # Default: ""
+        rules: '${configDir}/ruleguard/rules-*.go,${configDir}/myrule1.go'
+        # Comma-separated list of enabled groups or skip empty to enable everything.
+        # Tags can be defined with # character prefix.
+        # Default: "<all>"
+        enable: "myGroupName,#myTagName"
+        # Comma-separated list of disabled groups or skip empty to enable everything.
+        # Tags can be defined with # character prefix.
+        # Default: ""
+        disable: "myGroupName,#myTagName"
+      tooManyResultsChecker:
+        # Maximum number of results.
+        # Default: 5
+        maxResults: 10
+      truncateCmp:
+        # Whether to skip int/uint/uintptr types.
+        # Default: true
+        skipArchDependent: false
+      underef:
+        # Whether to skip (*x).method() calls where x is a pointer receiver.
+        # Default: true
+        skipRecvDeref: false
+      unnamedResult:
+        # Whether to check exported functions.
+        # Default: false
+        checkExported: true
 issues:
   exclude-rules:
     - path: _test\.go
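
The gocritic settings above lower rangeValCopy's sizeThreshold to 32 bytes and set skipTestFuncs to false, which is what motivates the many range-loop rewrites in the Go diffs below. A self-contained illustration, with an invented struct size, of the loop shape the linter flags and the index-based form this commit adopts:

    package main

    import "fmt"

    // wide stands in for any struct larger than the 32-byte threshold;
    // the 64-byte size is invented for this example.
    type wide struct{ payload [64]byte }

    func main() {
        items := make([]wide, 3)

        // flagged by rangeValCopy: every iteration copies a 64-byte element
        for _, it := range items {
            _ = it.payload
        }

        // the index-based form used throughout this commit: no per-iteration copy
        for idx := range items {
            _ = items[idx].payload
        }
        fmt.Println("done")
    }

The second loop reads each element in place through the slice index, so no element-sized copy is made per iteration.
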
diff --git a/shardingsphere-operator/api/v1alpha1/compute_node_types.go b/shardingsphere-operator/api/v1alpha1/compute_node_types.go
index 9f07385..e980fcf 100644
--- a/shardingsphere-operator/api/v1alpha1/compute_node_types.go
+++ b/shardingsphere-operator/api/v1alpha1/compute_node_types.go
@@ -336,6 +336,7 @@ type ComputeNodeConditionType string
 const (
 	ComputeNodeConditionInitialized ComputeNodeConditionType = "Initialized"
 	ComputeNodeConditionStarted     ComputeNodeConditionType = "Started"
+	ComputeNodeConditionSucceed     ComputeNodeConditionType = "Succeed"
 	ComputeNodeConditionReady       ComputeNodeConditionType = "Ready"
 	ComputeNodeConditionUnknown     ComputeNodeConditionType = "Unknown"
 	ComputeNodeConditionDeployed    ComputeNodeConditionType = "Deployed"
diff --git a/shardingsphere-operator/api/v1alpha1/proxy_status.go b/shardingsphere-operator/api/v1alpha1/proxy_status.go
index 238230f..e70d445 100644
--- a/shardingsphere-operator/api/v1alpha1/proxy_status.go
+++ b/shardingsphere-operator/api/v1alpha1/proxy_status.go
@@ -35,6 +35,7 @@ const (
 	ConditionDeployed    ConditionType = "Deployed"
 	ConditionInitialized ConditionType = "Initialized"
 	ConditionStarted     ConditionType = "Started"
+	ConditionSucceed     ConditionType = "Succeed"
 	ConditionReady       ConditionType = "Ready"
 	ConditionUnknown     ConditionType = "Unknown"
 	ConditionFailed      ConditionType = "Failed"
diff --git a/shardingsphere-operator/pkg/controllers/compute_node_controller.go b/shardingsphere-operator/pkg/controllers/compute_node_controller.go
index 9fb84a7..7d7f862 100644
--- a/shardingsphere-operator/pkg/controllers/compute_node_controller.go
+++ b/shardingsphere-operator/pkg/controllers/compute_node_controller.go
@@ -164,11 +164,11 @@ func (r *ComputeNodeReconciler) createService(ctx context.Context, cn *v1alpha1.
 
 func (r *ComputeNodeReconciler) updateService(ctx context.Context, cn *v1alpha1.ComputeNode, cur *v1.Service) error {
 	if cn.Spec.ServiceType == v1.ServiceTypeNodePort {
-		for _, p := range cur.Spec.Ports {
-			for idx := range cn.Spec.PortBindings {
-				if p.Name == cn.Spec.PortBindings[idx].Name {
-					if cn.Spec.PortBindings[idx].NodePort == 0 {
-						cn.Spec.PortBindings[idx].NodePort = p.NodePort
+		for idx := range cur.Spec.Ports {
+			for i := range cn.Spec.PortBindings {
+				if cur.Spec.Ports[idx].Name == cn.Spec.PortBindings[i].Name {
+					if cn.Spec.PortBindings[i].NodePort == 0 {
+						cn.Spec.PortBindings[i].NodePort = cur.Spec.Ports[idx].NodePort
 						if err := r.Update(ctx, cn); err != nil {
 							return err
 						}
@@ -264,21 +264,21 @@ func (r *ComputeNodeReconciler) reconcileStatus(ctx context.Context, cn *v1alpha
 		return err
 	}
 
-	status := reconcileComputeNodeStatus(*podlist, *service)
+	status := reconcileComputeNodeStatus(podlist, service)
 	rt.Status = *status
 
 	// TODO: Compare Status with or without modification
 	return r.Status().Update(ctx, rt)
 }
 
-func getReadyProxyInstances(podlist v1.PodList) int32 {
+func getReadyProxyInstances(podlist *v1.PodList) int32 {
 	var cnt int32
-	for _, p := range podlist.Items {
-		if p.Status.Phase == v1.PodRunning {
-			for _, c := range p.Status.Conditions {
-				if c.Type == v1.PodReady && c.Status == v1.ConditionTrue {
-					for _, con := range p.Status.ContainerStatuses {
-						if con.Name == "shardingsphere-proxy" && con.Ready {
+	for idx := range podlist.Items {
+		if podlist.Items[idx].Status.Phase == v1.PodRunning {
+			for i := range podlist.Items[idx].Status.Conditions {
+				if podlist.Items[idx].Status.Conditions[i].Type == v1.PodReady && podlist.Items[idx].Status.Conditions[i].Status == v1.ConditionTrue {
+					for j := range podlist.Items[idx].Status.ContainerStatuses {
+						if podlist.Items[idx].Status.ContainerStatuses[j].Name == "shardingsphere-proxy" && podlist.Items[idx].Status.ContainerStatuses[j].Ready {
 							cnt++
 						}
 					}
@@ -289,7 +289,7 @@ func getReadyProxyInstances(podlist v1.PodList) int32 {
 	return cnt
 }
 
-func newConditions(conditions []v1alpha1.ComputeNodeCondition, cond v1alpha1.ComputeNodeCondition) []v1alpha1.ComputeNodeCondition {
+func newConditions(conditions []v1alpha1.ComputeNodeCondition, cond *v1alpha1.ComputeNodeCondition) []v1alpha1.ComputeNodeCondition {
 	if conditions == nil {
 		conditions = []v1alpha1.ComputeNodeCondition{}
 	}
@@ -299,26 +299,27 @@ func newConditions(conditions []v1alpha1.ComputeNodeCondition, cond v1alpha1.Com
 
 	found := false
 	for idx := range conditions {
-		if conditions[idx].Type == cond.Type {
-			conditions[idx].LastUpdateTime = cond.LastUpdateTime
-			conditions[idx].Status = cond.Status
-			found = true
-			break
+		if conditions[idx].Type != cond.Type {
+			continue
 		}
+		conditions[idx].LastUpdateTime = cond.LastUpdateTime
+		conditions[idx].Status = cond.Status
+		found = true
+		break
 	}
 
 	if !found {
-		conditions = append(conditions, cond)
+		conditions = append(conditions, *cond)
 	}
 
 	return conditions
 }
 
-func updateReadyConditions(conditions []v1alpha1.ComputeNodeCondition, cond v1alpha1.ComputeNodeCondition) []v1alpha1.ComputeNodeCondition {
+func updateReadyConditions(conditions []v1alpha1.ComputeNodeCondition, cond *v1alpha1.ComputeNodeCondition) []v1alpha1.ComputeNodeCondition {
 	return newConditions(conditions, cond)
 }
 
-func updateNotReadyConditions(conditions []v1alpha1.ComputeNodeCondition, cond v1alpha1.ComputeNodeCondition) []v1alpha1.ComputeNodeCondition {
+func updateNotReadyConditions(conditions []v1alpha1.ComputeNodeCondition, cond *v1alpha1.ComputeNodeCondition) []v1alpha1.ComputeNodeCondition {
 	cur := newConditions(conditions, cond)
 
 	for idx := range cur {
@@ -331,7 +332,7 @@ func updateNotReadyConditions(conditions []v1alpha1.ComputeNodeCondition, cond v
 	return cur
 }
 
-func clusterCondition(podlist v1.PodList) v1alpha1.ComputeNodeCondition {
+func clusterCondition(podlist *v1.PodList) v1alpha1.ComputeNodeCondition {
 	cond := v1alpha1.ComputeNodeCondition{}
 	if len(podlist.Items) == 0 {
 		return cond
@@ -342,6 +343,13 @@ func clusterCondition(podlist v1.PodList) v1alpha1.ComputeNodeCondition {
 		Status:         v1alpha1.ConditionStatusTrue,
 		LastUpdateTime: metav1.Now(),
 	}
+
+	condSucceed := v1alpha1.ComputeNodeCondition{
+		Type:           v1alpha1.ComputeNodeConditionSucceed,
+		Status:         v1alpha1.ConditionStatusTrue,
+		LastUpdateTime: metav1.Now(),
+	}
+
 	condUnknown := v1alpha1.ComputeNodeCondition{
 		Type:           v1alpha1.ComputeNodeConditionUnknown,
 		Status:         v1alpha1.ConditionStatusTrue,
@@ -359,8 +367,10 @@ func clusterCondition(podlist v1.PodList) v1alpha1.ComputeNodeCondition {
 	}
 
 	//FIXME: do not capture ConditionStarted in some cases
-	for _, p := range podlist.Items {
-		switch p.Status.Phase {
+	for idx := range podlist.Items {
+		switch podlist.Items[idx].Status.Phase {
+		case v1.PodSucceeded:
+			return condSucceed
 		case v1.PodRunning:
 			return condStarted
 		case v1.PodUnknown:
@@ -374,38 +384,38 @@ func clusterCondition(podlist v1.PodList) v1alpha1.ComputeNodeCondition {
 	return cond
 }
 
-func reconcileComputeNodeStatus(podlist v1.PodList, svc v1.Service) *v1alpha1.ComputeNodeStatus {
-	s := &v1alpha1.ComputeNodeStatus{}
+func reconcileComputeNodeStatus(podlist *v1.PodList, svc *v1.Service) *v1alpha1.ComputeNodeStatus {
+	status := &v1alpha1.ComputeNodeStatus{}
 
-	s.Replicas = int32(len(podlist.Items))
+	status.Replicas = int32(len(podlist.Items))
 
 	readyInstances := getReadyProxyInstances(podlist)
-	s.ReadyInstances = readyInstances
-	if s.Replicas == 0 {
-		s.Phase = v1alpha1.ComputeNodeStatusNotReady
+	status.ReadyInstances = readyInstances
+	if status.Replicas == 0 {
+		status.Phase = v1alpha1.ComputeNodeStatusNotReady
 	} else {
 		if readyInstances < miniReadyCount {
-			s.Phase = v1alpha1.ComputeNodeStatusNotReady
+			status.Phase = v1alpha1.ComputeNodeStatusNotReady
 		} else {
-			s.Phase = v1alpha1.ComputeNodeStatusReady
+			status.Phase = v1alpha1.ComputeNodeStatusReady
 		}
 	}
 
-	if s.Phase == v1alpha1.ComputeNodeStatusReady {
-		s.Conditions = updateReadyConditions(s.Conditions, v1alpha1.ComputeNodeCondition{
+	if status.Phase == v1alpha1.ComputeNodeStatusReady {
+		status.Conditions = updateReadyConditions(status.Conditions, &v1alpha1.ComputeNodeCondition{
 			Type:           v1alpha1.ComputeNodeConditionReady,
 			Status:         v1alpha1.ConditionStatusTrue,
 			LastUpdateTime: metav1.Now(),
 		})
 	} else {
 		cond := clusterCondition(podlist)
-		s.Conditions = updateNotReadyConditions(s.Conditions, cond)
+		status.Conditions = updateNotReadyConditions(status.Conditions, &cond)
 	}
 
-	s.LoadBalancer.ClusterIP = svc.Spec.ClusterIP
-	s.LoadBalancer.Ingress = svc.Status.LoadBalancer.Ingress
+	status.LoadBalancer.ClusterIP = svc.Spec.ClusterIP
+	status.LoadBalancer.Ingress = svc.Status.LoadBalancer.Ingress
 
-	return s
+	return status
 }
 
 func (r *ComputeNodeReconciler) getRuntimeComputeNode(ctx context.Context, namespacedName types.NamespacedName) (*v1alpha1.ComputeNode, error) {
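
The signature changes in this file, for example getReadyProxyInstances taking *v1.PodList instead of v1.PodList, line up with the hugeParam sizeThreshold of 70 bytes configured earlier. A sketch of the pattern with a stand-in type rather than the real v1.PodList:

    package main

    import "fmt"

    // podList is a stand-in wider than hugeParam's 70-byte threshold,
    // not the real v1.PodList.
    type podList struct{ items [16]string }

    // flagged: the whole struct is copied on every call
    func readyByValue(pl podList) int { return len(pl.items) }

    // preferred, as in getReadyProxyInstances above: pass a pointer
    func readyByPointer(pl *podList) int { return len(pl.items) }

    func main() {
        var pl podList
        fmt.Println(readyByValue(pl), readyByPointer(&pl))
    }
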
diff --git a/shardingsphere-operator/pkg/controllers/compute_node_controller_test.go b/shardingsphere-operator/pkg/controllers/compute_node_controller_test.go
index 7b791f4..6954588 100644
--- a/shardingsphere-operator/pkg/controllers/compute_node_controller_test.go
+++ b/shardingsphere-operator/pkg/controllers/compute_node_controller_test.go
@@ -102,7 +102,7 @@ func Test_GetReadyProxyInstances(t *testing.T) {
 	expected := int32(1)
 
 	// call the function to get the actual result
-	actual := getReadyProxyInstances(podlist)
+	actual := getReadyProxyInstances(&podlist)
 
 	// compare the expected and actual results
 	if actual != expected {
diff --git a/shardingsphere-operator/pkg/controllers/proxy_controller.go b/shardingsphere-operator/pkg/controllers/proxy_controller.go
index 3bddc73..627fab7 100644
--- a/shardingsphere-operator/pkg/controllers/proxy_controller.go
+++ b/shardingsphere-operator/pkg/controllers/proxy_controller.go
@@ -40,7 +40,7 @@ import (
 const (
 	//WaitingForReady Time selection reference kubelet restart time
 	WaitingForReady = 10 * time.Second
-	//miniReadyCount Minimum number of replicas that can be served
+	// miniReadyCount Minimum number of replicas that can be served
 	miniReadyCount = 1
 
 	proxyControllerName = "proxy_controller"
@@ -152,13 +152,12 @@ func (r *ProxyReconciler) reconcileHPA(ctx context.Context, namespacedName types
 	if err := r.Get(ctx, namespacedName, hpa); err != nil {
 		if !apierrors.IsNotFound(err) {
 			return ctrl.Result{}, err
-		} else {
-			if ssproxy.Spec.AutomaticScaling != nil && ssproxy.Spec.AutomaticScaling.Enable {
-				exp := reconcile.NewHPA(ssproxy)
-				if err := r.Create(ctx, exp); err != nil {
-					return ctrl.Result{}, err
-				}
+		} else if ssproxy.Spec.AutomaticScaling != nil && ssproxy.Spec.AutomaticScaling.Enable {
+			exp := reconcile.NewHPA(ssproxy)
+			if err := r.Create(ctx, exp); err != nil {
+				return ctrl.Result{}, err
 			}
+
 		}
 	} else {
 		if ssproxy.Spec.AutomaticScaling == nil || !ssproxy.Spec.AutomaticScaling.Enable {
@@ -221,7 +220,7 @@ func (r *ProxyReconciler) reconcilePodList(ctx context.Context, namespace, name
 		return ctrl.Result{}, err
 	}
 
-	rt.Status = reconcile.ReconcileStatus(*podList, *rt)
+	rt.Status = reconcile.ReconcileStatus(podList, rt)
 
 	// TODO: Compare Status with or without modification
 	if err := r.Status().Update(ctx, rt); err != nil {
diff --git a/shardingsphere-operator/pkg/reconcile/computenode/deployment.go b/shardingsphere-operator/pkg/reconcile/computenode/deployment.go
index 96c655d..9a94c2e 100644
--- a/shardingsphere-operator/pkg/reconcile/computenode/deployment.go
+++ b/shardingsphere-operator/pkg/reconcile/computenode/deployment.go
@@ -242,8 +242,8 @@ func (c *containerBuilder) SetVolumeMount(mount *corev1.VolumeMount) ContainerBu
 	if c.container.VolumeMounts == nil {
 		c.container.VolumeMounts = []corev1.VolumeMount{*mount}
 	} else {
-		for idx, v := range c.container.VolumeMounts {
-			if v.Name == mount.Name {
+		for idx := range c.container.VolumeMounts {
+			if c.container.VolumeMounts[idx].Name == mount.Name {
 				c.container.VolumeMounts[idx] = *mount
 				return c
 			}
@@ -333,8 +333,8 @@ func (d *deploymentBuilder) SetShardingSphereProxyContainer(proxy *corev1.Contai
 		d.deployment.Spec.Template.Spec.Containers = []corev1.Container{*proxy}
 	}
 
-	for idx, container := range d.deployment.Spec.Template.Spec.Containers {
-		if container.Name == defaultContainerName {
+	for idx := range d.deployment.Spec.Template.Spec.Containers {
+		if d.deployment.Spec.Template.Spec.Containers[idx].Name == defaultContainerName {
 			d.deployment.Spec.Template.Spec.Containers[idx] = *proxy
 			return d
 		}
@@ -350,8 +350,8 @@ func (d *deploymentBuilder) SetInitContainer(init *corev1.Container) DeploymentB
 		d.deployment.Spec.Template.Spec.InitContainers = []corev1.Container{}
 	}
 
-	for idx, container := range d.deployment.Spec.Template.Spec.InitContainers {
-		if container.Name == init.Name {
+	for idx := range d.deployment.Spec.Template.Spec.InitContainers {
+		if d.deployment.Spec.Template.Spec.InitContainers[idx].Name == init.Name {
 			d.deployment.Spec.Template.Spec.InitContainers[idx] = *init
 			return d
 		}
@@ -513,19 +513,19 @@ func (b *volumeAndMountBuilder) Build() (*corev1.Volume, *corev1.VolumeMount) {
 }
 
 // SetVolume sets a volume for Deployment
-func (d *deploymentBuilder) SetVolume(v *corev1.Volume) DeploymentBuilder {
+func (d *deploymentBuilder) SetVolume(vol *corev1.Volume) DeploymentBuilder {
 	if d.deployment.Spec.Template.Spec.Volumes == nil {
-		d.deployment.Spec.Template.Spec.Volumes = []corev1.Volume{*v}
+		d.deployment.Spec.Template.Spec.Volumes = []corev1.Volume{*vol}
 	}
 
-	for idx, vol := range d.deployment.Spec.Template.Spec.Volumes {
-		if vol.Name == v.Name {
-			d.deployment.Spec.Template.Spec.Volumes[idx] = *v
+	for idx := range d.deployment.Spec.Template.Spec.Volumes {
+		if d.deployment.Spec.Template.Spec.Volumes[idx].Name == vol.Name {
+			d.deployment.Spec.Template.Spec.Volumes[idx] = *vol
 			return d
 		}
 	}
 
-	d.deployment.Spec.Template.Spec.Volumes = append(d.deployment.Spec.Template.Spec.Volumes, *v)
+	d.deployment.Spec.Template.Spec.Volumes = append(d.deployment.Spec.Template.Spec.Volumes, *vol)
 	return d
 }
 
@@ -540,12 +540,12 @@ func NewDeployment(cn *v1alpha1.ComputeNode) *appsv1.Deployment {
 	builder.SetName(cn.Name).SetNamespace(cn.Namespace).SetLabelsAndSelectors(cn.Labels, cn.Spec.Selector).SetAnnotations(cn.Annotations).SetReplicas(&cn.Spec.Replicas)
 
 	ports := []corev1.ContainerPort{}
-	for _, pb := range cn.Spec.PortBindings {
+	for idx := range cn.Spec.PortBindings {
 		ports = append(ports, corev1.ContainerPort{
-			Name:          pb.Name,
-			HostIP:        pb.HostIP,
-			ContainerPort: pb.ContainerPort,
-			Protocol:      pb.Protocol,
+			Name:          cn.Spec.PortBindings[idx].Name,
+			HostIP:        cn.Spec.PortBindings[idx].HostIP,
+			ContainerPort: cn.Spec.PortBindings[idx].ContainerPort,
+			Protocol:      cn.Spec.PortBindings[idx].Protocol,
 		})
 	}
 
diff --git a/shardingsphere-operator/pkg/reconcile/computenode/service.go b/shardingsphere-operator/pkg/reconcile/computenode/service.go
index 0f52225..6fa1ccf 100644
--- a/shardingsphere-operator/pkg/reconcile/computenode/service.go
+++ b/shardingsphere-operator/pkg/reconcile/computenode/service.go
@@ -31,12 +31,12 @@ func NewService(cn *v1alpha1.ComputeNode) *corev1.Service {
 	builder.SetName(cn.Name).SetNamespace(cn.Namespace).SetLabelsAndSelectors(cn.Labels, cn.Spec.Selector).SetAnnotations(cn.Annotations).SetType(cn.Spec.ServiceType)
 
 	ports := []corev1.ServicePort{}
-	for _, pb := range cn.Spec.PortBindings {
+	for idx := range cn.Spec.PortBindings {
 		ports = append(ports, corev1.ServicePort{
-			Name:       pb.Name,
-			Port:       pb.ServicePort,
-			TargetPort: intstr.FromInt(int(pb.ContainerPort)),
-			Protocol:   pb.Protocol,
+			Name:       cn.Spec.PortBindings[idx].Name,
+			Port:       cn.Spec.PortBindings[idx].ServicePort,
+			TargetPort: intstr.FromInt(int(cn.Spec.PortBindings[idx].ContainerPort)),
+			Protocol:   cn.Spec.PortBindings[idx].Protocol,
 		})
 	}
 	builder.SetPorts(ports)
diff --git a/shardingsphere-operator/pkg/reconcile/proxy/deployment.go b/shardingsphere-operator/pkg/reconcile/proxy/deployment.go
index 22027e8..0d735c3 100644
--- a/shardingsphere-operator/pkg/reconcile/proxy/deployment.go
+++ b/shardingsphere-operator/pkg/reconcile/proxy/deployment.go
@@ -42,7 +42,7 @@ const (
 	// AnnoRollingUpdateMaxUnavailable refers to Deployment RollingUpdate Strategy
 	AnnoRollingUpdateMaxUnavailable = "shardingsphereproxy.shardingsphere.org/rolling-update-max-unavailable"
 
-	//miniReadyCount Minimum number of replicas that can be served
+	// miniReadyCount Minimum number of replicas that can be served
 	miniReadyCount = 1
 )
 
@@ -258,10 +258,10 @@ func updateReplicas(proxy *v1alpha1.ShardingSphereProxy, act *v1.Deployment) *in
 func updatePodTemplateSpec(proxy *v1alpha1.ShardingSphereProxy, act *v1.Deployment) corev1.PodTemplateSpec {
 	exp := act.Spec.Template.DeepCopy()
 
-	SSProxyContainer := updateSSProxyContainer(proxy, act)
+	ssProxyContainer := updateSSProxyContainer(proxy, act)
 	for i := range exp.Spec.Containers {
 		if exp.Spec.Containers[i].Name == "proxy" {
-			exp.Spec.Containers[i] = *SSProxyContainer
+			exp.Spec.Containers[i] = *ssProxyContainer
 		}
 	}
 
@@ -290,16 +290,16 @@ func updateConfigName(proxy *v1alpha1.ShardingSphereProxy, act *v1.Deployment) s
 func updateInitContainer(proxy *v1alpha1.ShardingSphereProxy, act *v1.Deployment) *corev1.Container {
 	var exp *corev1.Container
 
-	for _, c := range act.Spec.Template.Spec.InitContainers {
-		if c.Name == "download-mysql-connect" {
-			for i := range c.Env {
-				if c.Env[i].Name == "VERSION" {
-					if c.Env[i].Value != proxy.Spec.MySQLDriver.Version {
-						c.Env[i].Value = proxy.Spec.MySQLDriver.Version
+	for idx := range act.Spec.Template.Spec.InitContainers {
+		if act.Spec.Template.Spec.InitContainers[idx].Name == "download-mysql-connect" {
+			for i := range act.Spec.Template.Spec.InitContainers[idx].Env {
+				if act.Spec.Template.Spec.InitContainers[idx].Env[i].Name == "VERSION" {
+					if act.Spec.Template.Spec.InitContainers[idx].Env[i].Value != proxy.Spec.MySQLDriver.Version {
+						act.Spec.Template.Spec.InitContainers[idx].Env[i].Value = proxy.Spec.MySQLDriver.Version
 					}
 				}
 			}
-			exp = c.DeepCopy()
+			exp = act.Spec.Template.Spec.InitContainers[idx].DeepCopy()
 		}
 	}
 
@@ -309,52 +309,54 @@ func updateInitContainer(proxy *v1alpha1.ShardingSphereProxy, act *v1.Deployment
 func updateSSProxyContainer(proxy *v1alpha1.ShardingSphereProxy, act *v1.Deployment) *corev1.Container {
 	var exp *corev1.Container
 
-	for _, c := range act.Spec.Template.Spec.Containers {
-		if c.Name == "proxy" {
-			exp = c.DeepCopy()
+	for idx := range act.Spec.Template.Spec.Containers {
+		if act.Spec.Template.Spec.Containers[idx].Name != "proxy" {
+			continue
+		}
 
-			tag := strings.Split(c.Image, ":")[1]
-			if tag != proxy.Spec.Version {
-				exp.Image = fmt.Sprintf("%s:%s", imageName, proxy.Spec.Version)
-			}
+		exp = act.Spec.Template.Spec.Containers[idx].DeepCopy()
 
-			exp.Resources = proxy.Spec.Resources
+		tag := strings.Split(act.Spec.Template.Spec.Containers[idx].Image, ":")[1]
+		if tag != proxy.Spec.Version {
+			exp.Image = fmt.Sprintf("%s:%s", imageName, proxy.Spec.Version)
+		}
 
-			if proxy.Spec.LivenessProbe != nil && !reflect.DeepEqual(c.LivenessProbe, *proxy.Spec.LivenessProbe) {
-				exp.LivenessProbe = proxy.Spec.LivenessProbe
-			}
+		exp.Resources = proxy.Spec.Resources
 
-			if proxy.Spec.ReadinessProbe != nil && !reflect.DeepEqual(c.ReadinessProbe, *proxy.Spec.ReadinessProbe) {
-				exp.ReadinessProbe = proxy.Spec.ReadinessProbe
-			}
+		if proxy.Spec.LivenessProbe != nil && !reflect.DeepEqual(act.Spec.Template.Spec.Containers[idx].LivenessProbe, *proxy.Spec.LivenessProbe) {
+			exp.LivenessProbe = proxy.Spec.LivenessProbe
+		}
 
-			if proxy.Spec.StartupProbe != nil && !reflect.DeepEqual(c.StartupProbe, *proxy.Spec.StartupProbe) {
-				exp.StartupProbe = proxy.Spec.StartupProbe
-			}
+		if proxy.Spec.ReadinessProbe != nil && !reflect.DeepEqual(act.Spec.Template.Spec.Containers[idx].ReadinessProbe, *proxy.Spec.ReadinessProbe) {
+			exp.ReadinessProbe = proxy.Spec.ReadinessProbe
+		}
 
-			for i := range c.Env {
-				if c.Env[i].Name == "PORT" {
-					proxyPort := strconv.FormatInt(int64(proxy.Spec.Port), 10)
-					if c.Env[i].Value != proxyPort {
-						c.Env[i].Value = proxyPort
-						exp.Ports[0].ContainerPort = proxy.Spec.Port
-					}
+		if proxy.Spec.StartupProbe != nil && !reflect.DeepEqual(act.Spec.Template.Spec.Containers[idx].StartupProbe, *proxy.Spec.StartupProbe) {
+			exp.StartupProbe = proxy.Spec.StartupProbe
+		}
+
+		for i := range act.Spec.Template.Spec.Containers[idx].Env {
+			if act.Spec.Template.Spec.Containers[idx].Env[i].Name == "PORT" {
+				proxyPort := strconv.FormatInt(int64(proxy.Spec.Port), 10)
+				if act.Spec.Template.Spec.Containers[idx].Env[i].Value != proxyPort {
+					act.Spec.Template.Spec.Containers[idx].Env[i].Value = proxyPort
+					exp.Ports[0].ContainerPort = proxy.Spec.Port
 				}
 			}
-			exp.Env = c.Env
 		}
+		exp.Env = act.Spec.Template.Spec.Containers[idx].Env
 	}
 	return exp
 }
 
-func getReadyNodes(podlist corev1.PodList) int32 {
+func getReadyNodes(podlist *corev1.PodList) int32 {
 	var cnt int32
-	for _, p := range podlist.Items {
-		if p.Status.Phase == corev1.PodRunning {
-			for _, c := range p.Status.Conditions {
-				if c.Type == corev1.PodReady && c.Status == corev1.ConditionTrue {
-					for _, con := range p.Status.ContainerStatuses {
-						if con.Name == "proxy" && con.Ready {
+	for idx := range podlist.Items {
+		if podlist.Items[idx].Status.Phase == corev1.PodRunning {
+			for i := range podlist.Items[idx].Status.Conditions {
+				if podlist.Items[idx].Status.Conditions[i].Type == corev1.PodReady && podlist.Items[idx].Status.Conditions[i].Status == corev1.ConditionTrue {
+					for j := range podlist.Items[idx].Status.ContainerStatuses {
+						if podlist.Items[idx].Status.ContainerStatuses[j].Name == "proxy" && podlist.Items[idx].Status.ContainerStatuses[j].Ready {
 							cnt++
 						}
 					}
@@ -366,7 +368,7 @@ func getReadyNodes(podlist corev1.PodList) int32 {
 }
 
 // ReconcileStatus returns the status of ShardingSphereProxy
-func ReconcileStatus(podlist corev1.PodList, rt v1alpha1.ShardingSphereProxy) v1alpha1.ProxyStatus {
+func ReconcileStatus(podlist *corev1.PodList, rt *v1alpha1.ShardingSphereProxy) v1alpha1.ProxyStatus {
 	readyNodes := getReadyNodes(podlist)
 
 	rt.Status.ReadyNodes = readyNodes
@@ -404,12 +406,14 @@ func newConditions(conditions []v1alpha1.Condition, cond v1alpha1.Condition) []v
 
 	found := false
 	for idx := range conditions {
-		if conditions[idx].Type == cond.Type {
-			conditions[idx].LastUpdateTime = cond.LastUpdateTime
-			conditions[idx].Status = cond.Status
-			found = true
-			break
+		if conditions[idx].Type != cond.Type {
+			continue
 		}
+
+		conditions[idx].LastUpdateTime = cond.LastUpdateTime
+		conditions[idx].Status = cond.Status
+		found = true
+		break
 	}
 
 	if !found {
@@ -436,7 +440,7 @@ func updateNotReadyConditions(conditions []v1alpha1.Condition, cond v1alpha1.Con
 	return cur
 }
 
-func clusterCondition(podlist corev1.PodList) v1alpha1.Condition {
+func clusterCondition(podlist *corev1.PodList) v1alpha1.Condition {
 	cond := v1alpha1.Condition{}
 	if len(podlist.Items) == 0 {
 		return cond
@@ -447,6 +451,13 @@ func clusterCondition(podlist corev1.PodList) v1alpha1.Condition {
 		Status:         metav1.ConditionTrue,
 		LastUpdateTime: metav1.Now(),
 	}
+
+	condSucceed := v1alpha1.Condition{
+		Type:           v1alpha1.ConditionSucceed,
+		Status:         metav1.ConditionTrue,
+		LastUpdateTime: metav1.Now(),
+	}
+
 	condUnknown := v1alpha1.Condition{
 		Type:           v1alpha1.ConditionUnknown,
 		Status:         metav1.ConditionTrue,
@@ -464,8 +475,10 @@ func clusterCondition(podlist corev1.PodList) v1alpha1.Condition {
 	}
 
 	//FIXME: do not capture ConditionStarted in some cases
-	for _, p := range podlist.Items {
-		switch p.Status.Phase {
+	for i := range podlist.Items {
+		switch podlist.Items[i].Status.Phase {
+		case corev1.PodSucceeded:
+			return condSucceed
 		case corev1.PodRunning:
 			return condStarted
 		case corev1.PodUnknown:
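
Both newConditions implementations now skip non-matching entries with an early continue instead of nesting the update inside the match branch. A minimal sketch of that guard-clause shape, using a simplified condition type (the real code also tracks a found flag):

    package main

    import "fmt"

    // condition is a simplified stand-in for the API's condition types.
    type condition struct{ Type, Status string }

    // upsert mirrors the early-continue shape newConditions now uses.
    func upsert(conds []condition, want condition) []condition {
        for idx := range conds {
            if conds[idx].Type != want.Type {
                continue // guard clause replaces the nested match branch
            }
            conds[idx].Status = want.Status
            return conds
        }
        return append(conds, want)
    }

    func main() {
        conds := []condition{{Type: "Ready", Status: "False"}}
        fmt.Println(upsert(conds, condition{Type: "Ready", Status: "True"}))
    }
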
diff --git a/shardingsphere-operator/pkg/reconcile/proxy/deployment_test.go b/shardingsphere-operator/pkg/reconcile/proxy/deployment_test.go
index 78dc4b3..078e9df 100644
--- a/shardingsphere-operator/pkg/reconcile/proxy/deployment_test.go
+++ b/shardingsphere-operator/pkg/reconcile/proxy/deployment_test.go
@@ -741,7 +741,7 @@ func Test_ReconcileStatus(t *testing.T) {
 	}
 
 	for _, c := range cases {
-		act := ReconcileStatus(c.podlist, *c.exp)
+		act := ReconcileStatus(&c.podlist, c.exp)
 		assertReadyNodes(t, c.exp.Status.ReadyNodes, act.ReadyNodes, c.message)
 		assertPhase(t, c.exp.Status.Phase, act.Phase, c.message)
 		assertConditions(t, c.exp.Status.Conditions, act.Conditions, c.message)
@@ -854,12 +854,15 @@ func Test_ClusterConditions(t *testing.T) {
 					},
 				},
 			}},
-			exp: v1alpha1.Condition{},
+			exp: v1alpha1.Condition{
+				Type:   v1alpha1.ConditionSucceed,
+				Status: metav1.ConditionTrue,
+			},
 		},
 	}
 
 	for _, c := range cases {
-		act := clusterCondition(c.podlist)
+		act := clusterCondition(&c.podlist)
 		assert.Equal(t, c.exp.Type, act.Type, c.name)
 		assert.Equal(t, c.exp.Status, act.Status, c.name)
 	}
diff --git a/shardingsphere-operator/pkg/reconcile/proxy/resource.go b/shardingsphere-operator/pkg/reconcile/proxy/resource.go
index d04313f..12e30e4 100644
--- a/shardingsphere-operator/pkg/reconcile/proxy/resource.go
+++ b/shardingsphere-operator/pkg/reconcile/proxy/resource.go
@@ -27,17 +27,17 @@ const imageName = "apache/shardingsphere-proxy"
 func fromInt32(val int32) intstr.IntOrString {
 	return intstr.IntOrString{Type: intstr.Int, IntVal: val}
 }
-func isRunningPod(s v1.PodStatus) bool {
+func isRunningPod(s *v1.PodStatus) bool {
 	return s.Phase == v1.PodRunning
 }
 
-func isReadyPod(s v1.PodStatus) bool {
+func isReadyPod(s *v1.PodStatus) bool {
 	if s.ContainerStatuses != nil && len(s.ContainerStatuses) > 0 {
 		return s.ContainerStatuses[0].Ready
 	}
 	return false
 }
 
-func isNonTerminatingPod(pod v1.Pod) bool {
+func isNonTerminatingPod(pod *v1.Pod) bool {
 	return pod.ObjectMeta.DeletionTimestamp == nil
 }
diff --git a/shardingsphere-operator/pkg/reconcile/proxy/status.go b/shardingsphere-operator/pkg/reconcile/proxy/status.go
index a9a9882..769845a 100644
--- a/shardingsphere-operator/pkg/reconcile/proxy/status.go
+++ b/shardingsphere-operator/pkg/reconcile/proxy/status.go
@@ -24,8 +24,8 @@ import (
 // IsRunning returns true if one of the Pods is running
 func IsRunning(podList *v1.PodList) bool {
 	status := false
-	for _, pod := range podList.Items {
-		if isNonTerminatingPod(pod) && isRunningPod(pod.Status) {
+	for i := range podList.Items {
+		if isNonTerminatingPod(&podList.Items[i]) && isRunningPod(&podList.Items[i].Status) {
 			status = true
 			break
 		}
@@ -36,12 +36,12 @@ func IsRunning(podList *v1.PodList) bool {
 // CountingReadyPods returns the current count of ready pods
 func CountingReadyPods(podList *v1.PodList) int32 {
 	var readyPods int32 = 0
-	for _, pod := range podList.Items {
-		if len(pod.Status.ContainerStatuses) == 0 {
+	for i := range podList.Items {
+		if len(podList.Items[i].Status.ContainerStatuses) == 0 {
 			continue
 		}
 
-		if isNonTerminatingPod(pod) && isReadyPod(pod.Status) {
+		if isNonTerminatingPod(&podList.Items[i]) && isReadyPod(&podList.Items[i].Status) {
 			readyPods++
 		}
 	}
diff --git a/shardingsphere-operator/pkg/reconcile/proxyconfig/configmap.go b/shardingsphere-operator/pkg/reconcile/proxyconfig/configmap.go
index d9f70cd..18437b0 100644
--- a/shardingsphere-operator/pkg/reconcile/proxyconfig/configmap.go
+++ b/shardingsphere-operator/pkg/reconcile/proxyconfig/configmap.go
@@ -59,7 +59,7 @@ const defaultLogback = `<?xml version="1.0"?>
 
 // ConstructCascadingConfigmap Construct spec resources to Configmap
 func ConstructCascadingConfigmap(proxyConfig *v1alpha1.ShardingSphereProxyServerConfig) *v1.ConfigMap {
-	y := toYaml(proxyConfig)
+	serveryaml := toYaml(proxyConfig)
 	return &v1.ConfigMap{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      proxyConfig.Name,
@@ -69,7 +69,7 @@ func ConstructCascadingConfigmap(proxyConfig *v1alpha1.ShardingSphereProxyServer
 			},
 		},
 		Data: map[string]string{
-			"server.yaml": y,
+			"server.yaml": serveryaml,
 			"logback.xml": defaultLogback,
 		},
 	}
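
The rename from y to serveryaml in configmap.go is the kind of change varnamelen asks for with min-name-length set to 2: only single-letter names are checked, and only when their uses stretch beyond max-distance. A small illustration, with a hypothetical toYAML helper standing in for the package's toYaml:

    package main

    import "fmt"

    // toYAML is a hypothetical stand-in for the package's toYaml helper.
    func toYAML(v any) string { return fmt.Sprintf("schemaName: %v\n", v) }

    func main() {
        // A one-letter name like the old y is reported by varnamelen once its
        // uses stretch past max-distance (6 lines); serveryaml passes.
        serveryaml := toYAML("sharding_db")
        fmt.Println(serveryaml)
    }
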