Posted to notifications@shardingsphere.apache.org by zh...@apache.org on 2023/05/09 04:20:02 UTC

[shardingsphere-on-cloud] branch main updated: test(operator): fix conflicted tests of storagenode and chaos (#350)

This is an automated email from the ASF dual-hosted git repository.

zhaojinchao pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/shardingsphere-on-cloud.git


The following commit(s) were added to refs/heads/main by this push:
     new 7be767f  test(operator): fix conflicted tests of storagenode and chaos (#350)
7be767f is described below

commit 7be767f3ab51a6b50c00774f498c9344177d3aa7
Author: liyao <ma...@126.com>
AuthorDate: Tue May 9 12:19:57 2023 +0800

    test(operator): fix conflicted tests of storagenode and chaos (#350)
    
    * chore: move shardingspherechaos test from reconcile to controllers
    
    Signed-off-by: mlycore <ma...@126.com>
    
    * chore: update style
    
    Signed-off-by: mlycore <ma...@126.com>
    
    * test: update context to global ctx in suite
    
    Signed-off-by: mlycore <ma...@126.com>
    
    * test: move configmap tests from kubernetes to controllers
    
    Signed-off-by: mlycore <ma...@126.com>
    
    ---------
    
    Signed-off-by: mlycore <ma...@126.com>
---
 .../controllers/compute_node_controller_test.go    | 633 ++++++++++++++++++---
 .../pkg/controllers/controllers_suite_test.go      |  27 +-
 .../controllers/shardingsphere_chaos_controller.go |   2 +-
 .../shardingsphere_chaos_controller_test.go}       |  12 +-
 .../controllers/storage_node_controller_test.go    |  50 +-
 .../kubernetes/configmap/configmap_suite_test.go   | 110 ----
 .../pkg/kubernetes/configmap/configmap_test.go     | 533 -----------------
 .../shardingsphere_chaos_suite_test.go             | 129 -----
 8 files changed, 625 insertions(+), 871 deletions(-)
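For context: each of the removed suite files (configmap_suite_test.go and shardingsphere_chaos_suite_test.go, deleted below) bootstrapped its own envtest environment, client, and context, which is what made the storagenode and chaos tests collide. After this change those specs live in the controllers package and share the suite-level globals. A minimal sketch of that layout follows; it is illustrative only, with simplified names and options rather than the exact contents of controllers_suite_test.go:

// suite_sketch_test.go -- illustrative sketch, not the file touched by this commit.
package controllers_test

import (
	"context"
	"path/filepath"
	"testing"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
	"k8s.io/client-go/kubernetes/scheme"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/envtest"
)

// Shared by every spec in the package: tests use the global ctx and
// k8sClient instead of creating their own context and environment.
var (
	testEnv   *envtest.Environment
	k8sClient client.Client
	ctx       context.Context
	cancel    context.CancelFunc
)

func TestControllers(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Controllers Suite")
}

var _ = BeforeSuite(func() {
	// One context for the whole suite; AfterSuite cancels it.
	ctx, cancel = context.WithCancel(context.TODO())

	testEnv = &envtest.Environment{
		CRDDirectoryPaths:     []string{filepath.Join("..", "..", "config", "crd", "bases")},
		ErrorIfCRDPathMissing: true,
	}
	cfg, err := testEnv.Start()
	Expect(err).NotTo(HaveOccurred())

	k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
	Expect(err).NotTo(HaveOccurred())
	// The real suite also adds the operator CRD types to the scheme and
	// registers the ComputeNode/StorageNode reconcilers with a manager
	// started against the same ctx.
})

var _ = AfterSuite(func() {
	cancel()
	err := testEnv.Stop()
	Expect(err).NotTo(HaveOccurred())
})

Specs moved into the package then reference the shared ctx and k8sClient directly (as in the StorageNode tests below, where context.Background() calls become ctx) instead of constructing their own contexts and test environments.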

diff --git a/shardingsphere-operator/pkg/controllers/compute_node_controller_test.go b/shardingsphere-operator/pkg/controllers/compute_node_controller_test.go
index 4c69336..ca3b403 100644
--- a/shardingsphere-operator/pkg/controllers/compute_node_controller_test.go
+++ b/shardingsphere-operator/pkg/controllers/compute_node_controller_test.go
@@ -18,11 +18,14 @@
 package controllers_test
 
 import (
+	"fmt"
 	"time"
 
 	"github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/api/v1alpha1"
+	"github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/pkg/kubernetes/configmap"
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
+	"gopkg.in/yaml.v2"
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -30,92 +33,92 @@ import (
 )
 
 /*
-func Test_GetReadyProxyInstances(t *testing.T) {
-	// create sample PodList
-	podlist := v1.PodList{
-		Items: []v1.Pod{
-			{
-				Status: v1.PodStatus{
-					Phase: v1.PodRunning,
-					Conditions: []v1.PodCondition{
-						{
-							Type:   v1.PodReady,
-							Status: v1.ConditionTrue,
+	func Test_GetReadyProxyInstances(t *testing.T) {
+		// create sample PodList
+		podlist := v1.PodList{
+			Items: []v1.Pod{
+				{
+					Status: v1.PodStatus{
+						Phase: v1.PodRunning,
+						Conditions: []v1.PodCondition{
+							{
+								Type:   v1.PodReady,
+								Status: v1.ConditionTrue,
+							},
 						},
-					},
-					ContainerStatuses: []v1.ContainerStatus{
-						{
-							Name:  "shardingsphere-proxy",
-							Ready: true,
+						ContainerStatuses: []v1.ContainerStatus{
+							{
+								Name:  "shardingsphere-proxy",
+								Ready: true,
+							},
 						},
 					},
 				},
-			},
-			{
-				Status: v1.PodStatus{
-					Phase: v1.PodRunning,
-					Conditions: []v1.PodCondition{
-						{
-							Type:   v1.PodReady,
-							Status: v1.ConditionTrue,
+				{
+					Status: v1.PodStatus{
+						Phase: v1.PodRunning,
+						Conditions: []v1.PodCondition{
+							{
+								Type:   v1.PodReady,
+								Status: v1.ConditionTrue,
+							},
 						},
-					},
-					ContainerStatuses: []v1.ContainerStatus{
-						{
-							Name:  "another-container",
-							Ready: true,
+						ContainerStatuses: []v1.ContainerStatus{
+							{
+								Name:  "another-container",
+								Ready: true,
+							},
 						},
 					},
 				},
-			},
-			{
-				Status: v1.PodStatus{
-					Phase: v1.PodRunning,
-					Conditions: []v1.PodCondition{
-						{
-							Type:   v1.PodReady,
-							Status: v1.ConditionFalse,
+				{
+					Status: v1.PodStatus{
+						Phase: v1.PodRunning,
+						Conditions: []v1.PodCondition{
+							{
+								Type:   v1.PodReady,
+								Status: v1.ConditionFalse,
+							},
 						},
-					},
-					ContainerStatuses: []v1.ContainerStatus{
-						{
-							Name:  "shardingsphere-proxy",
-							Ready: false,
+						ContainerStatuses: []v1.ContainerStatus{
+							{
+								Name:  "shardingsphere-proxy",
+								Ready: false,
+							},
 						},
 					},
 				},
-			},
-			{
-				Status: v1.PodStatus{
-					Phase: v1.PodPending,
-					Conditions: []v1.PodCondition{
-						{
-							Type:   v1.PodReady,
-							Status: v1.ConditionTrue,
+				{
+					Status: v1.PodStatus{
+						Phase: v1.PodPending,
+						Conditions: []v1.PodCondition{
+							{
+								Type:   v1.PodReady,
+								Status: v1.ConditionTrue,
+							},
 						},
-					},
-					ContainerStatuses: []v1.ContainerStatus{
-						{
-							Name:  "shardingsphere-proxy",
-							Ready: true,
+						ContainerStatuses: []v1.ContainerStatus{
+							{
+								Name:  "shardingsphere-proxy",
+								Ready: true,
+							},
 						},
 					},
 				},
 			},
-		},
-	}
+		}
 
-	// expected result is 1 because only one pod has a ready shardingsphere-proxy container
-	expected := int32(1)
+		// expected result is 1 because only one pod has a ready shardingsphere-proxy container
+		expected := int32(1)
 
-	// call the function to get the actual result
-	actual := getReadyProxyInstances(&podlist)
+		// call the function to get the actual result
+		actual := getReadyProxyInstances(&podlist)
 
-	// compare the expected and actual results
-	if actual != expected {
-		t.Errorf("getReadyInstances returned %d, expected %d", actual, expected)
+		// compare the expected and actual results
+		if actual != expected {
+			t.Errorf("getReadyInstances returned %d, expected %d", actual, expected)
+		}
 	}
-}
 */
 
 var _ = Describe("ComputeNodeController", func() {
@@ -525,3 +528,503 @@ var _ = Describe("ComputeNodeController", func() {
 	})
 
 })
+
+var _ = Describe("GetNamespacedByName", func() {
+	Context("Assert Get ConfigMap ", func() {
+		var (
+			cn = &v1alpha1.ComputeNode{
+				TypeMeta: metav1.TypeMeta{
+					Kind:       "ComputeNode",
+					APIVersion: fmt.Sprintf("%s/%s", v1alpha1.GroupVersion.Group, v1alpha1.GroupVersion.Version),
+				},
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "test",
+					Namespace: "default",
+					Labels: map[string]string{
+						"test_key": "test_value",
+					},
+				},
+				Spec: v1alpha1.ComputeNodeSpec{
+					Bootstrap: v1alpha1.BootstrapConfig{
+						AgentConfig: v1alpha1.AgentConfig{
+							Plugins: v1alpha1.AgentPlugin{
+								Logging: &v1alpha1.PluginLogging{
+									File: v1alpha1.LoggingFile{
+										Props: v1alpha1.Properties{
+											"test_logging_key": "test_logging_value",
+										},
+									},
+								},
+								Metrics: &v1alpha1.PluginMetrics{
+									Prometheus: v1alpha1.Prometheus{
+										Host: "test_host",
+										Port: 1234,
+										Props: v1alpha1.Properties{
+											"test_metrics_key": "test_metrics_value",
+										},
+									},
+								},
+								Tracing: &v1alpha1.PluginTracing{
+									OpenTracing: v1alpha1.OpenTracing{
+										Props: v1alpha1.Properties{
+											"test_opentracing_key": "test_opentracing_value",
+										},
+									},
+									OpenTelemetry: v1alpha1.OpenTelemetry{
+										Props: v1alpha1.Properties{
+											"test_opentelemetry_key": "test_opentelemetry_value",
+										},
+									},
+								},
+							},
+						},
+					},
+				},
+			}
+		)
+
+		It("get configmap should be equal", func() {
+			c := configmap.NewConfigMapClient(k8sClient)
+
+			cm := c.Build(ctx, cn)
+			err := c.Create(ctx, cm)
+			Expect(err).To(BeNil())
+
+			expect, err := c.GetByNamespacedName(ctx, types.NamespacedName{
+				Name:      cn.Name,
+				Namespace: cn.Namespace,
+			})
+			Expect(expect).To(Not(BeNil()))
+
+			Expect(err).To(BeNil())
+			Expect(expect.Name).To(Equal(cm.Name))
+			Expect(expect.Namespace).To(Equal(cm.Namespace))
+			Expect(expect.Data).To(Equal(cm.Data))
+		})
+	})
+})
+
+var _ = Describe("Default ConfigMap", func() {
+	var (
+		expect = &corev1.ConfigMap{}
+		cn     = &v1alpha1.ComputeNode{
+			TypeMeta: metav1.TypeMeta{
+				Kind:       "ComputeNode",
+				APIVersion: fmt.Sprintf("%s/%s", v1alpha1.GroupVersion.Group, v1alpha1.GroupVersion.Version),
+			},
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "test_name",
+				Namespace: "test_namespace",
+				Labels: map[string]string{
+					"test_key": "test_value",
+				},
+			},
+		}
+	)
+
+	BeforeEach(func() {
+		expect.Name = "test_name"
+		expect.Namespace = "test_namespace"
+		expect.Labels = map[string]string{
+			"test_key": "test_value",
+		}
+		expect.Data = map[string]string{}
+		expect.Data[configmap.ConfigDataKeyForLogback] = configmap.DefaultLogback
+		expect.Data[configmap.ConfigDataKeyForServer] = configmap.DefaultServerConfig
+		expect.Data[configmap.ConfigDataKeyForAgent] = ""
+	})
+
+	Context("Assert ObjectMeta", func() {
+		c := configmap.NewConfigMapClient(nil)
+		cm := c.Build(ctx, cn)
+
+		It("name should be equal", func() {
+			Expect(expect.Name).To(Equal(cm.Name))
+		})
+		It("namespace should be equal", func() {
+			Expect(expect.Namespace).To(Equal(cm.Namespace))
+		})
+		It("labels should be equal", func() {
+			Expect(expect.Labels).To(Equal(cm.Labels))
+		})
+	})
+
+	Context("Assert Default Spec Data", func() {
+		c := configmap.NewConfigMapClient(nil)
+		cm := c.Build(ctx, cn)
+
+		It("default server config should be equal", func() {
+			Expect(expect.Data[configmap.ConfigDataKeyForServer]).To(Equal(cm.Data[configmap.ConfigDataKeyForServer]))
+		})
+		It("default logback should be equal", func() {
+			Expect(expect.Data[configmap.ConfigDataKeyForLogback]).To(Equal(cm.Data[configmap.ConfigDataKeyForLogback]))
+		})
+		It("default agent config should be equal", func() {
+			Expect(expect.Data[configmap.ConfigDataKeyForAgent]).To(Equal(cm.Data[configmap.ConfigDataKeyForAgent]))
+		})
+	})
+
+	Context("Assert Update Spec Data", func() {
+		cn.TypeMeta = metav1.TypeMeta{
+			Kind:       "ComputeNode",
+			APIVersion: fmt.Sprintf("%s/%s", v1alpha1.GroupVersion.Group, v1alpha1.GroupVersion.Version),
+		}
+		cn.ObjectMeta = metav1.ObjectMeta{
+			Name:      "test_name",
+			Namespace: "test_namespace",
+			Labels: map[string]string{
+				"test_key": "test_value",
+			},
+			Annotations: map[string]string{
+				"test_anno_key": "test_anno_value",
+			},
+		}
+		cn.Spec.Bootstrap = v1alpha1.BootstrapConfig{
+			ServerConfig: v1alpha1.ServerConfig{
+				Authority: v1alpha1.ComputeNodeAuthority{
+					Users: []v1alpha1.ComputeNodeUser{
+						{
+							User:     "test_user@%",
+							Password: "test_password",
+						},
+					},
+					Privilege: v1alpha1.ComputeNodePrivilege{
+						Type: v1alpha1.AllPermitted,
+					},
+				},
+				Mode: v1alpha1.ComputeNodeServerMode{
+					Type: v1alpha1.ModeTypeCluster,
+					Repository: v1alpha1.Repository{
+						Type: v1alpha1.RepositoryTypeZookeeper,
+						Props: v1alpha1.Properties{
+							"test_repo_key": "test_repo_value",
+						},
+					},
+				},
+				Props: v1alpha1.Properties{
+					"test_prop_key": "test_prop_value",
+				},
+			},
+		}
+
+		c := configmap.NewConfigMapClient(nil)
+		cm := c.Build(ctx, cn)
+		cm = configmap.UpdateComputeNodeConfigMap(cn, cm)
+		cfg := &v1alpha1.ServerConfig{}
+		err := yaml.Unmarshal([]byte(cm.Data[configmap.ConfigDataKeyForServer]), &cfg)
+		if err != nil {
+			fmt.Printf("Err: %s\n", err)
+		}
+
+		It("server config should be equal", func() {
+			Expect(cfg.Authority).To(Equal(cn.Spec.Bootstrap.ServerConfig.Authority))
+			Expect(cfg.Mode).To(Equal(cn.Spec.Bootstrap.ServerConfig.Mode))
+			Expect(cfg.Props).To(Equal(cn.Spec.Bootstrap.ServerConfig.Props))
+		})
+		It("default logback should be equal", func() {
+			Expect(expect.Data[configmap.ConfigDataKeyForLogback]).To(Equal(cm.Data[configmap.ConfigDataKeyForLogback]))
+		})
+		It("default agent config should be equal", func() {
+			Expect(expect.Data[configmap.ConfigDataKeyForAgent]).To(Equal(cm.Data[configmap.ConfigDataKeyForAgent]))
+		})
+	})
+})
+
+var _ = Describe("Standalone Server Config", func() {
+	Context("Assert Simple Service Config Data", func() {
+		cn := &v1alpha1.ComputeNode{
+			TypeMeta: metav1.TypeMeta{
+				Kind:       "ComputeNode",
+				APIVersion: fmt.Sprintf("%s/%s", v1alpha1.GroupVersion.Group, v1alpha1.GroupVersion.Version),
+			},
+			Spec: v1alpha1.ComputeNodeSpec{
+				Bootstrap: v1alpha1.BootstrapConfig{
+					ServerConfig: v1alpha1.ServerConfig{
+						Mode: v1alpha1.ComputeNodeServerMode{
+							Type: v1alpha1.ModeTypeStandalone,
+						},
+					},
+				},
+			},
+		}
+
+		expect := &v1alpha1.ServerConfig{}
+		c := configmap.NewConfigMapClient(nil)
+		cm := c.Build(ctx, cn)
+		err := yaml.Unmarshal([]byte(cm.Data[configmap.ConfigDataKeyForServer]), &expect)
+		if err != nil {
+			fmt.Printf("Err: %s\n", err)
+		}
+
+		It("server config authority should be equal", func() {
+			Expect(expect.Authority).To(Equal(cn.Spec.Bootstrap.ServerConfig.Authority))
+		})
+		It("server config mode should be equal", func() {
+			Expect(expect.Mode).To(Equal(cn.Spec.Bootstrap.ServerConfig.Mode))
+		})
+		It("server config props should be equal", func() {
+			Expect(expect.Props).To(Equal(cn.Spec.Bootstrap.ServerConfig.Props))
+		})
+	})
+
+	Context("Assert Full Service Config Data", func() {
+		cn := &v1alpha1.ComputeNode{
+			TypeMeta: metav1.TypeMeta{
+				Kind:       "ComputeNode",
+				APIVersion: fmt.Sprintf("%s/%s", v1alpha1.GroupVersion.Group, v1alpha1.GroupVersion.Version),
+			},
+			Spec: v1alpha1.ComputeNodeSpec{
+				Bootstrap: v1alpha1.BootstrapConfig{
+					ServerConfig: v1alpha1.ServerConfig{
+						Authority: v1alpha1.ComputeNodeAuthority{
+							Users: []v1alpha1.ComputeNodeUser{
+								{
+									User:     "test_user@%",
+									Password: "test_password",
+								},
+							},
+							Privilege: v1alpha1.ComputeNodePrivilege{
+								Type: v1alpha1.AllPermitted,
+							},
+						},
+						Mode: v1alpha1.ComputeNodeServerMode{
+							Type: v1alpha1.ModeTypeStandalone,
+						},
+						Props: v1alpha1.Properties{
+							"test_prop_key": "test_prop_value",
+						},
+					},
+				},
+			},
+		}
+
+		expect := &v1alpha1.ServerConfig{}
+		c := configmap.NewConfigMapClient(nil)
+		cm := c.Build(ctx, cn)
+		err := yaml.Unmarshal([]byte(cm.Data[configmap.ConfigDataKeyForServer]), &expect)
+		if err != nil {
+			fmt.Printf("Err: %s\n", err)
+		}
+		It("server config authority should be equal", func() {
+			Expect(expect.Authority).To(Equal(cn.Spec.Bootstrap.ServerConfig.Authority))
+		})
+		It("server config mode should be equal", func() {
+			Expect(expect.Mode).To(Equal(cn.Spec.Bootstrap.ServerConfig.Mode))
+		})
+		It("server config props should be equal", func() {
+			Expect(expect.Props).To(Equal(cn.Spec.Bootstrap.ServerConfig.Props))
+		})
+	})
+})
+
+var _ = Describe("Cluster Server Config", func() {
+	var (
+		expect = &v1alpha1.ServerConfig{}
+		cn     = &v1alpha1.ComputeNode{
+			TypeMeta: metav1.TypeMeta{
+				Kind:       "ComputeNode",
+				APIVersion: fmt.Sprintf("%s/%s", v1alpha1.GroupVersion.Group, v1alpha1.GroupVersion.Version),
+			},
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "test_name",
+				Namespace: "test_namespace",
+				Labels: map[string]string{
+					"test_key": "test_value",
+				},
+			},
+			Spec: v1alpha1.ComputeNodeSpec{
+				Bootstrap: v1alpha1.BootstrapConfig{
+					ServerConfig: v1alpha1.ServerConfig{
+						Authority: v1alpha1.ComputeNodeAuthority{
+							Users: []v1alpha1.ComputeNodeUser{
+								{
+									User:     "test_user@%",
+									Password: "test_password",
+								},
+							},
+							Privilege: v1alpha1.ComputeNodePrivilege{
+								Type: v1alpha1.AllPermitted,
+							},
+						},
+						Mode: v1alpha1.ComputeNodeServerMode{
+							Type: v1alpha1.ModeTypeCluster,
+							Repository: v1alpha1.Repository{
+								Type: v1alpha1.RepositoryTypeZookeeper,
+								Props: v1alpha1.Properties{
+									"test_repo_key": "test_repo_value",
+								},
+							},
+						},
+						Props: v1alpha1.Properties{
+							"test_prop_key": "test_prop_value",
+						},
+					},
+				},
+			},
+		}
+	)
+
+	BeforeEach(func() {
+		c := configmap.NewConfigMapClient(nil)
+		cm := c.Build(ctx, cn)
+
+		err := yaml.Unmarshal([]byte(cm.Data[configmap.ConfigDataKeyForServer]), &expect)
+		if err != nil {
+			fmt.Printf("Err: %s\n", err)
+		}
+	})
+
+	Context("Assert Service Config Data", func() {
+		It("server config authority should be equal", func() {
+			Expect(expect.Authority).To(Equal(cn.Spec.Bootstrap.ServerConfig.Authority))
+		})
+		It("server config mode should be equal", func() {
+			Expect(expect.Mode).To(Equal(cn.Spec.Bootstrap.ServerConfig.Mode))
+		})
+		It("server config props should be equal", func() {
+			Expect(expect.Props).To(Equal(cn.Spec.Bootstrap.ServerConfig.Props))
+		})
+	})
+})
+
+var _ = Describe("Logback Config", func() {
+	Context("Assert Logback Config Data From Annotations", func() {
+		var (
+			expect = ""
+			cn     = &v1alpha1.ComputeNode{
+				TypeMeta: metav1.TypeMeta{
+					Kind:       "ComputeNode",
+					APIVersion: fmt.Sprintf("%s/%s", v1alpha1.GroupVersion.Group, v1alpha1.GroupVersion.Version),
+				},
+				ObjectMeta: metav1.ObjectMeta{
+					Annotations: map[string]string{
+						configmap.AnnoLogbackConfig: "test_logback_value",
+					},
+				},
+				Spec: v1alpha1.ComputeNodeSpec{
+					Bootstrap: v1alpha1.BootstrapConfig{
+						LogbackConfig: configmap.DefaultLogback,
+					},
+				},
+			}
+		)
+
+		c := configmap.NewConfigMapClient(nil)
+		cm := c.Build(ctx, cn)
+		expect = "test_logback_value"
+
+		It("Logback config should be equal", func() {
+			Expect(expect).To(Equal(cm.Data[configmap.ConfigDataKeyForLogback]))
+		})
+	})
+
+	Context("Assert Logback Config Data", func() {
+		var (
+			expect = ""
+			cn     = &v1alpha1.ComputeNode{
+				TypeMeta: metav1.TypeMeta{
+					Kind:       "ComputeNode",
+					APIVersion: fmt.Sprintf("%s/%s", v1alpha1.GroupVersion.Group, v1alpha1.GroupVersion.Version),
+				},
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "test_name",
+					Namespace: "test_namespace",
+					Labels: map[string]string{
+						"test_key": "test_value",
+					},
+				},
+				Spec: v1alpha1.ComputeNodeSpec{
+					Bootstrap: v1alpha1.BootstrapConfig{
+						LogbackConfig: configmap.DefaultLogback,
+					},
+				},
+			}
+		)
+
+		c := configmap.NewConfigMapClient(nil)
+		cm := c.Build(ctx, cn)
+		expect = configmap.DefaultLogback
+
+		It("Logback config should be equal", func() {
+			Expect(expect).To(Equal(cm.Data[configmap.ConfigDataKeyForLogback]))
+		})
+	})
+})
+
+var _ = Describe("Agent Config", func() {
+	Context("Assert Full Agent Config Data", func() {
+		var (
+			expect = &v1alpha1.AgentConfig{}
+			cn     = &v1alpha1.ComputeNode{
+				TypeMeta: metav1.TypeMeta{
+					Kind:       "ComputeNode",
+					APIVersion: fmt.Sprintf("%s/%s", v1alpha1.GroupVersion.Group, v1alpha1.GroupVersion.Version),
+				},
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "test_name",
+					Namespace: "test_namespace",
+					Labels: map[string]string{
+						"test_key": "test_value",
+					},
+				},
+				Spec: v1alpha1.ComputeNodeSpec{
+					Bootstrap: v1alpha1.BootstrapConfig{
+						AgentConfig: v1alpha1.AgentConfig{
+							Plugins: v1alpha1.AgentPlugin{
+								Logging: &v1alpha1.PluginLogging{
+									File: v1alpha1.LoggingFile{
+										Props: v1alpha1.Properties{
+											"test_logging_key": "test_logging_value",
+										},
+									},
+								},
+								Metrics: &v1alpha1.PluginMetrics{
+									Prometheus: v1alpha1.Prometheus{
+										Host: "test_host",
+										Port: 1234,
+										Props: v1alpha1.Properties{
+											"test_metrics_key": "test_metrics_value",
+										},
+									},
+								},
+								Tracing: &v1alpha1.PluginTracing{
+									OpenTracing: v1alpha1.OpenTracing{
+										Props: v1alpha1.Properties{
+											"test_opentracing_key": "test_opentracing_value",
+										},
+									},
+									OpenTelemetry: v1alpha1.OpenTelemetry{
+										Props: v1alpha1.Properties{
+											"test_opentelemetry_key": "test_opentelemetry_value",
+										},
+									},
+								},
+							},
+						},
+					},
+				},
+			}
+		)
+
+		c := configmap.NewConfigMapClient(nil)
+		cm := c.Build(ctx, cn)
+
+		err := yaml.Unmarshal([]byte(cm.Data[configmap.ConfigDataKeyForAgent]), &expect)
+		if err != nil {
+			fmt.Printf("Err: %s\n", err)
+		}
+
+		It("agent config plugins should be equal", func() {
+			Expect(expect.Plugins).To(Equal(cn.Spec.Bootstrap.AgentConfig.Plugins))
+		})
+		It("agent config logging should be equal", func() {
+			Expect(expect.Plugins.Logging).To(Equal(cn.Spec.Bootstrap.AgentConfig.Plugins.Logging))
+		})
+		It("agent config metrics should be equal", func() {
+			Expect(expect.Plugins.Metrics).To(Equal(cn.Spec.Bootstrap.AgentConfig.Plugins.Metrics))
+		})
+		It("agent config tracing should be equal", func() {
+			Expect(expect.Plugins.Tracing).To(Equal(cn.Spec.Bootstrap.AgentConfig.Plugins.Tracing))
+		})
+	})
+})
diff --git a/shardingsphere-operator/pkg/controllers/controllers_suite_test.go b/shardingsphere-operator/pkg/controllers/controllers_suite_test.go
index 1cd052f..72236e9 100644
--- a/shardingsphere-operator/pkg/controllers/controllers_suite_test.go
+++ b/shardingsphere-operator/pkg/controllers/controllers_suite_test.go
@@ -23,6 +23,7 @@ import (
 	"os/exec"
 	"path/filepath"
 	"testing"
+	"time"
 
 	"github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/api/v1alpha1"
 	"github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/cmd/shardingsphere-operator/manager"
@@ -30,6 +31,7 @@ import (
 	"github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/pkg/kubernetes/configmap"
 	"github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/pkg/kubernetes/deployment"
 	"github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/pkg/kubernetes/service"
+
 	dbmesh_aws "github.com/database-mesh/golang-sdk/aws"
 	dbmesh_rds "github.com/database-mesh/golang-sdk/aws/client/rds"
 	dbmeshv1alpha1 "github.com/database-mesh/golang-sdk/kubernetes/api/v1alpha1"
@@ -50,6 +52,7 @@ var (
 	testEnv   *envtest.Environment
 	ctx       context.Context
 	cancel    context.CancelFunc
+	err       error
 )
 
 func TestControllers(t *testing.T) {
@@ -83,9 +86,11 @@ var _ = BeforeSuite(func() {
 			filepath.Join("..", "..", "config", "crd", "bases"),
 		},
 		ErrorIfCRDPathMissing: true,
+		CRDInstallOptions: envtest.CRDInstallOptions{
+			MaxTime: 60 * time.Second,
+		},
 	}
 
-	var err error
 	// cfg is defined in this file globally.
 	cfg, err = testEnv.Start()
 	Expect(err).NotTo(HaveOccurred())
@@ -101,13 +106,10 @@ var _ = BeforeSuite(func() {
 
 	// set metrics bind address to :9081, diff from default metric port:8080 and health check port:8081 to avoid conflict port when running tests
 	os.Args = append(os.Args, "--metrics-bind-address=:9081")
-
 	opt := manager.ParseOptionsFromCmdFlags()
 
 	k8sManager, err := ctrl.NewManager(cfg, opt.Options)
-
 	Expect(err).ToNot(HaveOccurred())
-
 	// print k8sManager Options
 	sess := dbmesh_aws.NewSessions().SetCredential("AwsRegion", "AwsAccessKeyID", "AwsSecretAccessKey").Build()
 	err = (&controllers.StorageNodeReconciler{
@@ -128,6 +130,22 @@ var _ = BeforeSuite(func() {
 		ConfigMap:  configmap.NewConfigMapClient(k8sManager.GetClient()),
 	}).SetupWithManager(k8sManager)
 	Expect(err).ToNot(HaveOccurred())
+	/*
+
+		ctl := gomock.NewController(GinkgoT())
+		clientset, err := clientset.NewForConfig(k8sManager.GetConfig())
+		err = (&controllers.ShardingSphereChaosReconciler{
+			Client:    k8sManager.GetClient(),
+			Scheme:    k8sManager.GetScheme(),
+			Log:       logf.Log,
+			Chaos:     mockChaos.NewMockChaos(ctl),
+			Job:       job.NewJob(k8sManager.GetClient()),
+			ConfigMap: configmap.NewConfigMapClient(k8sManager.GetClient()),
+			Events:    k8sManager.GetEventRecorderFor("shardingsphere-chaos-controller"),
+			ClientSet: clientset,
+		}).SetupWithManager(k8sManager)
+		Expect(err).ToNot(HaveOccurred())
+	*/
 
 	go func() {
 		defer GinkgoRecover()
@@ -141,5 +159,4 @@ var _ = AfterSuite(func() {
 	By("tearing down the test environment for controllers")
 	err := testEnv.Stop()
 	Expect(err).NotTo(HaveOccurred())
-
 })
diff --git a/shardingsphere-operator/pkg/controllers/shardingsphere_chaos_controller.go b/shardingsphere-operator/pkg/controllers/shardingsphere_chaos_controller.go
index c3d6040..2123747 100644
--- a/shardingsphere-operator/pkg/controllers/shardingsphere_chaos_controller.go
+++ b/shardingsphere-operator/pkg/controllers/shardingsphere_chaos_controller.go
@@ -67,8 +67,8 @@ type ShardingSphereChaosReconciler struct {
 
 	Scheme    *runtime.Scheme
 	Log       logr.Logger
-	ClientSet *clientset.Clientset
 	Events    record.EventRecorder
+	ClientSet *clientset.Clientset
 
 	Chaos     sschaos.Chaos
 	Job       job.Job
diff --git a/shardingsphere-operator/pkg/reconcile/shardingspherechaos/shardingsphere_chaos_test.go b/shardingsphere-operator/pkg/controllers/shardingsphere_chaos_controller_test.go
similarity index 95%
rename from shardingsphere-operator/pkg/reconcile/shardingspherechaos/shardingsphere_chaos_test.go
rename to shardingsphere-operator/pkg/controllers/shardingsphere_chaos_controller_test.go
index 62cf4d8..8564fda 100644
--- a/shardingsphere-operator/pkg/reconcile/shardingspherechaos/shardingsphere_chaos_test.go
+++ b/shardingsphere-operator/pkg/controllers/shardingsphere_chaos_controller_test.go
@@ -15,11 +15,14 @@
  * limitations under the License.
  */
 
-package shardingspherechaos_test
+package controllers_test
 
+/*
 import (
-	"context"
 	"fmt"
+	"math/rand"
+	"time"
+
 	"github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/api/v1alpha1"
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
@@ -35,7 +38,8 @@ var _ = Describe("ShardingSphereChaos", func() {
 			ssChaos   *v1alpha1.ShardingSphereChaos
 			name      = fmt.Sprintf("%s-%d", "test.sschaos-", rand.Int31())
 			namespace = "default"
-			ctx       = context.Background()
+			// ctx       = context.Background()
+			// ctx = context.TODO()
 		)
 		BeforeEach(func() {
 			ssChaos = &v1alpha1.ShardingSphereChaos{
@@ -87,3 +91,5 @@ var _ = Describe("ShardingSphereChaos", func() {
 	})
 
 })
+
+*/
diff --git a/shardingsphere-operator/pkg/controllers/storage_node_controller_test.go b/shardingsphere-operator/pkg/controllers/storage_node_controller_test.go
index a9f07ea..e7dadb9 100644
--- a/shardingsphere-operator/pkg/controllers/storage_node_controller_test.go
+++ b/shardingsphere-operator/pkg/controllers/storage_node_controller_test.go
@@ -86,10 +86,10 @@ var _ = Describe("StorageNode Controller Mock Test", func() {
 				Status: v1alpha1.StorageNodeStatus{},
 			}
 
-			Expect(fakeClient.Create(context.Background(), storageNode)).Should(Succeed())
+			Expect(fakeClient.Create(ctx, storageNode)).Should(Succeed())
 			sn := &v1alpha1.StorageNode{}
-			Expect(fakeClient.Get(context.Background(), client.ObjectKey{Name: "test-storage-node", Namespace: "test-namespace"}, sn)).Should(Succeed())
-			Expect(fakeClient.Delete(context.Background(), storageNode)).Should(Succeed())
+			Expect(fakeClient.Get(ctx, client.ObjectKey{Name: "test-storage-node", Namespace: "test-namespace"}, sn)).Should(Succeed())
+			Expect(fakeClient.Delete(ctx, storageNode)).Should(Succeed())
 		})
 	})
 
@@ -105,16 +105,16 @@ var _ = Describe("StorageNode Controller Mock Test", func() {
 				},
 				Status: v1alpha1.StorageNodeStatus{},
 			}
-			Expect(fakeClient.Create(context.Background(), storageNode)).Should(Succeed())
+			Expect(fakeClient.Create(ctx, storageNode)).Should(Succeed())
 			req := ctrl.Request{
 				NamespacedName: client.ObjectKey{
 					Name:      "test-storage-node",
 					Namespace: "test-namespace",
 				},
 			}
-			_, err := reconciler.Reconcile(context.Background(), req)
+			_, err := reconciler.Reconcile(ctx, req)
 			Expect(client.IgnoreNotFound(err)).Should(Succeed())
-			Expect(fakeClient.Delete(context.Background(), storageNode)).Should(Succeed())
+			Expect(fakeClient.Delete(ctx, storageNode)).Should(Succeed())
 		})
 	})
 
@@ -138,7 +138,7 @@ var _ = Describe("StorageNode Controller Mock Test", func() {
 					Provisioner: dbmeshv1alpha1.ProvisionerAWSRDSInstance,
 				},
 			}
-			Expect(fakeClient.Create(context.Background(), dbClass)).Should(Succeed())
+			Expect(fakeClient.Create(ctx, dbClass)).Should(Succeed())
 
 			// create storageNode
 			storageNode := &v1alpha1.StorageNode{
@@ -151,7 +151,7 @@ var _ = Describe("StorageNode Controller Mock Test", func() {
 				},
 			}
 
-			Expect(fakeClient.Create(context.Background(), storageNode)).Should(Succeed())
+			Expect(fakeClient.Create(ctx, storageNode)).Should(Succeed())
 		})
 
 		AfterEach(func() {
@@ -177,11 +177,11 @@ var _ = Describe("StorageNode Controller Mock Test", func() {
 
 			// mock aws rds client
 			mockAws.EXPECT().GetInstance(gomock.Any(), gomock.Any()).Return(rdsInstance, nil).AnyTimes()
-			_, err := reconciler.Reconcile(context.Background(), req)
+			_, err := reconciler.Reconcile(ctx, req)
 			Expect(err).To(BeNil())
 
 			newSN := &v1alpha1.StorageNode{}
-			Expect(fakeClient.Get(context.Background(), client.ObjectKey{Name: "test-storage-node", Namespace: "test-namespace"}, newSN)).Should(Succeed())
+			Expect(fakeClient.Get(ctx, client.ObjectKey{Name: "test-storage-node", Namespace: "test-namespace"}, newSN)).Should(Succeed())
 			Expect(newSN.Status.Phase).To(Equal(v1alpha1.StorageNodePhaseNotReady))
 			Expect(newSN.Status.Instances).To(HaveLen(1))
 			Expect(newSN.Status.Instances[0].Status).To(Equal("creating"))
@@ -205,11 +205,11 @@ var _ = Describe("StorageNode Controller Mock Test", func() {
 
 			// mock aws rds client
 			mockAws.EXPECT().GetInstance(gomock.Any(), gomock.Any()).Return(rdsInstance, nil)
-			_, err := reconciler.Reconcile(context.Background(), req)
+			_, err := reconciler.Reconcile(ctx, req)
 			Expect(err).To(BeNil())
 
 			newSN := &v1alpha1.StorageNode{}
-			Expect(fakeClient.Get(context.Background(), client.ObjectKey{Name: "test-storage-node", Namespace: "test-namespace"}, newSN)).Should(Succeed())
+			Expect(fakeClient.Get(ctx, client.ObjectKey{Name: "test-storage-node", Namespace: "test-namespace"}, newSN)).Should(Succeed())
 
 			Expect(newSN.Status.Phase).To(Equal(v1alpha1.StorageNodePhaseReady))
 			Expect(newSN.Status.Instances).To(HaveLen(1))
@@ -235,17 +235,17 @@ var _ = Describe("StorageNode Controller Mock Test", func() {
 			// mock aws rds client, get instance
 			mockAws.EXPECT().GetInstance(gomock.Any(), gomock.Any()).Return(rdsInstance, nil).AnyTimes()
 			// reconcile storage node, add instance and set status to ready
-			_, err := reconciler.Reconcile(context.Background(), req)
+			_, err := reconciler.Reconcile(ctx, req)
 			Expect(err).To(BeNil())
 
 			// delete storage node
 			sn := &v1alpha1.StorageNode{}
-			Expect(fakeClient.Get(context.Background(), client.ObjectKey{Name: "test-storage-node", Namespace: "test-namespace"}, sn)).Should(Succeed())
-			Expect(fakeClient.Delete(context.Background(), sn)).Should(Succeed())
+			Expect(fakeClient.Get(ctx, client.ObjectKey{Name: "test-storage-node", Namespace: "test-namespace"}, sn)).Should(Succeed())
+			Expect(fakeClient.Delete(ctx, sn)).Should(Succeed())
 
 			// mock aws rds client, delete instance
 			mockAws.EXPECT().DeleteInstance(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
-			_, err = reconciler.Reconcile(context.Background(), req)
+			_, err = reconciler.Reconcile(ctx, req)
 			Expect(err).To(BeNil())
 		})
 	})
@@ -273,13 +273,13 @@ var _ = Describe("StorageNode Controller Suite Test", func() {
 			},
 		}
 
-		Expect(k8sClient.Create(context.Background(), databaseClass)).Should(Succeed())
+		Expect(k8sClient.Create(ctx, databaseClass)).Should(Succeed())
 	})
 
 	AfterEach(func() {
 		databaseClass := &dbmeshv1alpha1.DatabaseClass{}
-		Expect(k8sClient.Get(context.Background(), client.ObjectKey{Name: databaseClassName}, databaseClass)).Should(Succeed())
-		Expect(k8sClient.Delete(context.Background(), databaseClass)).Should(Succeed())
+		Expect(k8sClient.Get(ctx, client.ObjectKey{Name: databaseClassName}, databaseClass)).Should(Succeed())
+		Expect(k8sClient.Delete(ctx, databaseClass)).Should(Succeed())
 	})
 
 	Context("reconcile storageNode", func() {
@@ -323,17 +323,17 @@ var _ = Describe("StorageNode Controller Suite Test", func() {
 			}
 
 			// create resource
-			Expect(k8sClient.Create(context.Background(), node)).Should(Succeed())
+			Expect(k8sClient.Create(ctx, node)).Should(Succeed())
 
 			// check storage node status
 			Eventually(func() v1alpha1.StorageNodePhaseStatus {
 				newSN := &v1alpha1.StorageNode{}
-				Expect(k8sClient.Get(context.Background(), client.ObjectKey{Name: nodeName, Namespace: "default"}, newSN)).Should(Succeed())
+				Expect(k8sClient.Get(ctx, client.ObjectKey{Name: nodeName, Namespace: "default"}, newSN)).Should(Succeed())
 				return newSN.Status.Phase
 			}, 10*time.Second, 1*time.Second).Should(Equal(v1alpha1.StorageNodePhaseReady))
 
 			// delete resource
-			Expect(k8sClient.Delete(context.Background(), node)).Should(Succeed())
+			Expect(k8sClient.Delete(ctx, node)).Should(Succeed())
 		})
 
 		Context("reconcile storageNode with Creating instance", func() {
@@ -374,17 +374,17 @@ var _ = Describe("StorageNode Controller Suite Test", func() {
 				}
 
 				// create resource
-				Expect(k8sClient.Create(context.Background(), node)).Should(Succeed())
+				Expect(k8sClient.Create(ctx, node)).Should(Succeed())
 
 				// check storage node status
 				Eventually(func() v1alpha1.StorageNodePhaseStatus {
 					newSN := &v1alpha1.StorageNode{}
-					Expect(k8sClient.Get(context.Background(), client.ObjectKey{Name: nodeName, Namespace: "default"}, newSN)).Should(Succeed())
+					Expect(k8sClient.Get(ctx, client.ObjectKey{Name: nodeName, Namespace: "default"}, newSN)).Should(Succeed())
 					return newSN.Status.Phase
 				}, 10*time.Second, 1*time.Second).Should(Equal(v1alpha1.StorageNodePhaseNotReady))
 
 				// delete resource
-				Expect(k8sClient.Delete(context.Background(), node)).Should(Succeed())
+				Expect(k8sClient.Delete(ctx, node)).Should(Succeed())
 			})
 		})
 	})
diff --git a/shardingsphere-operator/pkg/kubernetes/configmap/configmap_suite_test.go b/shardingsphere-operator/pkg/kubernetes/configmap/configmap_suite_test.go
deleted file mode 100644
index 68ee272..0000000
--- a/shardingsphere-operator/pkg/kubernetes/configmap/configmap_suite_test.go
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package configmap_test
-
-import (
-	"context"
-	"fmt"
-	"path/filepath"
-	"testing"
-	"time"
-
-	"github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/api/v1alpha1"
-	"github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/pkg/controllers"
-	"github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/pkg/kubernetes/configmap"
-	"github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/pkg/kubernetes/deployment"
-	"github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/pkg/kubernetes/service"
-
-	. "github.com/onsi/ginkgo/v2"
-	. "github.com/onsi/gomega"
-	"k8s.io/client-go/kubernetes/scheme"
-	"k8s.io/client-go/rest"
-	ctrl "sigs.k8s.io/controller-runtime"
-	"sigs.k8s.io/controller-runtime/pkg/client"
-	"sigs.k8s.io/controller-runtime/pkg/envtest"
-	logf "sigs.k8s.io/controller-runtime/pkg/log"
-	"sigs.k8s.io/controller-runtime/pkg/log/zap"
-)
-
-func TestConfigMap(t *testing.T) {
-	RegisterFailHandler(Fail)
-	RunSpecs(t, "ConfigMap Suite")
-}
-
-var (
-	cfg       *rest.Config
-	k8sClient client.Client
-	testEnv   *envtest.Environment
-	ctx       context.Context
-	cancel    context.CancelFunc
-	err       error
-)
-
-var _ = BeforeSuite(func() {
-	logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))
-	ctx, cancel = context.WithCancel(context.TODO())
-
-	By("bootstrapping test environment")
-	testEnv = &envtest.Environment{
-		CRDDirectoryPaths:     []string{filepath.Join("..", "..", "..", "config", "crd", "bases")},
-		ErrorIfCRDPathMissing: true,
-		CRDInstallOptions: envtest.CRDInstallOptions{
-			MaxTime: 60 * time.Second,
-		},
-	}
-	cfg, err = testEnv.Start()
-	Expect(err).NotTo(HaveOccurred())
-	Expect(cfg).NotTo(BeNil())
-
-	err = v1alpha1.AddToScheme(scheme.Scheme)
-	Expect(err).NotTo(HaveOccurred())
-
-	k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
-	fmt.Printf("before: %p\n", k8sClient)
-	Expect(err).NotTo(HaveOccurred())
-	Expect(k8sClient).NotTo(BeNil())
-
-	k8sManager, err := ctrl.NewManager(cfg, ctrl.Options{
-		Scheme: scheme.Scheme,
-	})
-	Expect(err).ToNot(HaveOccurred())
-
-	err = (&controllers.ComputeNodeReconciler{
-		Client:     k8sManager.GetClient(),
-		Scheme:     k8sManager.GetScheme(),
-		Log:        logf.Log,
-		Deployment: deployment.NewDeploymentClient(k8sManager.GetClient()),
-		Service:    service.NewServiceClient(k8sManager.GetClient()),
-		ConfigMap:  configmap.NewConfigMapClient(k8sManager.GetClient()),
-	}).SetupWithManager(k8sManager)
-	Expect(err).ToNot(HaveOccurred())
-
-	//+kubebuilder:scaffold:scheme
-	go func() {
-		defer GinkgoRecover()
-		err = k8sManager.Start(ctx)
-		Expect(err).ToNot(HaveOccurred(), "failed to run manager")
-	}()
-})
-
-var _ = AfterSuite(func() {
-	cancel()
-	By("tearing down the test environment")
-	err := testEnv.Stop()
-	Expect(err).NotTo(HaveOccurred())
-})
diff --git a/shardingsphere-operator/pkg/kubernetes/configmap/configmap_test.go b/shardingsphere-operator/pkg/kubernetes/configmap/configmap_test.go
deleted file mode 100644
index 6d4c3d2..0000000
--- a/shardingsphere-operator/pkg/kubernetes/configmap/configmap_test.go
+++ /dev/null
@@ -1,533 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package configmap_test
-
-import (
-	"context"
-	"fmt"
-
-	"github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/api/v1alpha1"
-	"github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/pkg/kubernetes/configmap"
-	"gopkg.in/yaml.v2"
-
-	. "github.com/onsi/ginkgo/v2"
-	. "github.com/onsi/gomega"
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/types"
-)
-
-var _ = Describe("Default ConfigMap", func() {
-	var (
-		expect = &corev1.ConfigMap{}
-		cn     = &v1alpha1.ComputeNode{
-			TypeMeta: metav1.TypeMeta{
-				Kind:       "ComputeNode",
-				APIVersion: fmt.Sprintf("%s/%s", v1alpha1.GroupVersion.Group, v1alpha1.GroupVersion.Version),
-			},
-			ObjectMeta: metav1.ObjectMeta{
-				Name:      "test_name",
-				Namespace: "test_namespace",
-				Labels: map[string]string{
-					"test_key": "test_value",
-				},
-			},
-		}
-	)
-
-	BeforeEach(func() {
-		expect.Name = "test_name"
-		expect.Namespace = "test_namespace"
-		expect.Labels = map[string]string{
-			"test_key": "test_value",
-		}
-		expect.Data = map[string]string{}
-		expect.Data[configmap.ConfigDataKeyForLogback] = configmap.DefaultLogback
-		expect.Data[configmap.ConfigDataKeyForServer] = configmap.DefaultServerConfig
-		expect.Data[configmap.ConfigDataKeyForAgent] = ""
-	})
-
-	Context("Assert ObjectMeta", func() {
-		c := configmap.NewConfigMapClient(nil)
-		cm := c.Build(context.TODO(), cn)
-
-		It("name should be equal", func() {
-			Expect(expect.Name).To(Equal(cm.Name))
-		})
-		It("namespace should be equal", func() {
-			Expect(expect.Namespace).To(Equal(cm.Namespace))
-		})
-		It("labels should be equal", func() {
-			Expect(expect.Labels).To(Equal(cm.Labels))
-		})
-	})
-
-	Context("Assert Default Spec Data", func() {
-		c := configmap.NewConfigMapClient(nil)
-		cm := c.Build(context.TODO(), cn)
-
-		It("default server config should be equal", func() {
-			Expect(expect.Data[configmap.ConfigDataKeyForServer]).To(Equal(cm.Data[configmap.ConfigDataKeyForServer]))
-		})
-		It("default logback should be equal", func() {
-			Expect(expect.Data[configmap.ConfigDataKeyForLogback]).To(Equal(cm.Data[configmap.ConfigDataKeyForLogback]))
-		})
-		It("default agent config should be equal", func() {
-			Expect(expect.Data[configmap.ConfigDataKeyForAgent]).To(Equal(cm.Data[configmap.ConfigDataKeyForAgent]))
-		})
-	})
-
-	Context("Assert Update Spec Data", func() {
-		cn.TypeMeta = metav1.TypeMeta{
-			Kind:       "ComputeNode",
-			APIVersion: fmt.Sprintf("%s/%s", v1alpha1.GroupVersion.Group, v1alpha1.GroupVersion.Version),
-		}
-		cn.ObjectMeta = metav1.ObjectMeta{
-			Name:      "test_name",
-			Namespace: "test_namespace",
-			Labels: map[string]string{
-				"test_key": "test_value",
-			},
-			Annotations: map[string]string{
-				"test_anno_key": "test_anno_value",
-			},
-		}
-		cn.Spec.Bootstrap = v1alpha1.BootstrapConfig{
-			ServerConfig: v1alpha1.ServerConfig{
-				Authority: v1alpha1.ComputeNodeAuthority{
-					Users: []v1alpha1.ComputeNodeUser{
-						{
-							User:     "test_user@%",
-							Password: "test_password",
-						},
-					},
-					Privilege: v1alpha1.ComputeNodePrivilege{
-						Type: v1alpha1.AllPermitted,
-					},
-				},
-				Mode: v1alpha1.ComputeNodeServerMode{
-					Type: v1alpha1.ModeTypeCluster,
-					Repository: v1alpha1.Repository{
-						Type: v1alpha1.RepositoryTypeZookeeper,
-						Props: v1alpha1.Properties{
-							"test_repo_key": "test_repo_value",
-						},
-					},
-				},
-				Props: v1alpha1.Properties{
-					"test_prop_key": "test_prop_value",
-				},
-			},
-		}
-
-		c := configmap.NewConfigMapClient(nil)
-		cm := c.Build(context.TODO(), cn)
-		cm = configmap.UpdateComputeNodeConfigMap(cn, cm)
-		cfg := &v1alpha1.ServerConfig{}
-		err := yaml.Unmarshal([]byte(cm.Data[configmap.ConfigDataKeyForServer]), &cfg)
-		if err != nil {
-			fmt.Printf("Err: %s\n", err)
-		}
-
-		It("server config should be equal", func() {
-			Expect(cfg.Authority).To(Equal(cn.Spec.Bootstrap.ServerConfig.Authority))
-			Expect(cfg.Mode).To(Equal(cn.Spec.Bootstrap.ServerConfig.Mode))
-			Expect(cfg.Props).To(Equal(cn.Spec.Bootstrap.ServerConfig.Props))
-		})
-		It("default logback should be equal", func() {
-			Expect(expect.Data[configmap.ConfigDataKeyForLogback]).To(Equal(cm.Data[configmap.ConfigDataKeyForLogback]))
-		})
-		It("default agent config should be equal", func() {
-			Expect(expect.Data[configmap.ConfigDataKeyForAgent]).To(Equal(cm.Data[configmap.ConfigDataKeyForAgent]))
-		})
-	})
-})
-
-var _ = Describe("Standalone Server Config", func() {
-	Context("Assert Simple Service Config Data", func() {
-		cn := &v1alpha1.ComputeNode{
-			TypeMeta: metav1.TypeMeta{
-				Kind:       "ComputeNode",
-				APIVersion: fmt.Sprintf("%s/%s", v1alpha1.GroupVersion.Group, v1alpha1.GroupVersion.Version),
-			},
-			Spec: v1alpha1.ComputeNodeSpec{
-				Bootstrap: v1alpha1.BootstrapConfig{
-					ServerConfig: v1alpha1.ServerConfig{
-						Mode: v1alpha1.ComputeNodeServerMode{
-							Type: v1alpha1.ModeTypeStandalone,
-						},
-					},
-				},
-			},
-		}
-
-		expect := &v1alpha1.ServerConfig{}
-		c := configmap.NewConfigMapClient(nil)
-		cm := c.Build(context.TODO(), cn)
-		err := yaml.Unmarshal([]byte(cm.Data[configmap.ConfigDataKeyForServer]), &expect)
-		if err != nil {
-			fmt.Printf("Err: %s\n", err)
-		}
-
-		It("server config authority should be equal", func() {
-			Expect(expect.Authority).To(Equal(cn.Spec.Bootstrap.ServerConfig.Authority))
-		})
-		It("server config mode should be equal", func() {
-			Expect(expect.Mode).To(Equal(cn.Spec.Bootstrap.ServerConfig.Mode))
-		})
-		It("server config props should be equal", func() {
-			Expect(expect.Props).To(Equal(cn.Spec.Bootstrap.ServerConfig.Props))
-		})
-	})
-
-	Context("Assert Full Service Config Data", func() {
-		cn := &v1alpha1.ComputeNode{
-			TypeMeta: metav1.TypeMeta{
-				Kind:       "ComputeNode",
-				APIVersion: fmt.Sprintf("%s/%s", v1alpha1.GroupVersion.Group, v1alpha1.GroupVersion.Version),
-			},
-			Spec: v1alpha1.ComputeNodeSpec{
-				Bootstrap: v1alpha1.BootstrapConfig{
-					ServerConfig: v1alpha1.ServerConfig{
-						Authority: v1alpha1.ComputeNodeAuthority{
-							Users: []v1alpha1.ComputeNodeUser{
-								{
-									User:     "test_user@%",
-									Password: "test_password",
-								},
-							},
-							Privilege: v1alpha1.ComputeNodePrivilege{
-								Type: v1alpha1.AllPermitted,
-							},
-						},
-						Mode: v1alpha1.ComputeNodeServerMode{
-							Type: v1alpha1.ModeTypeStandalone,
-						},
-						Props: v1alpha1.Properties{
-							"test_prop_key": "test_prop_value",
-						},
-					},
-				},
-			},
-		}
-
-		expect := &v1alpha1.ServerConfig{}
-		c := configmap.NewConfigMapClient(nil)
-		cm := c.Build(context.TODO(), cn)
-		err := yaml.Unmarshal([]byte(cm.Data[configmap.ConfigDataKeyForServer]), &expect)
-		if err != nil {
-			fmt.Printf("Err: %s\n", err)
-		}
-		It("server config authority should be equal", func() {
-			Expect(expect.Authority).To(Equal(cn.Spec.Bootstrap.ServerConfig.Authority))
-		})
-		It("server config mode should be equal", func() {
-			Expect(expect.Mode).To(Equal(cn.Spec.Bootstrap.ServerConfig.Mode))
-		})
-		It("server config props should be equal", func() {
-			Expect(expect.Props).To(Equal(cn.Spec.Bootstrap.ServerConfig.Props))
-		})
-	})
-})
-
-var _ = Describe("Cluster Server Config", func() {
-	var (
-		expect = &v1alpha1.ServerConfig{}
-		cn     = &v1alpha1.ComputeNode{
-			TypeMeta: metav1.TypeMeta{
-				Kind:       "ComputeNode",
-				APIVersion: fmt.Sprintf("%s/%s", v1alpha1.GroupVersion.Group, v1alpha1.GroupVersion.Version),
-			},
-			ObjectMeta: metav1.ObjectMeta{
-				Name:      "test_name",
-				Namespace: "test_namespace",
-				Labels: map[string]string{
-					"test_key": "test_value",
-				},
-			},
-			Spec: v1alpha1.ComputeNodeSpec{
-				Bootstrap: v1alpha1.BootstrapConfig{
-					ServerConfig: v1alpha1.ServerConfig{
-						Authority: v1alpha1.ComputeNodeAuthority{
-							Users: []v1alpha1.ComputeNodeUser{
-								{
-									User:     "test_user@%",
-									Password: "test_password",
-								},
-							},
-							Privilege: v1alpha1.ComputeNodePrivilege{
-								Type: v1alpha1.AllPermitted,
-							},
-						},
-						Mode: v1alpha1.ComputeNodeServerMode{
-							Type: v1alpha1.ModeTypeCluster,
-							Repository: v1alpha1.Repository{
-								Type: v1alpha1.RepositoryTypeZookeeper,
-								Props: v1alpha1.Properties{
-									"test_repo_key": "test_repo_value",
-								},
-							},
-						},
-						Props: v1alpha1.Properties{
-							"test_prop_key": "test_prop_value",
-						},
-					},
-				},
-			},
-		}
-	)
-
-	BeforeEach(func() {
-		c := configmap.NewConfigMapClient(nil)
-		cm := c.Build(context.TODO(), cn)
-
-		err := yaml.Unmarshal([]byte(cm.Data[configmap.ConfigDataKeyForServer]), &expect)
-		if err != nil {
-			fmt.Printf("Err: %s\n", err)
-		}
-	})
-
-	Context("Assert Service Config Data", func() {
-		It("server config authority should be equal", func() {
-			Expect(expect.Authority).To(Equal(cn.Spec.Bootstrap.ServerConfig.Authority))
-		})
-		It("server config mode should be equal", func() {
-			Expect(expect.Mode).To(Equal(cn.Spec.Bootstrap.ServerConfig.Mode))
-		})
-		It("server config props should be equal", func() {
-			Expect(expect.Props).To(Equal(cn.Spec.Bootstrap.ServerConfig.Props))
-		})
-	})
-})
-
-var _ = Describe("Logback Config", func() {
-	Context("Assert Logback Config Data From Annotations", func() {
-		var (
-			expect = ""
-			cn     = &v1alpha1.ComputeNode{
-				TypeMeta: metav1.TypeMeta{
-					Kind:       "ComputeNode",
-					APIVersion: fmt.Sprintf("%s/%s", v1alpha1.GroupVersion.Group, v1alpha1.GroupVersion.Version),
-				},
-				ObjectMeta: metav1.ObjectMeta{
-					Annotations: map[string]string{
-						configmap.AnnoLogbackConfig: "test_logback_value",
-					},
-				},
-				Spec: v1alpha1.ComputeNodeSpec{
-					Bootstrap: v1alpha1.BootstrapConfig{
-						LogbackConfig: configmap.DefaultLogback,
-					},
-				},
-			}
-		)
-
-		c := configmap.NewConfigMapClient(nil)
-		cm := c.Build(context.TODO(), cn)
-		expect = "test_logback_value"
-
-		It("Logback config should be equal", func() {
-			Expect(expect).To(Equal(cm.Data[configmap.ConfigDataKeyForLogback]))
-		})
-	})
-
-	Context("Assert Logback Config Data", func() {
-		var (
-			expect = ""
-			cn     = &v1alpha1.ComputeNode{
-				TypeMeta: metav1.TypeMeta{
-					Kind:       "ComputeNode",
-					APIVersion: fmt.Sprintf("%s/%s", v1alpha1.GroupVersion.Group, v1alpha1.GroupVersion.Version),
-				},
-				ObjectMeta: metav1.ObjectMeta{
-					Name:      "test_name",
-					Namespace: "test_namespace",
-					Labels: map[string]string{
-						"test_key": "test_value",
-					},
-				},
-				Spec: v1alpha1.ComputeNodeSpec{
-					Bootstrap: v1alpha1.BootstrapConfig{
-						LogbackConfig: configmap.DefaultLogback,
-					},
-				},
-			}
-		)
-
-		c := configmap.NewConfigMapClient(nil)
-		cm := c.Build(context.TODO(), cn)
-		expect = configmap.DefaultLogback
-
-		It("Logback config should be equal", func() {
-			Expect(expect).To(Equal(cm.Data[configmap.ConfigDataKeyForLogback]))
-		})
-	})
-})
-
-var _ = Describe("Agent Config", func() {
-	Context("Assert Full Agent Config Data", func() {
-		var (
-			expect = &v1alpha1.AgentConfig{}
-			cn     = &v1alpha1.ComputeNode{
-				TypeMeta: metav1.TypeMeta{
-					Kind:       "ComputeNode",
-					APIVersion: fmt.Sprintf("%s/%s", v1alpha1.GroupVersion.Group, v1alpha1.GroupVersion.Version),
-				},
-				ObjectMeta: metav1.ObjectMeta{
-					Name:      "test_name",
-					Namespace: "test_namespace",
-					Labels: map[string]string{
-						"test_key": "test_value",
-					},
-				},
-				Spec: v1alpha1.ComputeNodeSpec{
-					Bootstrap: v1alpha1.BootstrapConfig{
-						AgentConfig: v1alpha1.AgentConfig{
-							Plugins: v1alpha1.AgentPlugin{
-								Logging: &v1alpha1.PluginLogging{
-									File: v1alpha1.LoggingFile{
-										Props: v1alpha1.Properties{
-											"test_logging_key": "test_logging_value",
-										},
-									},
-								},
-								Metrics: &v1alpha1.PluginMetrics{
-									Prometheus: v1alpha1.Prometheus{
-										Host: "test_host",
-										Port: 1234,
-										Props: v1alpha1.Properties{
-											"test_metrics_key": "test_metrics_value",
-										},
-									},
-								},
-								Tracing: &v1alpha1.PluginTracing{
-									OpenTracing: v1alpha1.OpenTracing{
-										Props: v1alpha1.Properties{
-											"test_opentracing_key": "test_opentracing_value",
-										},
-									},
-									OpenTelemetry: v1alpha1.OpenTelemetry{
-										Props: v1alpha1.Properties{
-											"test_opentelemetry_key": "test_opentelemetry_value",
-										},
-									},
-								},
-							},
-						},
-					},
-				},
-			}
-		)
-
-		c := configmap.NewConfigMapClient(nil)
-		cm := c.Build(context.TODO(), cn)
-
-		err := yaml.Unmarshal([]byte(cm.Data[configmap.ConfigDataKeyForAgent]), &expect)
-		if err != nil {
-			fmt.Printf("Err: %s\n", err)
-		}
-
-		It("agent config plugins should be equal", func() {
-			Expect(expect.Plugins).To(Equal(cn.Spec.Bootstrap.AgentConfig.Plugins))
-		})
-		It("agent config logging should be equal", func() {
-			Expect(expect.Plugins.Logging).To(Equal(cn.Spec.Bootstrap.AgentConfig.Plugins.Logging))
-		})
-		It("agent config metrics should be equal", func() {
-			Expect(expect.Plugins.Metrics).To(Equal(cn.Spec.Bootstrap.AgentConfig.Plugins.Metrics))
-		})
-		It("agent config tracing should be equal", func() {
-			Expect(expect.Plugins.Tracing).To(Equal(cn.Spec.Bootstrap.AgentConfig.Plugins.Tracing))
-		})
-	})
-})
-
-var _ = Describe("GetNamespacedByName", func() {
-	Context("Assert Get ConfigMap ", func() {
-		var (
-			cn = &v1alpha1.ComputeNode{
-				TypeMeta: metav1.TypeMeta{
-					Kind:       "ComputeNode",
-					APIVersion: fmt.Sprintf("%s/%s", v1alpha1.GroupVersion.Group, v1alpha1.GroupVersion.Version),
-				},
-				ObjectMeta: metav1.ObjectMeta{
-					Name:      "test",
-					Namespace: "default",
-					Labels: map[string]string{
-						"test_key": "test_value",
-					},
-				},
-				Spec: v1alpha1.ComputeNodeSpec{
-					Bootstrap: v1alpha1.BootstrapConfig{
-						AgentConfig: v1alpha1.AgentConfig{
-							Plugins: v1alpha1.AgentPlugin{
-								Logging: &v1alpha1.PluginLogging{
-									File: v1alpha1.LoggingFile{
-										Props: v1alpha1.Properties{
-											"test_logging_key": "test_logging_value",
-										},
-									},
-								},
-								Metrics: &v1alpha1.PluginMetrics{
-									Prometheus: v1alpha1.Prometheus{
-										Host: "test_host",
-										Port: 1234,
-										Props: v1alpha1.Properties{
-											"test_metrics_key": "test_metrics_value",
-										},
-									},
-								},
-								Tracing: &v1alpha1.PluginTracing{
-									OpenTracing: v1alpha1.OpenTracing{
-										Props: v1alpha1.Properties{
-											"test_opentracing_key": "test_opentracing_value",
-										},
-									},
-									OpenTelemetry: v1alpha1.OpenTelemetry{
-										Props: v1alpha1.Properties{
-											"test_opentelemetry_key": "test_opentelemetry_value",
-										},
-									},
-								},
-							},
-						},
-					},
-				},
-			}
-		)
-
-		It("get configmap should be equal", func() {
-			c := configmap.NewConfigMapClient(k8sClient)
-
-			cm := c.Build(context.TODO(), cn)
-			err := c.Create(context.TODO(), cm)
-			Expect(err).To(BeNil())
-
-			expect, err := c.GetByNamespacedName(context.TODO(), types.NamespacedName{
-				Name:      cn.Name,
-				Namespace: cn.Namespace,
-			})
-			Expect(expect).To(Not(BeNil()))
-
-			Expect(err).To(BeNil())
-			Expect(expect.Name).To(Equal(cm.Name))
-			Expect(expect.Namespace).To(Equal(cm.Namespace))
-			Expect(expect.Data).To(Equal(cm.Data))
-		})
-	})
-})
diff --git a/shardingsphere-operator/pkg/reconcile/shardingspherechaos/shardingsphere_chaos_suite_test.go b/shardingsphere-operator/pkg/reconcile/shardingspherechaos/shardingsphere_chaos_suite_test.go
deleted file mode 100644
index 6d1d624..0000000
--- a/shardingsphere-operator/pkg/reconcile/shardingspherechaos/shardingsphere_chaos_suite_test.go
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package shardingspherechaos_test
-
-import (
-	"context"
-	"github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/api/v1alpha1"
-	"github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/cmd/shardingsphere-operator/manager"
-	"github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/pkg/controllers"
-	mockChaos "github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/pkg/kubernetes/chaosmesh/mocks"
-	"github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/pkg/kubernetes/configmap"
-	"github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/pkg/kubernetes/job"
-	chaosv1alpha1 "github.com/chaos-mesh/chaos-mesh/api/v1alpha1"
-	"github.com/golang/mock/gomock"
-	. "github.com/onsi/ginkgo/v2"
-	. "github.com/onsi/gomega"
-	"github.com/onsi/gomega/gexec"
-	batchV1 "k8s.io/api/batch/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
-	clientset "k8s.io/client-go/kubernetes"
-	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
-	"k8s.io/client-go/rest"
-	"os"
-	"path/filepath"
-	ctrl "sigs.k8s.io/controller-runtime"
-	"sigs.k8s.io/controller-runtime/pkg/client"
-	"sigs.k8s.io/controller-runtime/pkg/envtest"
-	logf "sigs.k8s.io/controller-runtime/pkg/log"
-	"sigs.k8s.io/controller-runtime/pkg/log/zap"
-	"testing"
-	"time"
-)
-
-func TestShardingSphereChaos(t *testing.T) {
-	RegisterFailHandler(Fail)
-	RunSpecs(t, "ShardingSphereChaos Suite")
-}
-
-var (
-	cfg       *rest.Config
-	k8sClient client.Client
-	testEnv   *envtest.Environment
-	ctx       context.Context
-	cancel    context.CancelFunc
-	err       error
-)
-
-var _ = BeforeSuite(func() {
-	logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))
-	ctx, cancel = context.WithCancel(context.TODO())
-
-	By("bootstrapping test environment")
-	testEnv = &envtest.Environment{
-		CRDDirectoryPaths:     []string{filepath.Join("..", "..", "..", "config", "crd", "bases")},
-		ErrorIfCRDPathMissing: true,
-		CRDInstallOptions: envtest.CRDInstallOptions{
-			MaxTime: 60 * time.Second,
-		},
-	}
-	cfg, err = testEnv.Start()
-	Expect(err).NotTo(HaveOccurred())
-	Expect(cfg).NotTo(BeNil())
-
-	scheme := runtime.NewScheme()
-	utilruntime.Must(clientgoscheme.AddToScheme(scheme))
-	utilruntime.Must(chaosv1alpha1.AddToScheme(scheme))
-	utilruntime.Must(v1alpha1.AddToScheme(scheme))
-	utilruntime.Must(batchV1.AddToScheme(scheme))
-	ctl := gomock.NewController(GinkgoT())
-	k8sClient, err = client.New(cfg, client.Options{Scheme: scheme})
-	Expect(err).NotTo(HaveOccurred())
-	Expect(k8sClient).NotTo(BeNil())
-
-	os.Args = append(os.Args, "--metrics-bind-address=:9082")
-	os.Args = append(os.Args, "--health-probe-bind-address=:9083")
-
-	opt := manager.ParseOptionsFromCmdFlags()
-	opt.Scheme = scheme
-	k8sManager, err := ctrl.NewManager(cfg, opt.Options)
-
-	Expect(err).ToNot(HaveOccurred())
-	clientset, err := clientset.NewForConfig(k8sManager.GetConfig())
-	Expect(err).ToNot(HaveOccurred())
-	mockChaosMesh := mockChaos.NewMockChaos(ctl)
-	err = (&controllers.ShardingSphereChaosReconciler{
-		Client:    k8sManager.GetClient(),
-		Scheme:    k8sManager.GetScheme(),
-		Log:       logf.Log,
-		Chaos:     mockChaosMesh,
-		Job:       job.NewJob(k8sManager.GetClient()),
-		ConfigMap: configmap.NewConfigMapClient(k8sManager.GetClient()),
-		Events:    k8sManager.GetEventRecorderFor("shardingsphere-chaos-controller"),
-		ClientSet: clientset,
-	}).SetupWithManager(k8sManager)
-	Expect(err).ToNot(HaveOccurred())
-
-	//+kubebuilder:scaffold:scheme
-	go func() {
-		defer GinkgoRecover()
-		err = k8sManager.Start(ctrl.SetupSignalHandler())
-		Expect(err).ToNot(HaveOccurred(), "failed to run manager")
-		gexec.KillAndWait(4 * time.Second)
-
-		// Teardown the test environment once the controller is finished.
-		// Otherwise, from Kubernetes 1.21+, teardown times out waiting on
-		// kube-apiserver to return
-		err := testEnv.Stop()
-		Expect(err).ToNot(HaveOccurred())
-	}()
-})
-
-var _ = AfterSuite(func() {
-	cancel()
-})