You are viewing a plain text version of this content. The canonical link for it is here.
Posted to notifications@shardingsphere.apache.org by mi...@apache.org on 2023/06/02 14:02:02 UTC
[shardingsphere-on-cloud] branch main updated: feat(storage-node): add CRD StorageProvider to replace DatabaseClass
This is an automated email from the ASF dual-hosted git repository.
miaoliyao pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/shardingsphere-on-cloud.git
The following commit(s) were added to refs/heads/main by this push:
new d0f166b feat(storage-node): add CRD StorageProvider to replace DatabaseClass
new 0ace6b0 Merge pull request #387 from Xu-Wentao/storage-node
d0f166b is described below
commit d0f166b7bbb3b4a7387a572ae38922d85e8dae3f
Author: xuwentao <cu...@yahoo.com>
AuthorDate: Fri Jun 2 13:54:50 2023 +0800
feat(storage-node): add CRD StorageProvider to replace DatabaseClass
---
...ngsphereproxy-storagenode-aws-rds-instance.yaml | 6 +-
shardingsphere-operator/PROJECT | 12 +++
.../api/v1alpha1/storage_node_types.go | 3 +-
.../api/v1alpha1/storageprovider_types.go | 98 ++++++++++++++++++++++
.../api/v1alpha1/zz_generated.deepcopy.go | 96 +++++++++++++++++++++
shardingsphere-operator/hack/boilerplate.go.txt | 29 +++----
.../controllers/storage_ndoe_controller_test.go | 72 ++++++++--------
.../pkg/controllers/storage_node_controller.go | 81 +++++++++---------
.../pkg/reconcile/storagenode/aws/aurora.go | 2 +-
.../pkg/reconcile/storagenode/aws/aws.go | 5 +-
.../pkg/reconcile/storagenode/aws/mocks/aws.go | 17 ++--
.../pkg/reconcile/storagenode/aws/rdsinstance.go | 24 +++---
.../reconcile/storagenode/aws/rdsinstance_test.go | 9 +-
shardingsphere-operator/test/e2e/e2e_suite_test.go | 24 +-----
.../test/e2e/storage_node_controller_test.go | 48 +++++------
15 files changed, 354 insertions(+), 172 deletions(-)
diff --git a/examples/operator/shardingsphereproxy-storagenode-aws-rds-instance.yaml b/examples/operator/shardingsphereproxy-storagenode-aws-rds-instance.yaml
index fcc94f2..3fa0d27 100644
--- a/examples/operator/shardingsphereproxy-storagenode-aws-rds-instance.yaml
+++ b/examples/operator/shardingsphereproxy-storagenode-aws-rds-instance.yaml
@@ -16,12 +16,12 @@
#
---
-apiVersion: core.database-mesh.io/v1alpha1
-kind: DatabaseClass
+apiVersion: shardingsphere.apache.org/v1alpha1
+kind: StorageProvider
metadata:
name: aws-rds-instance-mysql-5.7
spec:
- provisioner: databaseclass.database-mesh.io/aws-rds-instance
+ provisioner: storageproviders.shardingsphere.apache.org/aws-rds-instance
reclaimPolicy: Delete
parameters:
allocatedStorage: "20"
diff --git a/shardingsphere-operator/PROJECT b/shardingsphere-operator/PROJECT
index 7a4f74a..3bcef51 100644
--- a/shardingsphere-operator/PROJECT
+++ b/shardingsphere-operator/PROJECT
@@ -1,3 +1,7 @@
+# Code generated by tool. DO NOT EDIT.
+# This file is used to track the info used to scaffold your project
+# and allow the plugins properly work.
+# More info: https://book.kubebuilder.io/reference/project-config.html
domain: apache.org
layout:
- go.kubebuilder.io/v3
@@ -26,4 +30,12 @@ resources:
kind: ShardingSphereProxyServerConfig
path: apache.org/shardingsphere-operator/api/v1alpha1
version: v1alpha1
+- api:
+ crdVersion: v1
+ namespaced: true
+ domain: apache.org
+ group: shardingsphere
+ kind: StorageProvider
+ path: apache.org/shardingsphere-operator/api/v1alpha1
+ version: v1alpha1
version: "3"
diff --git a/shardingsphere-operator/api/v1alpha1/storage_node_types.go b/shardingsphere-operator/api/v1alpha1/storage_node_types.go
index f3bc6df..165f7a7 100644
--- a/shardingsphere-operator/api/v1alpha1/storage_node_types.go
+++ b/shardingsphere-operator/api/v1alpha1/storage_node_types.go
@@ -115,8 +115,7 @@ type StorageNode struct {
// StorageNodeSpec defines the desired state of a set of storage units
type StorageNodeSpec struct {
// +kubebuilder:validation:Required
- // +kubebuilder:validation:DatabaseClass defined by: https://github.com/database-mesh/golang-sdk/blob/main/kubernetes/api/v1alpha1/databaseclass.go
- DatabaseClassName string `json:"databaseClassName"`
+ StorageProviderName string `json:"storageProviderName"`
// +optional
Schema string `json:"schema"`
}
diff --git a/shardingsphere-operator/api/v1alpha1/storageprovider_types.go b/shardingsphere-operator/api/v1alpha1/storageprovider_types.go
new file mode 100644
index 0000000..36f2b0a
--- /dev/null
+++ b/shardingsphere-operator/api/v1alpha1/storageprovider_types.go
@@ -0,0 +1,98 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package v1alpha1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
+// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
+
+const (
+ AnnotationsVPCSecurityGroupIds = "storageproviders.shardingsphere.apache.org/vpc-security-group-ids"
+ AnnotationsSubnetGroupName = "storageproviders.shardingsphere.apache.org/vpc-subnet-group-name"
+ AnnotationsAvailabilityZones = "storageproviders.shardingsphere.apache.org/availability-zones"
+ AnnotationsClusterIdentifier = "storageproviders.shardingsphere.apache.org/cluster-identifier"
+ AnnotationsInstanceIdentifier = "storageproviders.shardingsphere.apache.org/instance-identifier"
+ AnnotationsInstanceDBName = "storageproviders.shardingsphere.apache.org/instance-db-name"
+ AnnotationsSnapshotIdentifier = "storageproviders.shardingsphere.apache.org/snapshot-identifier"
+ AnnotationsMasterUsername = "storageproviders.shardingsphere.apache.org/master-username"
+ AnnotationsMasterUserPassword = "storageproviders.shardingsphere.apache.org/master-user-password"
+
+ ProvisionerAWSRDSInstance = "storageproviders.shardingsphere.apache.org/aws-rds-instance"
+ ProvisionerAWSRDSCluster = "storageproviders.shardingsphere.apache.org/aws-rds-cluster"
+ ProvisionerAWSAurora = "storageproviders.shardingsphere.apache.org/aws-aurora"
+)
+
+// StorageReclaimPolicy defines the reclaim policy for storage
+type StorageReclaimPolicy string
+
+const (
+ // StorageReclaimPolicyDeleteWithFinalSnapshot The database will be deleted with a final snapshot reserved.
+ StorageReclaimPolicyDeleteWithFinalSnapshot StorageReclaimPolicy = "DeleteWithFinalSnapshot"
+ // StorageReclaimPolicyDelete The database will be deleted.
+ StorageReclaimPolicyDelete StorageReclaimPolicy = "Delete"
+ // StorageReclaimPolicyRetain The database will be retained.
+ // The default policy is Retain.
+ StorageReclaimPolicyRetain StorageReclaimPolicy = "Retain"
+)
+
+// StorageProviderSpec defines the desired state of StorageProvider
+type StorageProviderSpec struct {
+ Provisioner string `json:"provisioner"`
+ Parameters map[string]string `json:"parameters"`
+
+ //+kubebuilder:validation:Optional
+ //+kubebuilder:validation:Enum=DeleteWithFinalSnapshot;Delete;Retain
+ //+kubebuilder:default:=Retain
+ //+optional
+ ReclaimPolicy StorageReclaimPolicy `json:"reclaimPolicy,omitempty"`
+}
+
+// StorageProviderStatus defines the observed state of StorageProvider
+type StorageProviderStatus struct {
+ // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
+ // Important: Run "make" to regenerate code after modifying this file
+}
+
+//+kubebuilder:object:root=true
+//+kubebuilder:subresource:status
+//+kubebuilder:resource:scope=Cluster,shortName=sp
+
+// StorageProvider is the Schema for the storageproviders API
+type StorageProvider struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Status StorageProviderStatus `json:"status,omitempty"`
+ Spec StorageProviderSpec `json:"spec,omitempty"`
+}
+
+//+kubebuilder:object:root=true
+
+// StorageProviderList contains a list of StorageProvider
+type StorageProviderList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []StorageProvider `json:"items"`
+}
+
+func init() {
+ SchemeBuilder.Register(&StorageProvider{}, &StorageProviderList{})
+}
diff --git a/shardingsphere-operator/api/v1alpha1/zz_generated.deepcopy.go b/shardingsphere-operator/api/v1alpha1/zz_generated.deepcopy.go
index 6df59eb..81f7993 100644
--- a/shardingsphere-operator/api/v1alpha1/zz_generated.deepcopy.go
+++ b/shardingsphere-operator/api/v1alpha1/zz_generated.deepcopy.go
@@ -1767,6 +1767,102 @@ func (in *StorageNodeStatus) DeepCopy() *StorageNodeStatus {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StorageProvider) DeepCopyInto(out *StorageProvider) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ out.Status = in.Status
+ in.Spec.DeepCopyInto(&out.Spec)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageProvider.
+func (in *StorageProvider) DeepCopy() *StorageProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(StorageProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *StorageProvider) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StorageProviderList) DeepCopyInto(out *StorageProviderList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]StorageProvider, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageProviderList.
+func (in *StorageProviderList) DeepCopy() *StorageProviderList {
+ if in == nil {
+ return nil
+ }
+ out := new(StorageProviderList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *StorageProviderList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StorageProviderSpec) DeepCopyInto(out *StorageProviderSpec) {
+ *out = *in
+ if in.Parameters != nil {
+ in, out := &in.Parameters, &out.Parameters
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageProviderSpec.
+func (in *StorageProviderSpec) DeepCopy() *StorageProviderSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(StorageProviderSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StorageProviderStatus) DeepCopyInto(out *StorageProviderStatus) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageProviderStatus.
+func (in *StorageProviderStatus) DeepCopy() *StorageProviderStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(StorageProviderStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *User) DeepCopyInto(out *User) {
*out = *in
diff --git a/shardingsphere-operator/hack/boilerplate.go.txt b/shardingsphere-operator/hack/boilerplate.go.txt
index 29c55ec..d973dce 100644
--- a/shardingsphere-operator/hack/boilerplate.go.txt
+++ b/shardingsphere-operator/hack/boilerplate.go.txt
@@ -1,15 +1,16 @@
/*
-Copyright 2022.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
\ No newline at end of file
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
\ No newline at end of file
diff --git a/shardingsphere-operator/pkg/controllers/storage_ndoe_controller_test.go b/shardingsphere-operator/pkg/controllers/storage_ndoe_controller_test.go
index d165f62..f1c0715 100644
--- a/shardingsphere-operator/pkg/controllers/storage_ndoe_controller_test.go
+++ b/shardingsphere-operator/pkg/controllers/storage_ndoe_controller_test.go
@@ -31,7 +31,6 @@ import (
"bou.ke/monkey"
dbmesh_aws "github.com/database-mesh/golang-sdk/aws"
dbmesh_rds "github.com/database-mesh/golang-sdk/aws/client/rds"
- dbmeshv1alpha1 "github.com/database-mesh/golang-sdk/kubernetes/api/v1alpha1"
"github.com/golang/mock/gomock"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -49,7 +48,7 @@ import (
const (
defaultTestNamespace = "test-namespace"
- defaultTestDBClass = "test-database-class"
+ defaultTestStorageProvider = "test-storage-provider"
defaultTestStorageNode = "test-storage-node"
defaultTestInstanceIdentifier = "test-database-instance"
)
@@ -67,7 +66,6 @@ func fakeStorageNodeReconciler() {
logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))
scheme := runtime.NewScheme()
- Expect(dbmeshv1alpha1.AddToScheme(scheme)).To(Succeed())
Expect(v1alpha1.AddToScheme(scheme)).To(Succeed())
Expect(corev1.AddToScheme(scheme)).To(Succeed())
fakeClient = fake.NewClientBuilder().WithScheme(scheme).Build()
@@ -97,12 +95,12 @@ var _ = Describe("StorageNode Controller Mock Test", func() {
})
// create default resource
- dbClass := &dbmeshv1alpha1.DatabaseClass{
+ dbClass := &v1alpha1.StorageProvider{
ObjectMeta: metav1.ObjectMeta{
- Name: defaultTestDBClass,
+ Name: defaultTestStorageProvider,
},
- Spec: dbmeshv1alpha1.DatabaseClassSpec{
- Provisioner: dbmeshv1alpha1.ProvisionerAWSRDSInstance,
+ Spec: v1alpha1.StorageProviderSpec{
+ Provisioner: v1alpha1.ProvisionerAWSRDSInstance,
},
}
@@ -111,11 +109,11 @@ var _ = Describe("StorageNode Controller Mock Test", func() {
Name: defaultTestStorageNode,
Namespace: defaultTestNamespace,
Annotations: map[string]string{
- dbmeshv1alpha1.AnnotationsInstanceIdentifier: defaultTestInstanceIdentifier,
+ v1alpha1.AnnotationsInstanceIdentifier: defaultTestInstanceIdentifier,
},
},
Spec: v1alpha1.StorageNodeSpec{
- DatabaseClassName: defaultTestDBClass,
+ StorageProviderName: defaultTestStorageProvider,
},
}
Expect(fakeClient.Create(ctx, dbClass)).Should(Succeed())
@@ -130,9 +128,9 @@ var _ = Describe("StorageNode Controller Mock Test", func() {
Namespace: defaultTestNamespace,
},
})).Should(Succeed())
- Expect(fakeClient.Delete(ctx, &dbmeshv1alpha1.DatabaseClass{
+ Expect(fakeClient.Delete(ctx, &v1alpha1.StorageProvider{
ObjectMeta: metav1.ObjectMeta{
- Name: defaultTestDBClass,
+ Name: defaultTestStorageProvider,
},
})).Should(Succeed())
@@ -148,7 +146,7 @@ var _ = Describe("StorageNode Controller Mock Test", func() {
Namespace: defaultTestNamespace,
},
Spec: v1alpha1.StorageNodeSpec{
- DatabaseClassName: defaultTestDBClass,
+ StorageProviderName: defaultTestStorageProvider,
},
Status: v1alpha1.StorageNodeStatus{},
}
@@ -167,7 +165,7 @@ var _ = Describe("StorageNode Controller Mock Test", func() {
Namespace: defaultTestNamespace,
},
Spec: v1alpha1.StorageNodeSpec{
- DatabaseClassName: "no-database",
+ StorageProviderName: "no-database",
},
Status: v1alpha1.StorageNodeStatus{},
}
@@ -266,10 +264,10 @@ var _ = Describe("StorageNode Controller Mock Test", func() {
Name: deletingStorageNode,
Namespace: defaultTestNamespace,
Annotations: map[string]string{
- dbmeshv1alpha1.AnnotationsInstanceIdentifier: defaultTestInstanceIdentifier,
+ v1alpha1.AnnotationsInstanceIdentifier: defaultTestInstanceIdentifier,
},
},
- Spec: v1alpha1.StorageNodeSpec{DatabaseClassName: defaultTestDBClass},
+ Spec: v1alpha1.StorageNodeSpec{StorageProviderName: defaultTestStorageProvider},
}
Expect(fakeClient.Create(ctx, readyStorageNode)).Should(Succeed())
// mock aws rds client, get instance and return available status
@@ -318,7 +316,7 @@ var _ = Describe("StorageNode Controller Mock Test", func() {
DeletionTimestamp: &deleteTime,
},
Spec: v1alpha1.StorageNodeSpec{
- DatabaseClassName: defaultTestDBClass,
+ StorageProviderName: defaultTestStorageProvider,
},
Status: v1alpha1.StorageNodeStatus{
Phase: v1alpha1.StorageNodePhaseDeleting,
@@ -363,7 +361,7 @@ var _ = Describe("StorageNode Controller Mock Test", func() {
DeletionTimestamp: &deleteTime,
},
Spec: v1alpha1.StorageNodeSpec{
- DatabaseClassName: defaultTestDBClass,
+ StorageProviderName: defaultTestStorageProvider,
},
Status: v1alpha1.StorageNodeStatus{
Phase: v1alpha1.StorageNodePhaseDeleteComplete,
@@ -404,15 +402,15 @@ var _ = Describe("StorageNode Controller Mock Test", func() {
Name: nodeName,
Namespace: defaultTestNamespace,
Annotations: map[string]string{
- AnnotationKeyRegisterStorageUnitEnabled: "true",
- dbmeshv1alpha1.AnnotationsInstanceDBName: "test_db",
- AnnotationKeyComputeNodeNamespace: defaultTestNamespace,
- AnnotationKeyComputeNodeName: cnName,
- AnnotationKeyLogicDatabaseName: "sharding_db",
+ AnnotationKeyRegisterStorageUnitEnabled: "true",
+ v1alpha1.AnnotationsInstanceDBName: "test_db",
+ AnnotationKeyComputeNodeNamespace: defaultTestNamespace,
+ AnnotationKeyComputeNodeName: cnName,
+ AnnotationKeyLogicDatabaseName: "sharding_db",
},
},
Spec: v1alpha1.StorageNodeSpec{
- DatabaseClassName: defaultTestDBClass,
+ StorageProviderName: defaultTestStorageProvider,
},
}
ins := &dbmesh_rds.DescInstance{
@@ -618,15 +616,15 @@ var _ = Describe("StorageNode Controller Mock Test", func() {
Name: testName,
Namespace: defaultTestNamespace,
Annotations: map[string]string{
- AnnotationKeyComputeNodeName: testName,
- AnnotationKeyComputeNodeNamespace: defaultTestNamespace,
- AnnotationKeyRegisterStorageUnitEnabled: "true",
- AnnotationKeyLogicDatabaseName: testName,
- dbmeshv1alpha1.AnnotationsInstanceDBName: testName,
+ AnnotationKeyComputeNodeName: testName,
+ AnnotationKeyComputeNodeNamespace: defaultTestNamespace,
+ AnnotationKeyRegisterStorageUnitEnabled: "true",
+ AnnotationKeyLogicDatabaseName: testName,
+ v1alpha1.AnnotationsInstanceDBName: testName,
},
},
Spec: v1alpha1.StorageNodeSpec{
- DatabaseClassName: defaultTestDBClass,
+ StorageProviderName: defaultTestStorageProvider,
},
Status: v1alpha1.StorageNodeStatus{
Phase: v1alpha1.StorageNodePhaseReady,
@@ -639,12 +637,12 @@ var _ = Describe("StorageNode Controller Mock Test", func() {
},
}
- dbClass := &dbmeshv1alpha1.DatabaseClass{
+ dbClass := &v1alpha1.StorageProvider{
ObjectMeta: metav1.ObjectMeta{
- Name: defaultTestDBClass,
+ Name: defaultTestStorageProvider,
},
- Spec: dbmeshv1alpha1.DatabaseClassSpec{
- Provisioner: dbmeshv1alpha1.ProvisionerAWSRDSInstance,
+ Spec: v1alpha1.StorageProviderSpec{
+ Provisioner: v1alpha1.ProvisionerAWSRDSInstance,
Parameters: map[string]string{
"masterUsername": testName,
"masterUserPassword": testName,
@@ -718,10 +716,10 @@ var _ = Describe("StorageNode Controller Mock Test", func() {
Name: testName,
Namespace: defaultTestNamespace,
Annotations: map[string]string{
- AnnotationKeyLogicDatabaseName: testName,
- dbmeshv1alpha1.AnnotationsInstanceDBName: testName,
- AnnotationKeyComputeNodeName: testName,
- AnnotationKeyComputeNodeNamespace: defaultTestNamespace,
+ AnnotationKeyLogicDatabaseName: testName,
+ v1alpha1.AnnotationsInstanceDBName: testName,
+ AnnotationKeyComputeNodeName: testName,
+ AnnotationKeyComputeNodeNamespace: defaultTestNamespace,
},
},
Status: v1alpha1.StorageNodeStatus{
diff --git a/shardingsphere-operator/pkg/controllers/storage_node_controller.go b/shardingsphere-operator/pkg/controllers/storage_node_controller.go
index 6ac2242..3c4795c 100644
--- a/shardingsphere-operator/pkg/controllers/storage_node_controller.go
+++ b/shardingsphere-operator/pkg/controllers/storage_node_controller.go
@@ -29,7 +29,6 @@ import (
"github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/pkg/shardingsphere"
"github.com/database-mesh/golang-sdk/aws/client/rds"
- dbmeshv1alpha1 "github.com/database-mesh/golang-sdk/kubernetes/api/v1alpha1"
"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -67,7 +66,7 @@ type StorageNodeReconciler struct {
// +kubebuilder:rbac:groups=shardingsphere.apache.org,resources=storagenodes,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=shardingsphere.apache.org,resources=storagenodes/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=shardingsphere.apache.org,resources=storagenodes/finalizers,verbs=update
-// +kubebuilder:rbac:groups=core.database-mesh.io,resources=databaseclasses,verbs=get;list;watch
+// +kubebuilder:rbac:groups=shardingsphere.apache.org,resources=storageproviders,verbs=get;list;watch
// Reconcile handles main function of this controller
// nolint:gocognit
@@ -80,10 +79,10 @@ func (r *StorageNodeReconciler) Reconcile(ctx context.Context, req ctrl.Request)
return ctrl.Result{}, client.IgnoreNotFound(err)
}
- // Get databaseClass with storageNode.Spec.DatabaseClassName
- databaseClass, err := r.getDatabaseClass(ctx, node)
+ // Get StorageProvider with node.Spec.StorageProviderName
+ storageProvider, err := r.getstorageProvider(ctx, node)
if err != nil {
- r.Log.Error(err, fmt.Sprintf("unable to fetch DatabaseClass %s", node.Spec.DatabaseClassName))
+ r.Log.Error(err, fmt.Sprintf("unable to fetch StorageProvider %s", node.Spec.StorageProviderName))
return ctrl.Result{Requeue: true}, err
}
@@ -98,14 +97,14 @@ func (r *StorageNodeReconciler) Reconcile(ctx context.Context, req ctrl.Request)
}
}
} else if slices.Contains(node.ObjectMeta.Finalizers, FinalizerName) {
- return r.finalize(ctx, node, databaseClass)
+ return r.finalize(ctx, node, storageProvider)
}
// reconcile storage node
- return r.reconcile(ctx, databaseClass, node)
+ return r.reconcile(ctx, storageProvider, node)
}
-func (r *StorageNodeReconciler) finalize(ctx context.Context, node *v1alpha1.StorageNode, databaseClass *dbmeshv1alpha1.DatabaseClass) (ctrl.Result, error) {
+func (r *StorageNodeReconciler) finalize(ctx context.Context, node *v1alpha1.StorageNode, storageProvider *v1alpha1.StorageProvider) (ctrl.Result, error) {
var err error
switch node.Status.Phase {
case v1alpha1.StorageNodePhaseReady, v1alpha1.StorageNodePhaseNotReady:
@@ -129,7 +128,7 @@ func (r *StorageNodeReconciler) finalize(ctx context.Context, node *v1alpha1.Sto
return ctrl.Result{RequeueAfter: defaultRequeueTime}, err
}
- if err = r.deleteDatabaseCluster(ctx, node, databaseClass); err != nil {
+ if err = r.deleteDatabaseCluster(ctx, node, storageProvider); err != nil {
r.Log.Error(err, "failed to delete database cluster")
return ctrl.Result{RequeueAfter: defaultRequeueTime}, err
}
@@ -148,15 +147,15 @@ func (r *StorageNodeReconciler) finalize(ctx context.Context, node *v1alpha1.Sto
return ctrl.Result{RequeueAfter: defaultRequeueTime}, nil
}
-func (r *StorageNodeReconciler) reconcile(ctx context.Context, dbClass *dbmeshv1alpha1.DatabaseClass, node *v1alpha1.StorageNode) (ctrl.Result, error) {
- // reconcile storage node with databaseClass
+func (r *StorageNodeReconciler) reconcile(ctx context.Context, dbClass *v1alpha1.StorageProvider, node *v1alpha1.StorageNode) (ctrl.Result, error) {
+ // reconcile storage node with storageProvider
switch dbClass.Spec.Provisioner {
- case dbmeshv1alpha1.ProvisionerAWSRDSInstance:
+ case v1alpha1.ProvisionerAWSRDSInstance:
if err := r.reconcileAwsRdsInstance(ctx, aws.NewRdsClient(r.AwsRDS), node, dbClass); err != nil {
r.Log.Error(err, fmt.Sprintf("unable to reconcile AWS RDS Instance %s/%s, err:%s", node.GetNamespace(), node.GetName(), err.Error()))
r.Recorder.Eventf(node, corev1.EventTypeWarning, "Reconcile Failed", fmt.Sprintf("unable to reconcile AWS RDS Instance %s/%s, err:%s", node.GetNamespace(), node.GetName(), err.Error()))
}
- case dbmeshv1alpha1.ProvisionerAWSAurora:
+ case v1alpha1.ProvisionerAWSAurora:
if err := r.reconcileAwsAurora(ctx, aws.NewRdsClient(r.AwsRDS), node, dbClass); err != nil {
r.Recorder.Eventf(node, corev1.EventTypeWarning, "Reconcile Failed", fmt.Sprintf("unable to reconcile AWS Aurora %s/%s, err:%s", node.GetNamespace(), node.GetName(), err.Error()))
}
@@ -185,30 +184,30 @@ func (r *StorageNodeReconciler) reconcile(ctx context.Context, dbClass *dbmeshv1
return ctrl.Result{RequeueAfter: defaultRequeueTime}, nil
}
-func (r *StorageNodeReconciler) getDatabaseClass(ctx context.Context, node *v1alpha1.StorageNode) (databaseClass *dbmeshv1alpha1.DatabaseClass, err error) {
- if node.Spec.DatabaseClassName == "" {
- r.Recorder.Event(node, corev1.EventTypeWarning, "DatabaseClassNameIsNil", "DatabaseClassName is nil")
- return nil, fmt.Errorf("DatabaseClassName is nil")
+func (r *StorageNodeReconciler) getstorageProvider(ctx context.Context, node *v1alpha1.StorageNode) (storageProvider *v1alpha1.StorageProvider, err error) {
+ if node.Spec.StorageProviderName == "" {
+ r.Recorder.Event(node, corev1.EventTypeWarning, "StorageProviderNameIsNil", "StorageProviderName is nil")
+ return nil, fmt.Errorf("storageProviderName is nil")
}
- databaseClass = &dbmeshv1alpha1.DatabaseClass{}
+ storageProvider = &v1alpha1.StorageProvider{}
- if err := r.Get(ctx, client.ObjectKey{Name: node.Spec.DatabaseClassName}, databaseClass); err != nil {
- r.Log.Error(err, fmt.Sprintf("unable to fetch DatabaseClass %s", node.Spec.DatabaseClassName))
- r.Recorder.Event(node, corev1.EventTypeWarning, "DatabaseClassNotFound", fmt.Sprintf("DatabaseClass %s not found", node.Spec.DatabaseClassName))
+ if err := r.Get(ctx, client.ObjectKey{Name: node.Spec.StorageProviderName}, storageProvider); err != nil {
+ r.Log.Error(err, fmt.Sprintf("unable to fetch StorageProvider %s", node.Spec.StorageProviderName))
+ r.Recorder.Event(node, corev1.EventTypeWarning, "StorageProviderNotFound", fmt.Sprintf("StorageProvider %s not found", node.Spec.StorageProviderName))
return nil, err
}
// check provisioner
// aws-like provisioner need aws rds client
- if databaseClass.Spec.Provisioner == dbmeshv1alpha1.ProvisionerAWSRDSInstance || databaseClass.Spec.Provisioner == dbmeshv1alpha1.ProvisionerAWSAurora {
+ if storageProvider.Spec.Provisioner == v1alpha1.ProvisionerAWSRDSInstance || storageProvider.Spec.Provisioner == v1alpha1.ProvisionerAWSAurora {
if r.AwsRDS == nil {
r.Recorder.Event(node, corev1.EventTypeWarning, "AwsRdsClientIsNil", "aws rds client is nil, please check your aws credentials")
return nil, fmt.Errorf("aws rds client is nil, please check your aws credentials")
}
}
- return databaseClass, nil
+ return storageProvider, nil
}
// nolint:gocritic
@@ -313,7 +312,7 @@ func allInstancesReady(instances []v1alpha1.InstanceStatus) bool {
return true
}
-func (r *StorageNodeReconciler) reconcileAwsRdsInstance(ctx context.Context, client aws.IRdsClient, node *v1alpha1.StorageNode, dbClass *dbmeshv1alpha1.DatabaseClass) error {
+func (r *StorageNodeReconciler) reconcileAwsRdsInstance(ctx context.Context, client aws.IRdsClient, node *v1alpha1.StorageNode, dbClass *v1alpha1.StorageProvider) error {
instance, err := client.GetInstance(ctx, node)
if err != nil {
return err
@@ -363,7 +362,7 @@ func updateAWSRDSInstanceStatus(node *v1alpha1.StorageNode, instance *rds.DescIn
return nil
}
-func (r *StorageNodeReconciler) reconcileAwsAurora(ctx context.Context, client aws.IRdsClient, node *v1alpha1.StorageNode, dbClass *dbmeshv1alpha1.DatabaseClass) error {
+func (r *StorageNodeReconciler) reconcileAwsAurora(ctx context.Context, client aws.IRdsClient, node *v1alpha1.StorageNode, dbClass *v1alpha1.StorageProvider) error {
// get instance
aurora, err := client.GetAuroraCluster(ctx, node)
if err != nil {
@@ -424,34 +423,34 @@ func updateClusterStatus(ctx context.Context, node *v1alpha1.StorageNode, client
}
// deleteDatabaseCluster
-func (r *StorageNodeReconciler) deleteDatabaseCluster(ctx context.Context, node *v1alpha1.StorageNode, databaseClass *dbmeshv1alpha1.DatabaseClass) error {
- switch databaseClass.Spec.Provisioner {
- case dbmeshv1alpha1.ProvisionerAWSRDSInstance:
- if err := r.deleteAWSRDSInstance(ctx, aws.NewRdsClient(r.AwsRDS), node, databaseClass); err != nil {
+func (r *StorageNodeReconciler) deleteDatabaseCluster(ctx context.Context, node *v1alpha1.StorageNode, storageProvider *v1alpha1.StorageProvider) error {
+ switch storageProvider.Spec.Provisioner {
+ case v1alpha1.ProvisionerAWSRDSInstance:
+ if err := r.deleteAWSRDSInstance(ctx, aws.NewRdsClient(r.AwsRDS), node, storageProvider); err != nil {
return fmt.Errorf("delete aws rds instance failed: %w", err)
}
- case dbmeshv1alpha1.ProvisionerAWSAurora:
- if err := aws.NewRdsClient(r.AwsRDS).DeleteAuroraCluster(ctx, node, databaseClass); err != nil {
+ case v1alpha1.ProvisionerAWSAurora:
+ if err := aws.NewRdsClient(r.AwsRDS).DeleteAuroraCluster(ctx, node, storageProvider); err != nil {
return err
}
default:
- return fmt.Errorf("unsupported database provisioner %s", databaseClass.Spec.Provisioner)
+ return fmt.Errorf("unsupported database provisioner %s", storageProvider.Spec.Provisioner)
}
return nil
}
-func (r *StorageNodeReconciler) deleteAWSRDSInstance(ctx context.Context, client aws.IRdsClient, node *v1alpha1.StorageNode, databaseClass *dbmeshv1alpha1.DatabaseClass) error {
+func (r *StorageNodeReconciler) deleteAWSRDSInstance(ctx context.Context, client aws.IRdsClient, node *v1alpha1.StorageNode, storageProvider *v1alpha1.StorageProvider) error {
instance, err := client.GetInstance(ctx, node)
if err != nil {
return err
}
if instance != nil && instance.DBInstanceStatus != rds.DBInstanceStatusDeleting {
- if err := client.DeleteInstance(ctx, node, databaseClass); err != nil {
- r.Recorder.Eventf(node, corev1.EventTypeWarning, "DeleteFailed", "Failed to delete instance %s: %s", node.Annotations[dbmeshv1alpha1.AnnotationsInstanceIdentifier], err.Error())
+ if err := client.DeleteInstance(ctx, node, storageProvider); err != nil {
+ r.Recorder.Eventf(node, corev1.EventTypeWarning, "DeleteFailed", "Failed to delete instance %s: %s", node.Annotations[v1alpha1.AnnotationsInstanceIdentifier], err.Error())
return err
}
- r.Recorder.Event(node, corev1.EventTypeNormal, "Deleting", fmt.Sprintf("instance %s is deleting", node.Annotations[dbmeshv1alpha1.AnnotationsInstanceIdentifier]))
+ r.Recorder.Event(node, corev1.EventTypeNormal, "Deleting", fmt.Sprintf("instance %s is deleting", node.Annotations[v1alpha1.AnnotationsInstanceIdentifier]))
}
// update instance status
@@ -463,7 +462,7 @@ func (r *StorageNodeReconciler) deleteAWSRDSInstance(ctx context.Context, client
}
// registerStorageUnit
-func (r *StorageNodeReconciler) registerStorageUnit(ctx context.Context, node *v1alpha1.StorageNode, dbClass *dbmeshv1alpha1.DatabaseClass) error {
+func (r *StorageNodeReconciler) registerStorageUnit(ctx context.Context, node *v1alpha1.StorageNode, dbClass *v1alpha1.StorageProvider) error {
// if register storage unit is not enabled, return
if node.Annotations[AnnotationKeyRegisterStorageUnitEnabled] != "true" {
return nil
@@ -485,7 +484,7 @@ func (r *StorageNodeReconciler) registerStorageUnit(ctx context.Context, node *v
}
logicDBName := node.Annotations[AnnotationKeyLogicDatabaseName]
- dbName := node.Annotations[dbmeshv1alpha1.AnnotationsInstanceDBName]
+ dbName := node.Annotations[v1alpha1.AnnotationsInstanceDBName]
ssServer, err := r.getShardingsphereServer(ctx, node)
if err != nil {
@@ -504,11 +503,11 @@ func (r *StorageNodeReconciler) registerStorageUnit(ctx context.Context, node *v
ins := node.Status.Instances[0]
host := ins.Endpoint.Address
port := ins.Endpoint.Port
- username := node.Annotations[dbmeshv1alpha1.AnnotationsMasterUsername]
+ username := node.Annotations[v1alpha1.AnnotationsMasterUsername]
if username == "" {
username = dbClass.Spec.Parameters["masterUsername"]
}
- password := node.Annotations[dbmeshv1alpha1.AnnotationsMasterUserPassword]
+ password := node.Annotations[v1alpha1.AnnotationsMasterUserPassword]
if password == "" {
password = dbClass.Spec.Parameters["masterUserPassword"]
}
@@ -554,7 +553,7 @@ func (r *StorageNodeReconciler) unregisterStorageUnit(ctx context.Context, node
func (r *StorageNodeReconciler) validateComputeNodeAnnotations(node *v1alpha1.StorageNode) error {
requiredAnnos := []string{
AnnotationKeyLogicDatabaseName,
- dbmeshv1alpha1.AnnotationsInstanceDBName,
+ v1alpha1.AnnotationsInstanceDBName,
AnnotationKeyComputeNodeNamespace,
AnnotationKeyComputeNodeName,
}
diff --git a/shardingsphere-operator/pkg/reconcile/storagenode/aws/aurora.go b/shardingsphere-operator/pkg/reconcile/storagenode/aws/aurora.go
index 90721f9..7549192 100644
--- a/shardingsphere-operator/pkg/reconcile/storagenode/aws/aurora.go
+++ b/shardingsphere-operator/pkg/reconcile/storagenode/aws/aurora.go
@@ -47,7 +47,7 @@ func (c *RdsClient) GetAuroraCluster(ctx context.Context, node *v1alpha1.Storage
return aurora.Describe(ctx)
}
-func (c *RdsClient) DeleteAuroraCluster(ctx context.Context, node *v1alpha1.StorageNode, databaseClass *dbmeshv1alpha1.DatabaseClass) error {
+func (c *RdsClient) DeleteAuroraCluster(ctx context.Context, node *v1alpha1.StorageNode, storageProvider *v1alpha1.StorageProvider) error {
aurora := c.Aurora()
return aurora.Delete(ctx)
}
diff --git a/shardingsphere-operator/pkg/reconcile/storagenode/aws/aws.go b/shardingsphere-operator/pkg/reconcile/storagenode/aws/aws.go
index 1250e73..1a697a5 100644
--- a/shardingsphere-operator/pkg/reconcile/storagenode/aws/aws.go
+++ b/shardingsphere-operator/pkg/reconcile/storagenode/aws/aws.go
@@ -22,7 +22,6 @@ import (
"github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/api/v1alpha1"
"github.com/database-mesh/golang-sdk/aws/client/rds"
- dbmeshv1alpha1 "github.com/database-mesh/golang-sdk/kubernetes/api/v1alpha1"
)
type RdsClient struct {
@@ -37,11 +36,11 @@ type IRdsClient interface {
CreateInstance(ctx context.Context, node *v1alpha1.StorageNode, params map[string]string) error
GetInstance(ctx context.Context, node *v1alpha1.StorageNode) (instance *rds.DescInstance, err error)
GetInstanceByIdentifier(ctx context.Context, identifier string) (*rds.DescInstance, error)
- DeleteInstance(ctx context.Context, node *v1alpha1.StorageNode, databaseClass *dbmeshv1alpha1.DatabaseClass) error
+ DeleteInstance(ctx context.Context, node *v1alpha1.StorageNode, storageProvider *v1alpha1.StorageProvider) error
CreateAuroraCluster(ctx context.Context, node *v1alpha1.StorageNode, params map[string]string) error
GetAuroraCluster(ctx context.Context, node *v1alpha1.StorageNode) (cluster *rds.DescCluster, err error)
- DeleteAuroraCluster(ctx context.Context, node *v1alpha1.StorageNode, databaseClass *dbmeshv1alpha1.DatabaseClass) error
+ DeleteAuroraCluster(ctx context.Context, node *v1alpha1.StorageNode, storageProvider *v1alpha1.StorageProvider) error
}
func NewRdsClient(rds rds.RDS) IRdsClient {
diff --git a/shardingsphere-operator/pkg/reconcile/storagenode/aws/mocks/aws.go b/shardingsphere-operator/pkg/reconcile/storagenode/aws/mocks/aws.go
index 3912391..2d9911c 100644
--- a/shardingsphere-operator/pkg/reconcile/storagenode/aws/mocks/aws.go
+++ b/shardingsphere-operator/pkg/reconcile/storagenode/aws/mocks/aws.go
@@ -10,7 +10,6 @@ import (
v1alpha1 "github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/api/v1alpha1"
rds "github.com/database-mesh/golang-sdk/aws/client/rds"
- v1alpha10 "github.com/database-mesh/golang-sdk/kubernetes/api/v1alpha1"
gomock "github.com/golang/mock/gomock"
)
@@ -94,31 +93,31 @@ func (mr *MockIRdsClientMockRecorder) CreateInstance(ctx, node, params interface
}
// DeleteAuroraCluster mocks base method.
-func (m *MockIRdsClient) DeleteAuroraCluster(ctx context.Context, node *v1alpha1.StorageNode, databaseClass *v1alpha10.DatabaseClass) error {
+func (m *MockIRdsClient) DeleteAuroraCluster(ctx context.Context, node *v1alpha1.StorageNode, storageProvider *v1alpha1.StorageProvider) error {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "DeleteAuroraCluster", ctx, node, databaseClass)
+ ret := m.ctrl.Call(m, "DeleteAuroraCluster", ctx, node, storageProvider)
ret0, _ := ret[0].(error)
return ret0
}
// DeleteAuroraCluster indicates an expected call of DeleteAuroraCluster.
-func (mr *MockIRdsClientMockRecorder) DeleteAuroraCluster(ctx, node, databaseClass interface{}) *gomock.Call {
+func (mr *MockIRdsClientMockRecorder) DeleteAuroraCluster(ctx, node, storageProvider interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAuroraCluster", reflect.TypeOf((*MockIRdsClient)(nil).DeleteAuroraCluster), ctx, node, databaseClass)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAuroraCluster", reflect.TypeOf((*MockIRdsClient)(nil).DeleteAuroraCluster), ctx, node, storageProvider)
}
// DeleteInstance mocks base method.
-func (m *MockIRdsClient) DeleteInstance(ctx context.Context, node *v1alpha1.StorageNode, databaseClass *v1alpha10.DatabaseClass) error {
+func (m *MockIRdsClient) DeleteInstance(ctx context.Context, node *v1alpha1.StorageNode, storageProvider *v1alpha1.StorageProvider) error {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "DeleteInstance", ctx, node, databaseClass)
+ ret := m.ctrl.Call(m, "DeleteInstance", ctx, node, storageProvider)
ret0, _ := ret[0].(error)
return ret0
}
// DeleteInstance indicates an expected call of DeleteInstance.
-func (mr *MockIRdsClientMockRecorder) DeleteInstance(ctx, node, databaseClass interface{}) *gomock.Call {
+func (mr *MockIRdsClientMockRecorder) DeleteInstance(ctx, node, storageProvider interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteInstance", reflect.TypeOf((*MockIRdsClient)(nil).DeleteInstance), ctx, node, databaseClass)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteInstance", reflect.TypeOf((*MockIRdsClient)(nil).DeleteInstance), ctx, node, storageProvider)
}
// GetAuroraCluster mocks base method.
diff --git a/shardingsphere-operator/pkg/reconcile/storagenode/aws/rdsinstance.go b/shardingsphere-operator/pkg/reconcile/storagenode/aws/rdsinstance.go
index e54db6c..43f347d 100644
--- a/shardingsphere-operator/pkg/reconcile/storagenode/aws/rdsinstance.go
+++ b/shardingsphere-operator/pkg/reconcile/storagenode/aws/rdsinstance.go
@@ -27,8 +27,8 @@ import (
"strconv"
"github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/api/v1alpha1"
+
"github.com/database-mesh/golang-sdk/aws/client/rds"
- dbmeshv1alpha1 "github.com/database-mesh/golang-sdk/kubernetes/api/v1alpha1"
"github.com/database-mesh/golang-sdk/pkg/random"
)
@@ -64,7 +64,7 @@ func validCreateInstanceParams(node *v1alpha1.StorageNode, paramsptr *map[string
}
// validate instance identifier.
- if val, ok := node.Annotations[dbmeshv1alpha1.AnnotationsInstanceIdentifier]; !ok || val == "" {
+ if val, ok := node.Annotations[v1alpha1.AnnotationsInstanceIdentifier]; !ok || val == "" {
return errors.New("instance identifier is empty")
}
@@ -75,7 +75,7 @@ func validCreateInstanceParams(node *v1alpha1.StorageNode, paramsptr *map[string
if lp < 8 || lp > 41 {
return errors.New("master user password length should be greater than 8")
} else {
- node.Annotations[dbmeshv1alpha1.AnnotationsMasterUserPassword] = params["masterUserPassword"]
+ node.Annotations[v1alpha1.AnnotationsMasterUserPassword] = params["masterUserPassword"]
}
return nil
@@ -136,19 +136,19 @@ func (c *RdsClient) CreateInstance(ctx context.Context, node *v1alpha1.StorageNo
instance.SetEngine(params["engine"]).
SetEngineVersion(params["engineVersion"]).
SetDBInstanceClass(params["instanceClass"]).
- SetDBInstanceIdentifier(node.Annotations[dbmeshv1alpha1.AnnotationsInstanceIdentifier]).
+ SetDBInstanceIdentifier(node.Annotations[v1alpha1.AnnotationsInstanceIdentifier]).
SetMasterUsername(params["masterUsername"]).
SetMasterUserPassword(params["masterUserPassword"]).
SetAllocatedStorage(int32(storage))
// set database name if needed.
- if v, ok := params[node.Annotations[dbmeshv1alpha1.AnnotationsInstanceDBName]]; ok {
+ if v, ok := params[node.Annotations[v1alpha1.AnnotationsInstanceDBName]]; ok {
instance.SetDBName(v)
}
return instance.Create(ctx)
}
func (c *RdsClient) GetInstance(ctx context.Context, node *v1alpha1.StorageNode) (*rds.DescInstance, error) {
- identifier, ok := node.Annotations[dbmeshv1alpha1.AnnotationsInstanceIdentifier]
+ identifier, ok := node.Annotations[v1alpha1.AnnotationsInstanceIdentifier]
if !ok {
return nil, errors.New("instance identifier is empty")
}
@@ -165,7 +165,7 @@ func (c *RdsClient) GetInstanceByIdentifier(ctx context.Context, identifier stri
// DeleteInstance delete rds instance.
// aws rds instance status doc: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/accessing-monitoring.html
-func (c *RdsClient) DeleteInstance(ctx context.Context, node *v1alpha1.StorageNode, databaseClass *dbmeshv1alpha1.DatabaseClass) error {
+func (c *RdsClient) DeleteInstance(ctx context.Context, node *v1alpha1.StorageNode, storageProvider *v1alpha1.StorageProvider) error {
// TODO add more test case.
/* TODO set options to skip final snapshot and backup stuff depends on database class ClaimPolicy.
"error": "operation error RDS: DeleteDBInstance,
@@ -175,7 +175,7 @@ func (c *RdsClient) DeleteInstance(ctx context.Context, node *v1alpha1.StorageNo
FinalDBSnapshotIdentifier is required unless SkipFinalSnapshot is specified."
*/
- identifier, ok := node.Annotations[dbmeshv1alpha1.AnnotationsInstanceIdentifier]
+ identifier, ok := node.Annotations[v1alpha1.AnnotationsInstanceIdentifier]
if !ok {
return errors.New("instance identifier is empty")
}
@@ -194,12 +194,12 @@ func (c *RdsClient) DeleteInstance(ctx context.Context, node *v1alpha1.StorageNo
}
var isDeleteBackup, isSkipFinalSnapshot bool
- switch databaseClass.Spec.ReclaimPolicy {
- case dbmeshv1alpha1.DatabaseReclaimDeleteWithFinalSnapshot:
+ switch storageProvider.Spec.ReclaimPolicy {
+ case v1alpha1.StorageReclaimPolicyDeleteWithFinalSnapshot:
isDeleteBackup, isSkipFinalSnapshot = true, false
- case dbmeshv1alpha1.DatabaseReclaimDelete:
+ case v1alpha1.StorageReclaimPolicyDelete:
isDeleteBackup, isSkipFinalSnapshot = true, true
- case dbmeshv1alpha1.DatabaseReclaimRetain:
+ case v1alpha1.StorageReclaimPolicyRetain:
isDeleteBackup, isSkipFinalSnapshot = false, true
}
diff --git a/shardingsphere-operator/pkg/reconcile/storagenode/aws/rdsinstance_test.go b/shardingsphere-operator/pkg/reconcile/storagenode/aws/rdsinstance_test.go
index c2ee478..e710f08 100644
--- a/shardingsphere-operator/pkg/reconcile/storagenode/aws/rdsinstance_test.go
+++ b/shardingsphere-operator/pkg/reconcile/storagenode/aws/rdsinstance_test.go
@@ -19,7 +19,6 @@ package aws
import (
"github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/api/v1alpha1"
- dbmeshv1alpha1 "github.com/database-mesh/golang-sdk/kubernetes/api/v1alpha1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -46,18 +45,18 @@ var _ = Describe("validCreateInstanceParams", func() {
It("should return true", func() {
- node.Annotations[dbmeshv1alpha1.AnnotationsInstanceIdentifier] = "test-instance"
+ node.Annotations[v1alpha1.AnnotationsInstanceIdentifier] = "test-instance"
Expect(validCreateInstanceParams(node, ¶ms)).To(BeNil())
- Expect(node.Annotations[dbmeshv1alpha1.AnnotationsMasterUserPassword]).To(Equal("root123456"))
+ Expect(node.Annotations[v1alpha1.AnnotationsMasterUserPassword]).To(Equal("root123456"))
})
It("should return username contains invalid characters", func() {
params["masterUsername"] = "@masterUser"
- node.Annotations[dbmeshv1alpha1.AnnotationsInstanceIdentifier] = "test-instance"
+ node.Annotations[v1alpha1.AnnotationsInstanceIdentifier] = "test-instance"
Expect(validCreateInstanceParams(node, ¶ms)).To(MatchError(ContainSubstring("username contains invalid characters")))
})
It("should handle multiple characters correctly", func() {
params["masterUsername"] = "test__test--"
- node.Annotations[dbmeshv1alpha1.AnnotationsInstanceIdentifier] = "test-instance"
+ node.Annotations[v1alpha1.AnnotationsInstanceIdentifier] = "test-instance"
Expect(validCreateInstanceParams(node, ¶ms)).To(BeNil())
Expect(params["masterUsername"]).To(Equal("test_test-"))
})
diff --git a/shardingsphere-operator/test/e2e/e2e_suite_test.go b/shardingsphere-operator/test/e2e/e2e_suite_test.go
index 97598cc..cc5830b 100644
--- a/shardingsphere-operator/test/e2e/e2e_suite_test.go
+++ b/shardingsphere-operator/test/e2e/e2e_suite_test.go
@@ -19,27 +19,24 @@ package e2e
import (
"context"
- "os/exec"
"path/filepath"
"testing"
"time"
- mockChaos "github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/pkg/kubernetes/chaosmesh/mocks"
- "github.com/golang/mock/gomock"
- clientgoscheme "k8s.io/client-go/kubernetes/scheme"
-
"github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/api/v1alpha1"
"github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/pkg/controllers"
+ mockChaos "github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/pkg/kubernetes/chaosmesh/mocks"
"github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/pkg/kubernetes/configmap"
"github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/pkg/kubernetes/deployment"
"github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/pkg/kubernetes/service"
dbmesh_aws "github.com/database-mesh/golang-sdk/aws"
dbmesh_rds "github.com/database-mesh/golang-sdk/aws/client/rds"
- dbmeshv1alpha1 "github.com/database-mesh/golang-sdk/kubernetes/api/v1alpha1"
+ "github.com/golang/mock/gomock"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"k8s.io/client-go/kubernetes/scheme"
+ clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -63,21 +60,7 @@ func TestControllers(t *testing.T) {
RunSpecs(t, "Controllers Suite")
}
-func loadOnlineCRDs() {
- urls := []string{
- // DatabaseClass CRD file
- "https://raw.githubusercontent.com/database-mesh/golang-sdk/main/config/crd/bases/core.database-mesh.io_databaseclasses.yaml",
- }
-
- filePath := filepath.Join("..", "..", "config", "crd", "bases")
- for _, url := range urls {
- Expect(exec.Command("wget", url, "-nc", "-P", filePath).Run()).Should(Succeed())
- }
-}
-
var _ = BeforeSuite(func() {
- loadOnlineCRDs()
-
logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))
ctx, cancel = context.WithCancel(context.TODO())
@@ -100,7 +83,6 @@ var _ = BeforeSuite(func() {
Expect(cfg).NotTo(BeNil())
Expect(v1alpha1.AddToScheme(scheme.Scheme)).NotTo(HaveOccurred())
- Expect(dbmeshv1alpha1.AddToScheme(scheme.Scheme)).NotTo(HaveOccurred())
Expect(clientgoscheme.AddToScheme(scheme.Scheme)).NotTo(HaveOccurred())
//+kubebuilder:scaffold:scheme
diff --git a/shardingsphere-operator/test/e2e/storage_node_controller_test.go b/shardingsphere-operator/test/e2e/storage_node_controller_test.go
index f526836..35b0283 100644
--- a/shardingsphere-operator/test/e2e/storage_node_controller_test.go
+++ b/shardingsphere-operator/test/e2e/storage_node_controller_test.go
@@ -24,31 +24,31 @@ import (
"regexp"
"time"
+ "github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/api/v1alpha1"
+ "github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/pkg/controllers"
+ "github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/pkg/reconcile/storagenode/aws"
+
"bou.ke/monkey"
"github.com/DATA-DOG/go-sqlmock"
dbmesh_rds "github.com/database-mesh/golang-sdk/aws/client/rds"
- dbmeshv1alpha1 "github.com/database-mesh/golang-sdk/kubernetes/api/v1alpha1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
-
- "github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/api/v1alpha1"
- "github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/pkg/controllers"
- "github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/pkg/reconcile/storagenode/aws"
)
-var _ = Describe("StorageNode Controller Suite Test", func() {
- databaseClassName := "test-database-class"
+var _ = Describe("StorageNode Controller Suite Test For AWS RDS Instance", func() {
+ storageProviderName := "test-storage-provider"
+ instanceIdentifier := "test-instance-identifier"
BeforeEach(func() {
- databaseClass := &dbmeshv1alpha1.DatabaseClass{
+ StorageProvider := &v1alpha1.StorageProvider{
ObjectMeta: metav1.ObjectMeta{
- Name: databaseClassName,
+ Name: storageProviderName,
},
- Spec: dbmeshv1alpha1.DatabaseClassSpec{
- Provisioner: dbmeshv1alpha1.ProvisionerAWSRDSInstance,
+ Spec: v1alpha1.StorageProviderSpec{
+ Provisioner: v1alpha1.ProvisionerAWSRDSInstance,
Parameters: map[string]string{
"engine": "mysql",
"engineVersion": "5.7",
@@ -60,13 +60,13 @@ var _ = Describe("StorageNode Controller Suite Test", func() {
},
}
- Expect(k8sClient.Create(ctx, databaseClass)).Should(Succeed())
+ Expect(k8sClient.Create(ctx, StorageProvider)).Should(Succeed())
})
AfterEach(func() {
- databaseClass := &dbmeshv1alpha1.DatabaseClass{}
- Expect(k8sClient.Get(ctx, client.ObjectKey{Name: databaseClassName}, databaseClass)).Should(Succeed())
- Expect(k8sClient.Delete(ctx, databaseClass)).Should(Succeed())
+ StorageProvider := &v1alpha1.StorageProvider{}
+ Expect(k8sClient.Get(ctx, client.ObjectKey{Name: storageProviderName}, StorageProvider)).Should(Succeed())
+ Expect(k8sClient.Delete(ctx, StorageProvider)).Should(Succeed())
})
Context("reconcile storageNode", func() {
@@ -86,7 +86,7 @@ var _ = Describe("StorageNode Controller Suite Test", func() {
}, nil
})
// mock delete instance func returns success
- monkey.PatchInstanceMethod(reflect.TypeOf(&aws.RdsClient{}), "DeleteInstance", func(_ *aws.RdsClient, _ context.Context, _ *v1alpha1.StorageNode, _ *dbmeshv1alpha1.DatabaseClass) error {
+ monkey.PatchInstanceMethod(reflect.TypeOf(&aws.RdsClient{}), "DeleteInstance", func(_ *aws.RdsClient, _ context.Context, _ *v1alpha1.StorageNode, _ *v1alpha1.StorageProvider) error {
return nil
})
@@ -96,11 +96,11 @@ var _ = Describe("StorageNode Controller Suite Test", func() {
Name: nodeName,
Namespace: "default",
Annotations: map[string]string{
- dbmeshv1alpha1.AnnotationsInstanceIdentifier: "test-instance-identifier",
+ v1alpha1.AnnotationsInstanceIdentifier: instanceIdentifier,
},
},
Spec: v1alpha1.StorageNodeSpec{
- DatabaseClassName: databaseClassName,
+ StorageProviderName: storageProviderName,
},
}
@@ -125,11 +125,11 @@ var _ = Describe("StorageNode Controller Suite Test", func() {
Name: nodeName,
Namespace: "default",
Annotations: map[string]string{
- dbmeshv1alpha1.AnnotationsInstanceIdentifier: "test-instance-identifier",
+ v1alpha1.AnnotationsInstanceIdentifier: instanceIdentifier,
},
},
Spec: v1alpha1.StorageNodeSpec{
- DatabaseClassName: databaseClassName,
+ StorageProviderName: storageProviderName,
},
}
Expect(k8sClient.Create(ctx, node)).Should(Succeed())
@@ -161,7 +161,7 @@ var _ = Describe("StorageNode Controller Suite Test", func() {
},
}, nil
})
- monkey.PatchInstanceMethod(reflect.TypeOf(&aws.RdsClient{}), "DeleteInstance", func(_ *aws.RdsClient, _ context.Context, _ *v1alpha1.StorageNode, _ *dbmeshv1alpha1.DatabaseClass) error {
+ monkey.PatchInstanceMethod(reflect.TypeOf(&aws.RdsClient{}), "DeleteInstance", func(_ *aws.RdsClient, _ context.Context, _ *v1alpha1.StorageNode, _ *v1alpha1.StorageProvider) error {
return nil
})
monkey.Patch(sql.Open, func(_ string, _ string) (*sql.DB, error) {
@@ -227,16 +227,16 @@ var _ = Describe("StorageNode Controller Suite Test", func() {
Name: nodeName,
Namespace: "default",
Annotations: map[string]string{
- dbmeshv1alpha1.AnnotationsInstanceIdentifier: "test-instance-identifier",
+ v1alpha1.AnnotationsInstanceIdentifier: instanceIdentifier,
controllers.AnnotationKeyRegisterStorageUnitEnabled: "true",
- dbmeshv1alpha1.AnnotationsInstanceDBName: "test-db-name",
+ v1alpha1.AnnotationsInstanceDBName: "test-db-name",
controllers.AnnotationKeyComputeNodeNamespace: "default",
controllers.AnnotationKeyComputeNodeName: "test-compute-node",
controllers.AnnotationKeyLogicDatabaseName: "test-logic-db-name",
},
},
Spec: v1alpha1.StorageNodeSpec{
- DatabaseClassName: databaseClassName,
+ StorageProviderName: storageProviderName,
},
}