You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@dolphinscheduler.apache.org by ch...@apache.org on 2022/09/20 03:43:49 UTC
[dolphinscheduler-operator] 12/44: feat(operator): add api
This is an automated email from the ASF dual-hosted git repository.
chufenggao pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/dolphinscheduler-operator.git
commit c5518951aee8b334d2058dbf8e332ac18ed0b0ee
Author: nobolity <no...@gmail.com>
AuthorDate: Sun May 29 21:49:46 2022 +0800
feat(operator): add api
---
PROJECT | 9 +
api/v1alpha1/ds_public.go | 6 +
api/v1alpha1/zz_generated.deepcopy.go | 89 ++
.../ds.apache.dolphinscheduler.dev_dsalerts.yaml | 1601 ++++++++++++++++++++
config/crd/kustomization.yaml | 3 +
config/crd/patches/cainjection_in_dsalerts.yaml | 7 +
config/crd/patches/webhook_in_dsalerts.yaml | 16 +
config/ds/alert/ds-alert-deployment.yaml | 30 -
config/ds/alert/ds-alert-service.yaml | 17 -
config/rbac/dsalert_editor_role.yaml | 24 +
config/rbac/dsalert_viewer_role.yaml | 20 +
config/rbac/role.yaml | 26 +
config/samples/ds_v1alpha1_dsalert.yaml | 16 +
controllers/alert_reconcile.go | 22 +-
.../{alert_reconcile.go => api_reconcile.go} | 36 +-
controllers/dsalert_controller.go | 221 +++
controllers/dsmaster_controller.go | 11 +-
controllers/dsworker_controller.go | 2 +-
controllers/suite_test.go | 2 +-
main.go | 7 +
20 files changed, 2080 insertions(+), 85 deletions(-)
diff --git a/PROJECT b/PROJECT
index 55173fc..14f9538 100644
--- a/PROJECT
+++ b/PROJECT
@@ -31,4 +31,13 @@ resources:
kind: DSAlert
path: dolphinscheduler-operator/api/v1alpha1
version: v1alpha1
+- api:
+ crdVersion: v1
+ namespaced: true
+ controller: true
+ domain: apache.dolphinscheduler.dev
+ group: ds
+ kind: DSApi
+ path: dolphinscheduler-operator/api/v1alpha1
+ version: v1alpha1
version: "3"
diff --git a/api/v1alpha1/ds_public.go b/api/v1alpha1/ds_public.go
index bf82016..d468fca 100644
--- a/api/v1alpha1/ds_public.go
+++ b/api/v1alpha1/ds_public.go
@@ -30,12 +30,18 @@ const (
EnvZookeeper = "REGISTRY_ZOOKEEPER_CONNECT_STRING"
DsServiceLabel = "service-name"
DsServiceLabelValue = "ds-service"
+ DsAlert = "ds-alert"
DsAlertServiceValue = "ds-alert-service"
DsAlertDeploymentValue = "ds-alert-deployment"
+ DsApi = "ds-api"
+ DsApiServiceValue = "ds-api-service"
+ DsApiDeploymentValue = "ds-api-deployment"
DataSourceDriveName = "SPRING_DATASOURCE_DRIVER_CLASS_NAME"
DataSourceUrl = "SPRING_DATASOURCE_URL"
DataSourceUserName = "SPRING_DATASOURCE_USERNAME"
DataSourcePassWord = "SPRING_DATASOURCE_PASSWORD"
+ DsApiPort = 12345
+ DsAlertPort = 50052
)
// DsCondition represents one current condition of a ds cluster.
diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go
index 4592b8f..f69ee99 100644
--- a/api/v1alpha1/zz_generated.deepcopy.go
+++ b/api/v1alpha1/zz_generated.deepcopy.go
@@ -146,6 +146,95 @@ func (in *DSAlertStatus) DeepCopy() *DSAlertStatus {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DSApi) DeepCopyInto(out *DSApi) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ out.Spec = in.Spec
+ out.Status = in.Status
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DSApi.
+func (in *DSApi) DeepCopy() *DSApi {
+ if in == nil {
+ return nil
+ }
+ out := new(DSApi)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DSApi) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DSApiList) DeepCopyInto(out *DSApiList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]DSApi, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DSApiList.
+func (in *DSApiList) DeepCopy() *DSApiList {
+ if in == nil {
+ return nil
+ }
+ out := new(DSApiList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DSApiList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DSApiSpec) DeepCopyInto(out *DSApiSpec) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DSApiSpec.
+func (in *DSApiSpec) DeepCopy() *DSApiSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(DSApiSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DSApiStatus) DeepCopyInto(out *DSApiStatus) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DSApiStatus.
+func (in *DSApiStatus) DeepCopy() *DSApiStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(DSApiStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DSMaster) DeepCopyInto(out *DSMaster) {
*out = *in
diff --git a/config/crd/bases/ds.apache.dolphinscheduler.dev_dsalerts.yaml b/config/crd/bases/ds.apache.dolphinscheduler.dev_dsalerts.yaml
new file mode 100644
index 0000000..b49a713
--- /dev/null
+++ b/config/crd/bases/ds.apache.dolphinscheduler.dev_dsalerts.yaml
@@ -0,0 +1,1601 @@
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.8.0
+ creationTimestamp: null
+ name: dsalerts.ds.apache.dolphinscheduler.dev
+spec:
+ group: ds.apache.dolphinscheduler.dev
+ names:
+ kind: DSAlert
+ listKind: DSAlertList
+ plural: dsalerts
+ singular: dsalert
+ scope: Namespaced
+ versions:
+ - name: v1alpha1
+ schema:
+ openAPIV3Schema:
+ description: DSAlert is the Schema for the dsalerts API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: DSAlertSpec defines the desired state of DSAlert
+ properties:
+ datasource:
+ properties:
+ drive_name:
+ type: string
+ password:
+ type: string
+ url:
+ type: string
+ username:
+ type: string
+ required:
+ - drive_name
+ - password
+ - url
+ - username
+ type: object
+ log_pvc_name:
+ description: LogPvcName defines the log capacity of application ,the
+ position is /opt/dolphinscheduler/logs eg 20Gi
+ type: string
+ paused:
+ default: false
+ description: Paused is to pause the control of the operator for the
+ ds-master .
+ type: boolean
+ pod:
+ description: Pod defines the policy to create pod for the dm-master
+ pod. Updating Pod does not take effect on any existing dm-master
+ pods.
+ properties:
+ affinity:
+ description: The scheduling constraints on dm-master pods.
+ properties:
+ nodeAffinity:
+ description: Describes node affinity scheduling rules for
+ the pod.
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods
+ to nodes that satisfy the affinity expressions specified
+ by this field, but it may choose a node that violates
+ one or more of the expressions. The node that is most
+ preferred is the one with the greatest sum of weights,
+ i.e. for each node that meets all of the scheduling
+ requirements (resource request, requiredDuringScheduling
+ affinity expressions, etc.), compute a sum by iterating
+ through the elements of this field and adding "weight"
+ to the sum if the node matches the corresponding matchExpressions;
+ the node(s) with the highest sum are the most preferred.
+ items:
+ description: An empty preferred scheduling term matches
+ all objects with implicit weight 0 (i.e. it's a no-op).
+ A null preferred scheduling term matches no objects
+ (i.e. is also a no-op).
+ properties:
+ preference:
+ description: A node selector term, associated with
+ the corresponding weight.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: A node selector requirement is
+ a selector that contains values, a key,
+ and an operator that relates the key and
+ values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship
+ to a set of values. Valid operators
+ are In, NotIn, Exists, DoesNotExist.
+ Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values.
+ If the operator is In or NotIn, the
+ values array must be non-empty. If the
+ operator is Exists or DoesNotExist,
+ the values array must be empty. If the
+ operator is Gt or Lt, the values array
+ must have a single element, which will
+ be interpreted as an integer. This array
+ is replaced during a strategic merge
+ patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: A node selector requirement is
+ a selector that contains values, a key,
+ and an operator that relates the key and
+ values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship
+ to a set of values. Valid operators
+ are In, NotIn, Exists, DoesNotExist.
+ Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values.
+ If the operator is In or NotIn, the
+ values array must be non-empty. If the
+ operator is Exists or DoesNotExist,
+ the values array must be empty. If the
+ operator is Gt or Lt, the values array
+ must have a single element, which will
+ be interpreted as an integer. This array
+ is replaced during a strategic merge
+ patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ weight:
+ description: Weight associated with matching the
+ corresponding nodeSelectorTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the affinity requirements specified by
+ this field are not met at scheduling time, the pod will
+ not be scheduled onto the node. If the affinity requirements
+ specified by this field cease to be met at some point
+ during pod execution (e.g. due to an update), the system
+ may or may not try to eventually evict the pod from
+ its node.
+ properties:
+ nodeSelectorTerms:
+ description: Required. A list of node selector terms.
+ The terms are ORed.
+ items:
+ description: A null or empty node selector term
+ matches no objects. The requirements of them are
+ ANDed. The TopologySelectorTerm type implements
+ a subset of the NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: A node selector requirement is
+ a selector that contains values, a key,
+ and an operator that relates the key and
+ values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship
+ to a set of values. Valid operators
+ are In, NotIn, Exists, DoesNotExist.
+ Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values.
+ If the operator is In or NotIn, the
+ values array must be non-empty. If the
+ operator is Exists or DoesNotExist,
+ the values array must be empty. If the
+ operator is Gt or Lt, the values array
+ must have a single element, which will
+ be interpreted as an integer. This array
+ is replaced during a strategic merge
+ patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: A node selector requirement is
+ a selector that contains values, a key,
+ and an operator that relates the key and
+ values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship
+ to a set of values. Valid operators
+ are In, NotIn, Exists, DoesNotExist.
+ Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values.
+ If the operator is In or NotIn, the
+ values array must be non-empty. If the
+ operator is Exists or DoesNotExist,
+ the values array must be empty. If the
+ operator is Gt or Lt, the values array
+ must have a single element, which will
+ be interpreted as an integer. This array
+ is replaced during a strategic merge
+ patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ type: array
+ required:
+ - nodeSelectorTerms
+ type: object
+ type: object
+ podAffinity:
+ description: Describes pod affinity scheduling rules (e.g.
+ co-locate this pod in the same node, zone, etc. as some
+ other pod(s)).
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods
+ to nodes that satisfy the affinity expressions specified
+ by this field, but it may choose a node that violates
+ one or more of the expressions. The node that is most
+ preferred is the one with the greatest sum of weights,
+ i.e. for each node that meets all of the scheduling
+ requirements (resource request, requiredDuringScheduling
+ affinity expressions, etc.), compute a sum by iterating
+ through the elements of this field and adding "weight"
+ to the sum if the node has pods which matches the corresponding
+ podAffinityTerm; the node(s) with the highest sum are
+ the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm
+ fields are added per-node to find the most preferred
+ node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated
+ with the corresponding weight.
+ properties:
+ labelSelector:
+ description: A label query over a set of resources,
+ in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: A label selector requirement
+ is a selector that contains values,
+ a key, and an operator that relates
+ the key and values.
+ properties:
+ key:
+ description: key is the label key
+ that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a
+ key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists
+ and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of
+ string values. If the operator is
+ In or NotIn, the values array must
+ be non-empty. If the operator is
+ Exists or DoesNotExist, the values
+ array must be empty. This array
+ is replaced during a strategic merge
+ patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value}
+ pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions,
+ whose key field is "key", the operator
+ is "In", and the values array contains
+ only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaceSelector:
+ description: A label query over the set of namespaces
+ that the term applies to. The term is applied
+ to the union of the namespaces selected by
+ this field and the ones listed in the namespaces
+ field. null selector and null or empty namespaces
+ list means "this pod's namespace". An empty
+ selector ({}) matches all namespaces. This
+ field is beta-level and is only honored when
+ PodAffinityNamespaceSelector feature is enabled.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: A label selector requirement
+ is a selector that contains values,
+ a key, and an operator that relates
+ the key and values.
+ properties:
+ key:
+ description: key is the label key
+ that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a
+ key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists
+ and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of
+ string values. If the operator is
+ In or NotIn, the values array must
+ be non-empty. If the operator is
+ Exists or DoesNotExist, the values
+ array must be empty. This array
+ is replaced during a strategic merge
+ patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value}
+ pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions,
+ whose key field is "key", the operator
+ is "In", and the values array contains
+ only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies a static list
+ of namespace names that the term applies to.
+ The term is applied to the union of the namespaces
+ listed in this field and the ones selected
+ by namespaceSelector. null or empty namespaces
+ list and null namespaceSelector means "this
+ pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity)
+ or not co-located (anti-affinity) with the
+ pods matching the labelSelector in the specified
+ namespaces, where co-located is defined as
+ running on a node whose value of the label
+ with key topologyKey matches that of any node
+ on which any of the selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: weight associated with matching the
+ corresponding podAffinityTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the affinity requirements specified by
+ this field are not met at scheduling time, the pod will
+ not be scheduled onto the node. If the affinity requirements
+ specified by this field cease to be met at some point
+ during pod execution (e.g. due to a pod label update),
+ the system may or may not try to eventually evict the
+ pod from its node. When there are multiple elements,
+ the lists of nodes corresponding to each podAffinityTerm
+ are intersected, i.e. all terms must be satisfied.
+ items:
+ description: Defines a set of pods (namely those matching
+ the labelSelector relative to the given namespace(s))
+ that this pod should be co-located (affinity) or not
+ co-located (anti-affinity) with, where co-located
+ is defined as running on a node whose value of the
+ label with key <topologyKey> matches that of any node
+ on which a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label query over a set of resources,
+ in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: A label selector requirement
+ is a selector that contains values, a key,
+ and an operator that relates the key and
+ values.
+ properties:
+ key:
+ description: key is the label key that
+ the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's
+ relationship to a set of values. Valid
+ operators are In, NotIn, Exists and
+ DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string
+ values. If the operator is In or NotIn,
+ the values array must be non-empty.
+ If the operator is Exists or DoesNotExist,
+ the values array must be empty. This
+ array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value}
+ pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions,
+ whose key field is "key", the operator is
+ "In", and the values array contains only "value".
+ The requirements are ANDed.
+ type: object
+ type: object
+ namespaceSelector:
+ description: A label query over the set of namespaces
+ that the term applies to. The term is applied
+ to the union of the namespaces selected by this
+ field and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list
+ means "this pod's namespace". An empty selector
+ ({}) matches all namespaces. This field is beta-level
+ and is only honored when PodAffinityNamespaceSelector
+ feature is enabled.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: A label selector requirement
+ is a selector that contains values, a key,
+ and an operator that relates the key and
+ values.
+ properties:
+ key:
+ description: key is the label key that
+ the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's
+ relationship to a set of values. Valid
+ operators are In, NotIn, Exists and
+ DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string
+ values. If the operator is In or NotIn,
+ the values array must be non-empty.
+ If the operator is Exists or DoesNotExist,
+ the values array must be empty. This
+ array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value}
+ pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions,
+ whose key field is "key", the operator is
+ "In", and the values array contains only "value".
+ The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies a static list
+ of namespace names that the term applies to. The
+ term is applied to the union of the namespaces
+ listed in this field and the ones selected by
+ namespaceSelector. null or empty namespaces list
+ and null namespaceSelector means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity)
+ or not co-located (anti-affinity) with the pods
+ matching the labelSelector in the specified namespaces,
+ where co-located is defined as running on a node
+ whose value of the label with key topologyKey
+ matches that of any node on which any of the selected
+ pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ podAntiAffinity:
+ description: Describes pod anti-affinity scheduling rules
+ (e.g. avoid putting this pod in the same node, zone, etc.
+ as some other pod(s)).
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods
+ to nodes that satisfy the anti-affinity expressions
+ specified by this field, but it may choose a node that
+ violates one or more of the expressions. The node that
+ is most preferred is the one with the greatest sum of
+ weights, i.e. for each node that meets all of the scheduling
+ requirements (resource request, requiredDuringScheduling
+ anti-affinity expressions, etc.), compute a sum by iterating
+ through the elements of this field and adding "weight"
+ to the sum if the node has pods which matches the corresponding
+ podAffinityTerm; the node(s) with the highest sum are
+ the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm
+ fields are added per-node to find the most preferred
+ node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated
+ with the corresponding weight.
+ properties:
+ labelSelector:
+ description: A label query over a set of resources,
+ in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: A label selector requirement
+ is a selector that contains values,
+ a key, and an operator that relates
+ the key and values.
+ properties:
+ key:
+ description: key is the label key
+ that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a
+ key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists
+ and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of
+ string values. If the operator is
+ In or NotIn, the values array must
+ be non-empty. If the operator is
+ Exists or DoesNotExist, the values
+ array must be empty. This array
+ is replaced during a strategic merge
+ patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value}
+ pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions,
+ whose key field is "key", the operator
+ is "In", and the values array contains
+ only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaceSelector:
+ description: A label query over the set of namespaces
+ that the term applies to. The term is applied
+ to the union of the namespaces selected by
+ this field and the ones listed in the namespaces
+ field. null selector and null or empty namespaces
+ list means "this pod's namespace". An empty
+ selector ({}) matches all namespaces. This
+ field is beta-level and is only honored when
+ PodAffinityNamespaceSelector feature is enabled.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: A label selector requirement
+ is a selector that contains values,
+ a key, and an operator that relates
+ the key and values.
+ properties:
+ key:
+ description: key is the label key
+ that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a
+ key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists
+ and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of
+ string values. If the operator is
+ In or NotIn, the values array must
+ be non-empty. If the operator is
+ Exists or DoesNotExist, the values
+ array must be empty. This array
+ is replaced during a strategic merge
+ patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value}
+ pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions,
+ whose key field is "key", the operator
+ is "In", and the values array contains
+ only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies a static list
+ of namespace names that the term applies to.
+ The term is applied to the union of the namespaces
+ listed in this field and the ones selected
+ by namespaceSelector. null or empty namespaces
+ list and null namespaceSelector means "this
+ pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity)
+ or not co-located (anti-affinity) with the
+ pods matching the labelSelector in the specified
+ namespaces, where co-located is defined as
+ running on a node whose value of the label
+ with key topologyKey matches that of any node
+ on which any of the selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: weight associated with matching the
+ corresponding podAffinityTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the anti-affinity requirements specified
+ by this field are not met at scheduling time, the pod
+ will not be scheduled onto the node. If the anti-affinity
+ requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to a pod
+ label update), the system may or may not try to eventually
+ evict the pod from its node. When there are multiple
+ elements, the lists of nodes corresponding to each podAffinityTerm
+ are intersected, i.e. all terms must be satisfied.
+ items:
+ description: Defines a set of pods (namely those matching
+ the labelSelector relative to the given namespace(s))
+ that this pod should be co-located (affinity) or not
+ co-located (anti-affinity) with, where co-located
+ is defined as running on a node whose value of the
+ label with key <topologyKey> matches that of any node
+ on which a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label query over a set of resources,
+ in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: A label selector requirement
+ is a selector that contains values, a key,
+ and an operator that relates the key and
+ values.
+ properties:
+ key:
+ description: key is the label key that
+ the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's
+ relationship to a set of values. Valid
+ operators are In, NotIn, Exists and
+ DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string
+ values. If the operator is In or NotIn,
+ the values array must be non-empty.
+ If the operator is Exists or DoesNotExist,
+ the values array must be empty. This
+ array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value}
+ pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions,
+ whose key field is "key", the operator is
+ "In", and the values array contains only "value".
+ The requirements are ANDed.
+ type: object
+ type: object
+ namespaceSelector:
+ description: A label query over the set of namespaces
+ that the term applies to. The term is applied
+ to the union of the namespaces selected by this
+ field and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list
+ means "this pod's namespace". An empty selector
+ ({}) matches all namespaces. This field is beta-level
+ and is only honored when PodAffinityNamespaceSelector
+ feature is enabled.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: A label selector requirement
+ is a selector that contains values, a key,
+ and an operator that relates the key and
+ values.
+ properties:
+ key:
+ description: key is the label key that
+ the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's
+ relationship to a set of values. Valid
+ operators are In, NotIn, Exists and
+ DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string
+ values. If the operator is In or NotIn,
+ the values array must be non-empty.
+ If the operator is Exists or DoesNotExist,
+ the values array must be empty. This
+ array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value}
+ pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions,
+ whose key field is "key", the operator is
+ "In", and the values array contains only "value".
+ The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies a static list
+ of namespace names that the term applies to. The
+ term is applied to the union of the namespaces
+ listed in this field and the ones selected by
+ namespaceSelector. null or empty namespaces list
+ and null namespaceSelector means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity)
+ or not co-located (anti-affinity) with the pods
+ matching the labelSelector in the specified namespaces,
+ where co-located is defined as running on a node
+ whose value of the label with key topologyKey
+ matches that of any node on which any of the selected
+ pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ type: object
+ annotations:
+ additionalProperties:
+ type: string
+ description: Annotations specifies the annotations to attach to
+ pods the operator creates for the dm-master cluster. The "dm-master.version"
+ annotation is reserved for the internal use of the dm-master
+ operator.
+ type: object
+ antiAffinity:
+ description: '**DEPRECATED**. Use Affinity instead.'
+ type: boolean
+ dm-masterEnv:
+ description: List of environment variables to set in the dm-master
+ container. This is used to configure dm-master process. dm-master
+ cluster cannot be created when bad environment variables are
+ provided. Do not overwrite any flags used to bootstrap the cluster
+ (for example `--initial-cluster` flag). This field cannot be
+ updated.
+ items:
+ description: EnvVar represents an environment variable present
+ in a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must be a
+ C_IDENTIFIER.
+ type: string
+ value:
+ description: 'Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in
+ the container and any service environment variables. If
+ a variable cannot be resolved, the reference in the input
+ string will be unchanged. Double $$ are reduced to a single
+ $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Defaults to "".'
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value.
+ Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind,
+ uid?'
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or its
+ key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ description: 'Selects a field of the pod: supports metadata.name,
+ metadata.namespace, `metadata.labels[''<KEY>'']`,
+ `metadata.annotations[''<KEY>'']`, spec.nodeName,
+ spec.serviceAccountName, status.hostIP, status.podIP,
+ status.podIPs.'
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath
+ is written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in the
+ specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ description: 'Selects a resource of the container: only
+ resources limits and requests (limits.cpu, limits.memory,
+ limits.ephemeral-storage, requests.cpu, requests.memory
+ and requests.ephemeral-storage) are currently supported.'
+ properties:
+ containerName:
+ description: 'Container name: required for volumes,
+ optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format of the
+ exposed resources, defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ description: Selects a key of a secret in the pod's
+ namespace
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind,
+ uid?'
+ type: string
+ optional:
+ description: Specify whether the Secret or its key
+ must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ labels:
+ additionalProperties:
+ type: string
+ description: Labels specifies the labels to attach to pods the
+ operator creates for the dm-master cluster. "app" and "dm-master_*"
+ labels are reserved for the internal use of the dm-master operator.
+ Do not overwrite them.
+ type: object
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: NodeSelector specifies a map of key-value pairs.
+ For the pod to be eligible to run on a node, the node must have
+ each of the indicated key-value pairs as labels.
+ type: object
+ persistentVolumeClaimSpec:
+ description: PersistentVolumeClaimSpec is the spec to describe
+ PVC for the dm-master container This field is optional. If no
+ PVC spec, dm-master container will use emptyDir as volume Note.
+ This feature is in alpha stage. It is currently only used as
+ non-stable storage, not the stable storage. Future work is needed
+ to make it usable as stable storage.
+ properties:
+ accessModes:
+ description: 'AccessModes contains the desired access modes
+ the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1'
+ items:
+ type: string
+ type: array
+ dataSource:
+ description: 'This field can be used to specify either: *
+ An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim) If the provisioner
+ or an external controller can support the specified data
+ source, it will create a new volume based on the contents
+ of the specified data source. If the AnyVolumeDataSource
+ feature gate is enabled, this field will always have the
+ same contents as the DataSourceRef field.'
+ properties:
+ apiGroup:
+ description: APIGroup is the group for the resource being
+ referenced. If APIGroup is not specified, the specified
+ Kind must be in the core API group. For any other third-party
+ types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ dataSourceRef:
+ description: 'Specifies the object from which to populate
+ the volume with data, if a non-empty volume is desired.
+ This may be any local object from a non-empty API group
+ (non core object) or a PersistentVolumeClaim object. When
+ this field is specified, volume binding will only succeed
+ if the type of the specified object matches some installed
+ volume populator or dynamic provisioner. This field will
+ replace the functionality of the DataSource field and as
+ such if both fields are non-empty, they must have the same
+ value. For backwards compatibility, both fields (DataSource
+ and DataSourceRef) will be set to the same value automatically
+ if one of them is empty and the other is non-empty. There
+ are two important differences between DataSource and DataSourceRef:
+ * While DataSource only allows two specific types of objects,
+ DataSourceRef allows any non-core object, as well as PersistentVolumeClaim
+ objects. * While DataSource ignores disallowed values (dropping
+ them), DataSourceRef preserves all values, and generates
+ an error if a disallowed value is specified. (Alpha) Using
+ this field requires the AnyVolumeDataSource feature gate
+ to be enabled.'
+ properties:
+ apiGroup:
+ description: APIGroup is the group for the resource being
+ referenced. If APIGroup is not specified, the specified
+ Kind must be in the core API group. For any other third-party
+ types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ resources:
+ description: 'Resources represents the minimum resources the
+ volume should have. If RecoverVolumeExpansionFailure feature
+ is enabled users are allowed to specify resource requirements
+ that are lower than previous value but must still be higher
+ than capacity recorded in the status field of the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources'
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Limits describes the maximum amount of compute
+ resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Requests describes the minimum amount of
+ compute resources required. If Requests is omitted for
+ a container, it defaults to Limits if that is explicitly
+ specified, otherwise to an implementation-defined value.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ type: object
+ selector:
+ description: A label query over volumes to consider for binding.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector
+ that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship
+ to a set of values. Valid operators are In, NotIn,
+ Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values.
+ If the operator is In or NotIn, the values array
+ must be non-empty. If the operator is Exists or
+ DoesNotExist, the values array must be empty.
+ This array is replaced during a strategic merge
+ patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs.
+ A single {key,value} in the matchLabels map is equivalent
+ to an element of matchExpressions, whose key field is
+ "key", the operator is "In", and the values array contains
+ only "value". The requirements are ANDed.
+ type: object
+ type: object
+ storageClassName:
+ description: 'Name of the StorageClass required by the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1'
+ type: string
+ volumeMode:
+ description: volumeMode defines what type of volume is required
+ by the claim. Value of Filesystem is implied when not included
+ in claim spec.
+ type: string
+ volumeName:
+ description: VolumeName is the binding reference to the PersistentVolume
+ backing this claim.
+ type: string
+ type: object
+ resources:
+ description: Resources is the resource requirements for the dm-master
+ container. This field cannot be updated once the cluster is
+ created.
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Limits describes the maximum amount of compute
+ resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Requests describes the minimum amount of compute
+ resources required. If Requests is omitted for a container,
+ it defaults to Limits if that is explicitly specified, otherwise
+ to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ type: object
+ securityContext:
+ description: 'SecurityContext specifies the security context for
+ the entire pod More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context'
+ properties:
+ fsGroup:
+ description: "A special supplemental group that applies to
+ all containers in a pod. Some volume types allow the Kubelet
+ to change the ownership of that volume to be owned by the
+ pod: \n 1. The owning GID will be the FSGroup 2. The setgid
+ bit is set (new files created in the volume will be owned
+ by FSGroup) 3. The permission bits are OR'd with rw-rw----
+ \n If unset, the Kubelet will not modify the ownership and
+ permissions of any volume. Note that this field cannot be
+ set when spec.os.name is windows."
+ format: int64
+ type: integer
+ fsGroupChangePolicy:
+ description: 'fsGroupChangePolicy defines behavior of changing
+ ownership and permission of the volume before being exposed
+ inside Pod. This field will only apply to volume types which
+ support fsGroup based ownership(and permissions). It will
+ have no effect on ephemeral volume types such as: secret,
+ configmaps and emptydir. Valid values are "OnRootMismatch"
+ and "Always". If not specified, "Always" is used. Note that
+ this field cannot be set when spec.os.name is windows.'
+ type: string
+ runAsGroup:
+ description: The GID to run the entrypoint of the container
+ process. Uses runtime default if unset. May also be set
+ in SecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext
+ takes precedence for that container. Note that this field
+ cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ runAsNonRoot:
+ description: Indicates that the container must run as a non-root
+ user. If true, the Kubelet will validate the image at runtime
+ to ensure that it does not run as UID 0 (root) and fail
+ to start the container if it does. If unset or false, no
+ such validation will be performed. May also be set in SecurityContext. If
+ set in both SecurityContext and PodSecurityContext, the
+ value specified in SecurityContext takes precedence.
+ type: boolean
+ runAsUser:
+ description: The UID to run the entrypoint of the container
+ process. Defaults to user specified in image metadata if
+ unspecified. May also be set in SecurityContext. If set
+ in both SecurityContext and PodSecurityContext, the value
+ specified in SecurityContext takes precedence for that container.
+ Note that this field cannot be set when spec.os.name is
+ windows.
+ format: int64
+ type: integer
+ seLinuxOptions:
+ description: The SELinux context to be applied to all containers.
+ If unspecified, the container runtime will allocate a random
+ SELinux context for each container. May also be set in
+ SecurityContext. If set in both SecurityContext and PodSecurityContext,
+ the value specified in SecurityContext takes precedence
+ for that container. Note that this field cannot be set when
+ spec.os.name is windows.
+ properties:
+ level:
+ description: Level is SELinux level label that applies
+ to the container.
+ type: string
+ role:
+ description: Role is a SELinux role label that applies
+ to the container.
+ type: string
+ type:
+ description: Type is a SELinux type label that applies
+ to the container.
+ type: string
+ user:
+ description: User is a SELinux user label that applies
+ to the container.
+ type: string
+ type: object
+ seccompProfile:
+ description: The seccomp options to use by the containers
+ in this pod. Note that this field cannot be set when spec.os.name
+ is windows.
+ properties:
+ localhostProfile:
+ description: localhostProfile indicates a profile defined
+ in a file on the node should be used. The profile must
+ be preconfigured on the node to work. Must be a descending
+ path, relative to the kubelet's configured seccomp profile
+ location. Must only be set if type is "Localhost".
+ type: string
+ type:
+ description: "type indicates which kind of seccomp profile
+ will be applied. Valid options are: \n Localhost - a
+ profile defined in a file on the node should be used.
+ RuntimeDefault - the container runtime default profile
+ should be used. Unconfined - no profile should be applied."
+ type: string
+ required:
+ - type
+ type: object
+ supplementalGroups:
+ description: A list of groups applied to the first process
+ run in each container, in addition to the container's primary
+ GID. If unspecified, no groups will be added to any container.
+ Note that this field cannot be set when spec.os.name is
+ windows.
+ items:
+ format: int64
+ type: integer
+ type: array
+ sysctls:
+ description: Sysctls hold a list of namespaced sysctls used
+ for the pod. Pods with unsupported sysctls (by the container
+ runtime) might fail to launch. Note that this field cannot
+ be set when spec.os.name is windows.
+ items:
+ description: Sysctl defines a kernel parameter to be set
+ properties:
+ name:
+ description: Name of a property to set
+ type: string
+ value:
+ description: Value of a property to set
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ windowsOptions:
+ description: The Windows specific settings applied to all
+ containers. If unspecified, the options within a container's
+ SecurityContext will be used. If set in both SecurityContext
+ and PodSecurityContext, the value specified in SecurityContext
+ takes precedence. Note that this field cannot be set when
+ spec.os.name is linux.
+ properties:
+ gmsaCredentialSpec:
+ description: GMSACredentialSpec is where the GMSA admission
+ webhook (https://github.com/kubernetes-sigs/windows-gmsa)
+ inlines the contents of the GMSA credential spec named
+ by the GMSACredentialSpecName field.
+ type: string
+ gmsaCredentialSpecName:
+ description: GMSACredentialSpecName is the name of the
+ GMSA credential spec to use.
+ type: string
+ hostProcess:
+ description: HostProcess determines if a container should
+ be run as a 'Host Process' container. This field is
+ alpha-level and will only be honored by components that
+ enable the WindowsHostProcessContainers feature flag.
+ Setting this field without the feature flag will result
+ in errors when validating the Pod. All of a Pod's containers
+ must have the same effective HostProcess value (it is
+ not allowed to have a mix of HostProcess containers
+ and non-HostProcess containers). In addition, if HostProcess
+ is true then HostNetwork must also be set to true.
+ type: boolean
+ runAsUserName:
+ description: The UserName in Windows to run the entrypoint
+ of the container process. Defaults to the user specified
+ in image metadata if unspecified. May also be set in
+ PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext
+ takes precedence.
+ type: string
+ type: object
+ type: object
+ tolerations:
+ description: Tolerations specifies the pod's tolerations.
+ items:
+ description: The pod this Toleration is attached to tolerates
+ any taint that matches the triple <key,value,effect> using
+ the matching operator <operator>.
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match.
+ Empty means match all taint effects. When specified, allowed
+ values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration applies
+ to. Empty means match all taint keys. If the key is empty,
+ operator must be Exists; this combination means to match
+ all values and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship to
+ the value. Valid operators are Exists and Equal. Defaults
+ to Equal. Exists is equivalent to wildcard for value,
+ so that a pod can tolerate all taints of a particular
+ category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period of
+ time the toleration (which must be of effect NoExecute,
+ otherwise this field is ignored) tolerates the taint.
+ By default, it is not set, which means tolerate the taint
+ forever (do not evict). Zero and negative values will
+ be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: Value is the taint value the toleration matches
+ to. If the operator is Exists, the value should be empty,
+ otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ type: object
+ re_generate:
+ default: false
+ description: ReGenerate defines if delete the old_deployment and create
+ a new deployment
+ type: boolean
+ replicas:
+ default: 3
+ description: Replicas is the expected size of the ds-master. The ds-master-operator
+ will eventually make the size of the running cluster equal to the expected
+ size. The valid range of the size is from 1 to 7.
+ maximum: 7
+ minimum: 1
+ type: integer
+ repository:
+ default: apache/dolphinscheduler-master
+ description: Repository is the name of the repository that hosts ds
+ container images. It should be direct clone of the repository in
+ official By default, it is `apache/dolphinscheduler-master`.
+ type: string
+ version:
+ default: 3.0.0-alpha
+ description: Version is the expected version of the ds cluster. The
+ ds-operator will eventually make the ds cluster version equal to
+ the expected version. If version is not set, default is "3.0.0-alpha".
+ type: string
+ required:
+ - datasource
+ - replicas
+ type: object
+ status:
+ description: DSAlertStatus defines the observed state of DSAlert
+ properties:
+ conditions:
+ description: Condition keeps track of all cluster conditions, if they
+ exist.
+ items:
+ description: DsCondition represents one current condition of a ds
+ cluster. A condition might not show up if it is not happening.
+ For example, if a cluster is not upgrading, the Upgrading condition
+ would not show up. If a cluster is upgrading and encountered a
+ problem that prevents the upgrade, the Upgrading condition's status
+ would be False and communicate the problem back.
+ properties:
+ lastTransitionTime:
+ description: Last time the condition transitioned from one status
+ to another.
+ type: string
+ lastUpdateTime:
+ description: The last time this condition was updated.
+ type: string
+ message:
+ description: A human readable message indicating details about
+ the transition.
+ type: string
+ reason:
+ description: The reason for the condition's last transition.
+ type: string
+ status:
+ description: Status of the condition, one of True, False, Unknown.
+ type: string
+ type:
+ description: Type of cluster condition.
+ type: string
+ required:
+ - status
+ - type
+ type: object
+ type: array
+ controlPaused:
+ default: false
+ description: ControlPaused indicates the operator pauses the control
+ of the cluster.
+ type: boolean
+ members:
+ description: Members are the dsMaster members in the cluster
+ properties:
+ ready:
+ description: Ready are the dsMaster members that are ready to
+ serve requests The member names are the same as the dsMaster
+ pod names
+ items:
+ type: string
+ type: array
+ unready:
+ description: Unready are the dsMaster members not ready to serve requests
+ items:
+ type: string
+ type: array
+ type: object
+ phase:
+ description: 'INSERT ADDITIONAL STATUS FIELD - define observed state
+ of cluster Important: Run "make" to regenerate code after modifying
+ this file Phase is the cluster running phase'
+ enum:
+ - ""
+ - Creating
+ - Running
+ - Failed
+ - Finished
+ type: string
+ replicas:
+ default: 0
+ description: Replicas is the current size of the cluster
+ type: integer
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml
index 578c9e7..3089d02 100644
--- a/config/crd/kustomization.yaml
+++ b/config/crd/kustomization.yaml
@@ -5,6 +5,7 @@ resources:
- bases/ds.apache.dolphinscheduler.dev_dsmasters.yaml
- bases/ds.apache.dolphinscheduler.dev_dsworkers.yaml
- bases/ds.apache.dolphinscheduler.dev_dsalerts.yaml
+- bases/ds.apache.dolphinscheduler.dev_dsapis.yaml
#+kubebuilder:scaffold:crdkustomizeresource
patchesStrategicMerge:
@@ -13,6 +14,7 @@ patchesStrategicMerge:
#- patches/webhook_in_dsmasters.yaml
#- patches/webhook_in_dsworkers.yaml
#- patches/webhook_in_dsalerts.yaml
+#- patches/webhook_in_dsapis.yaml
#+kubebuilder:scaffold:crdkustomizewebhookpatch
# [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix.
@@ -20,6 +22,7 @@ patchesStrategicMerge:
#- patches/cainjection_in_dsmasters.yaml
#- patches/cainjection_in_dsworkers.yaml
#- patches/cainjection_in_dsalerts.yaml
+#- patches/cainjection_in_dsapis.yaml
#+kubebuilder:scaffold:crdkustomizecainjectionpatch
# the following config is for teaching kustomize how to do kustomization for CRDs.
diff --git a/config/crd/patches/cainjection_in_dsalerts.yaml b/config/crd/patches/cainjection_in_dsalerts.yaml
new file mode 100644
index 0000000..014aed7
--- /dev/null
+++ b/config/crd/patches/cainjection_in_dsalerts.yaml
@@ -0,0 +1,7 @@
+# The following patch adds a directive for certmanager to inject CA into the CRD
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
+ name: dsalerts.ds.apache.dolphinscheduler.dev
diff --git a/config/crd/patches/webhook_in_dsalerts.yaml b/config/crd/patches/webhook_in_dsalerts.yaml
new file mode 100644
index 0000000..2a239f4
--- /dev/null
+++ b/config/crd/patches/webhook_in_dsalerts.yaml
@@ -0,0 +1,16 @@
+# The following patch enables a conversion webhook for the CRD
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: dsalerts.ds.apache.dolphinscheduler.dev
+spec:
+ conversion:
+ strategy: Webhook
+ webhook:
+ clientConfig:
+ service:
+ namespace: system
+ name: webhook-service
+ path: /convert
+ conversionReviewVersions:
+ - v1
diff --git a/config/ds/alert/ds-alert-deployment.yaml b/config/ds/alert/ds-alert-deployment.yaml
deleted file mode 100644
index 627e1d6..0000000
--- a/config/ds/alert/ds-alert-deployment.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: ds-alert-deployment
- namespace: ds
-spec:
- replicas: 1
- selector:
- matchLabels:
- app: ds-alert
- template:
- metadata:
- labels:
- app: ds-alert
- spec:
- containers:
- - name: ds-alert
- image: apache/dolphinscheduler-alert-server:3.0.0-alpha
- imagePullPolicy: "IfNotPresent"
- ports:
- - containerPort: 50052
- env:
- - name: SPRING_DATASOURCE_DRIVER_CLASS_NAME
- value: org.postgresql.Driver
- - name: SPRING_DATASOURCE_URL
- value: "jdbc:postgresql://172.17.0.4:5432/dolphinscheduler"
- - name: SPRING_DATASOURCE_USERNAME
- value: "postgresadmin"
- - name: SPRING_DATASOURCE_PASSWORD
- value: "admin12345"
\ No newline at end of file
diff --git a/config/ds/alert/ds-alert-service.yaml b/config/ds/alert/ds-alert-service.yaml
deleted file mode 100644
index 21bc518..0000000
--- a/config/ds/alert/ds-alert-service.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
- name: ds-alert-service
- namespace: ds
-spec:
- type: NodePort
- ports:
- - protocol: TCP
- port: 50052
- targetPort: 50052
- selector:
- app: ds-alert
-
-
-
-
diff --git a/config/rbac/dsalert_editor_role.yaml b/config/rbac/dsalert_editor_role.yaml
new file mode 100644
index 0000000..b0c7a8a
--- /dev/null
+++ b/config/rbac/dsalert_editor_role.yaml
@@ -0,0 +1,24 @@
+# permissions for end users to edit dsalerts.
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: dsalert-editor-role
+rules:
+- apiGroups:
+ - ds.apache.dolphinscheduler.dev
+ resources:
+ - dsalerts
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - ds.apache.dolphinscheduler.dev
+ resources:
+ - dsalerts/status
+ verbs:
+ - get
diff --git a/config/rbac/dsalert_viewer_role.yaml b/config/rbac/dsalert_viewer_role.yaml
new file mode 100644
index 0000000..a365e12
--- /dev/null
+++ b/config/rbac/dsalert_viewer_role.yaml
@@ -0,0 +1,20 @@
+# permissions for end users to view dsalerts.
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: dsalert-viewer-role
+rules:
+- apiGroups:
+ - ds.apache.dolphinscheduler.dev
+ resources:
+ - dsalerts
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ds.apache.dolphinscheduler.dev
+ resources:
+ - dsalerts/status
+ verbs:
+ - get
diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml
index 9ef8743..5b6627c 100644
--- a/config/rbac/role.yaml
+++ b/config/rbac/role.yaml
@@ -31,6 +31,32 @@ rules:
- get
- patch
- update
+- apiGroups:
+ - ds.apache.dolphinscheduler.dev
+ resources:
+ - dsapis
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - ds.apache.dolphinscheduler.dev
+ resources:
+ - dsapis/finalizers
+ verbs:
+ - update
+- apiGroups:
+ - ds.apache.dolphinscheduler.dev
+ resources:
+ - dsapis/status
+ verbs:
+ - get
+ - patch
+ - update
- apiGroups:
- ds.apache.dolphinscheduler.dev
resources:
diff --git a/config/samples/ds_v1alpha1_dsalert.yaml b/config/samples/ds_v1alpha1_dsalert.yaml
new file mode 100644
index 0000000..56c5647
--- /dev/null
+++ b/config/samples/ds_v1alpha1_dsalert.yaml
@@ -0,0 +1,16 @@
+apiVersion: ds.apache.dolphinscheduler.dev/v1alpha1
+kind: DSAlert
+metadata:
+ name: ds-alert
+ namespace: ds
+ labels:
+ app: ds-alert
+spec:
+ replicas: 1
+ version: 3.0.0-alpha
+ repository: apache/dolphinscheduler-alert-server
+ datasource:
+ drive_name: "org.postgresql.Driver"
+ url: "jdbc:postgresql://172.17.0.4:5432/dolphinscheduler"
+ username: "postgresadmin"
+ password: "admin12345"
diff --git a/controllers/alert_reconcile.go b/controllers/alert_reconcile.go
index 8326f23..0f6d0b1 100644
--- a/controllers/alert_reconcile.go
+++ b/controllers/alert_reconcile.go
@@ -27,18 +27,18 @@ import (
func createAlertService(cluster *dsv1alpha1.DSAlert) *corev1.Service {
service := corev1.Service{
ObjectMeta: metav1.ObjectMeta{
- Name: "ds-alert-service",
+ Name: dsv1alpha1.DsAlertServiceValue,
Namespace: cluster.Namespace,
- Labels: map[string]string{dsv1alpha1.DsAppName: "ds-alert-service"},
+ Labels: map[string]string{dsv1alpha1.DsAppName: dsv1alpha1.DsAlertServiceValue},
},
Spec: corev1.ServiceSpec{
- Selector: map[string]string{dsv1alpha1.DsAppName: "ds-alert"},
+ Selector: map[string]string{dsv1alpha1.DsAppName: dsv1alpha1.DsAlert},
Ports: []corev1.ServicePort{
{
Protocol: corev1.ProtocolTCP,
- Port: *int32Ptr(int32(50052)),
+ Port: *int32Ptr(int32(dsv1alpha1.DsAlertPort)),
TargetPort: intstr.IntOrString{
- IntVal: 50052,
+ IntVal: dsv1alpha1.DsAlertPort,
},
},
},
@@ -50,25 +50,25 @@ func createAlertService(cluster *dsv1alpha1.DSAlert) *corev1.Service {
func createAlertDeployment(cluster *dsv1alpha1.DSAlert) *v1.Deployment {
alertDeployment := v1.Deployment{
ObjectMeta: metav1.ObjectMeta{
- Name: "ds-alert-deployment",
- Namespace: "ds",
+ Name: dsv1alpha1.DsAlertDeploymentValue,
+ Namespace: cluster.Namespace,
},
Spec: v1.DeploymentSpec{
Replicas: int32Ptr(int32(cluster.Spec.Replicas)),
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
- "app": "ds-alert",
+ dsv1alpha1.DsAppName: dsv1alpha1.DsAlert,
},
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
- "app": "ds-alert",
+ dsv1alpha1.DsAppName: dsv1alpha1.DsAlert,
},
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{{
- Name: "ds-alert",
+ Name: dsv1alpha1.DsAlert,
Image: ImageName(cluster.Spec.Repository, cluster.Spec.Version),
ImagePullPolicy: corev1.PullIfNotPresent,
Env: []corev1.EnvVar{
@@ -90,7 +90,7 @@ func createAlertDeployment(cluster *dsv1alpha1.DSAlert) *v1.Deployment {
},
},
Ports: []corev1.ContainerPort{{
- ContainerPort: 50052,
+ ContainerPort: dsv1alpha1.DsAlertPort,
},
},
},
diff --git a/controllers/alert_reconcile.go b/controllers/api_reconcile.go
similarity index 72%
copy from controllers/alert_reconcile.go
copy to controllers/api_reconcile.go
index 8326f23..0062c14 100644
--- a/controllers/alert_reconcile.go
+++ b/controllers/api_reconcile.go
@@ -24,22 +24,24 @@ import (
"k8s.io/apimachinery/pkg/util/intstr"
)
-func createAlertService(cluster *dsv1alpha1.DSAlert) *corev1.Service {
+func createApiService(cluster *dsv1alpha1.DSApi) *corev1.Service {
service := corev1.Service{
ObjectMeta: metav1.ObjectMeta{
- Name: "ds-alert-service",
+ Name: dsv1alpha1.DsApiServiceValue,
Namespace: cluster.Namespace,
- Labels: map[string]string{dsv1alpha1.DsAppName: "ds-alert-service"},
+ Labels: map[string]string{dsv1alpha1.DsAppName: dsv1alpha1.DsApiServiceValue},
},
Spec: corev1.ServiceSpec{
- Selector: map[string]string{dsv1alpha1.DsAppName: "ds-alert"},
+ Type: corev1.ServiceTypeNodePort,
+ Selector: map[string]string{dsv1alpha1.DsAppName: dsv1alpha1.DsApi},
Ports: []corev1.ServicePort{
{
Protocol: corev1.ProtocolTCP,
- Port: *int32Ptr(int32(50052)),
+ Port: *int32Ptr(int32(dsv1alpha1.DsApiPort)),
TargetPort: intstr.IntOrString{
- IntVal: 50052,
+ IntVal: dsv1alpha1.DsApiPort,
},
+ NodePort: cluster.Spec.NodePort,
},
},
},
@@ -47,28 +49,28 @@ func createAlertService(cluster *dsv1alpha1.DSAlert) *corev1.Service {
return &service
}
-func createAlertDeployment(cluster *dsv1alpha1.DSAlert) *v1.Deployment {
- alertDeployment := v1.Deployment{
+func createApiDeployment(cluster *dsv1alpha1.DSApi) *v1.Deployment {
+ ApiDeployment := v1.Deployment{
ObjectMeta: metav1.ObjectMeta{
- Name: "ds-alert-deployment",
- Namespace: "ds",
+ Name: dsv1alpha1.DsApiDeploymentValue,
+ Namespace: cluster.Namespace,
},
Spec: v1.DeploymentSpec{
Replicas: int32Ptr(int32(cluster.Spec.Replicas)),
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
- "app": "ds-alert",
+ dsv1alpha1.DsAppName: dsv1alpha1.DsApi,
},
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
- "app": "ds-alert",
+ dsv1alpha1.DsAppName: dsv1alpha1.DsApi,
},
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{{
- Name: "ds-alert",
+ Name: dsv1alpha1.DsApi,
Image: ImageName(cluster.Spec.Repository, cluster.Spec.Version),
ImagePullPolicy: corev1.PullIfNotPresent,
Env: []corev1.EnvVar{
@@ -90,7 +92,7 @@ func createAlertDeployment(cluster *dsv1alpha1.DSAlert) *v1.Deployment {
},
},
Ports: []corev1.ContainerPort{{
- ContainerPort: 50052,
+ ContainerPort: dsv1alpha1.DsApiPort,
},
},
},
@@ -99,9 +101,5 @@ func createAlertDeployment(cluster *dsv1alpha1.DSAlert) *v1.Deployment {
},
},
}
- return &alertDeployment
-}
-
-func int32Ptr(i int32) *int32 {
- return &i
+ return &ApiDeployment
}
diff --git a/controllers/dsalert_controller.go b/controllers/dsalert_controller.go
new file mode 100644
index 0000000..787585b
--- /dev/null
+++ b/controllers/dsalert_controller.go
@@ -0,0 +1,221 @@
+/*
+Copyright 2022.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controllers
+
+import (
+ "context"
+ dsv1alpha1 "dolphinscheduler-operator/api/v1alpha1"
+ v1 "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/client-go/tools/record"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+ "time"
+)
+
+// DSAlertReconciler reconciles a DSAlert object
+var (
+ alertLogger = ctrl.Log.WithName("DSAlert-controller")
+)
+
+type DSAlertReconciler struct {
+ client.Client
+ Scheme *runtime.Scheme
+ Recorder record.EventRecorder
+}
+
+//+kubebuilder:rbac:groups=ds.apache.dolphinscheduler.dev,resources=dsalerts,verbs=get;list;watch;create;update;patch;delete
+//+kubebuilder:rbac:groups=ds.apache.dolphinscheduler.dev,resources=dsalerts/status,verbs=get;update;patch
+//+kubebuilder:rbac:groups=ds.apache.dolphinscheduler.dev,resources=dsalerts/finalizers,verbs=update
+
+// Reconcile is part of the main kubernetes reconciliation loop which aims to
+// move the current state of the cluster closer to the desired state. It
+// compares the state specified by the DSAlert object against the actual cluster state, and then
+// perform operations to make the cluster state reflect the state specified by
+// the user.
+//
+// For more details, check Reconcile and its Result here:
+// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.11.0/pkg/reconcile
+func (r *DSAlertReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+
+ alertLogger.Info("dmAlert start reconcile logic")
+ defer alertLogger.Info("dmAlert Reconcile end ---------------------------------------------")
+
+ cluster := &dsv1alpha1.DSAlert{}
+
+ if err := r.Client.Get(ctx, req.NamespacedName, cluster); err != nil {
+ if errors.IsNotFound(err) {
+ r.Recorder.Event(cluster, corev1.EventTypeWarning, "dsAlert is not Found", "dsAlert is not Found")
+ return ctrl.Result{}, nil
+ }
+ return ctrl.Result{}, err
+ }
+ desired := cluster.DeepCopy()
+
+ // Handler finalizer
+ // examine DeletionTimestamp to determine if object is under deletion
+ if cluster.ObjectMeta.DeletionTimestamp.IsZero() {
+ // The object is not being deleted, so if it does not have our finalizer,
+ // then let's add the finalizer and update the object. This is equivalent to
+ // registering our finalizer.
+ if !controllerutil.ContainsFinalizer(desired, dsv1alpha1.FinalizerName) {
+ controllerutil.AddFinalizer(desired, dsv1alpha1.FinalizerName)
+ if err := r.Update(ctx, desired); err != nil {
+ return ctrl.Result{}, err
+ }
+ }
+ } else {
+ // The object is being deleted
+
+ if controllerutil.ContainsFinalizer(desired, dsv1alpha1.FinalizerName) {
+ // our finalizer is present, so lets handle any external dependency
+ if err := r.ensureDSAlertDeleted(ctx, cluster); err != nil {
+ return ctrl.Result{}, err
+ }
+
+ // remove our finalizer from the list and update it.
+ controllerutil.RemoveFinalizer(desired, dsv1alpha1.FinalizerName)
+ if err := r.Update(ctx, desired); err != nil {
+ return ctrl.Result{}, err
+ }
+ }
+ // Stop reconciliation as the item is being deleted
+ return ctrl.Result{}, nil
+ }
+
+ if cluster.Spec.Paused {
+ alertLogger.Info("ds-alert control has been paused: ", "ds-alert-name", cluster.Name)
+ desired.Status.ControlPaused = true
+ if err := r.Status().Patch(ctx, desired, client.MergeFrom(cluster)); err != nil {
+ return ctrl.Result{}, err
+ }
+ r.Recorder.Event(cluster, corev1.EventTypeNormal, "the spec status is paused", "do nothing")
+ return ctrl.Result{}, nil
+ }
+
+ // 1. First time we see the ds-alert cluster, initialize it
+ if cluster.Status.Phase == dsv1alpha1.DsPhaseNone {
+ desired.Status.Phase = dsv1alpha1.DsPhaseCreating
+ alertLogger.Info("phase had been changed from none ---> creating")
+ err := r.Client.Status().Patch(ctx, desired, client.MergeFrom(cluster))
+ return ctrl.Result{RequeueAfter: 100 * time.Millisecond}, err
+ }
+
+ //2 ensure the alert service
+ alertLogger.Info("Ensuring alert service")
+
+ if err := r.ensureAlertService(ctx, cluster); err != nil {
+ return ctrl.Result{Requeue: true}, nil
+ }
+
+ if requeue, err := r.ensureAlertDeployment(ctx, cluster); err != nil {
+ return ctrl.Result{Requeue: false}, err
+ } else {
+ if !requeue {
+ return ctrl.Result{Requeue: false}, nil
+ }
+ }
+
+ alertLogger.Info("******************************************************")
+ desired.Status.Phase = dsv1alpha1.DsPhaseNone
+ if err := r.Update(ctx, desired); err != nil {
+ return ctrl.Result{}, err
+ }
+ return ctrl.Result{Requeue: false}, nil
+}
+
+// SetupWithManager sets up the controller with the Manager.
+func (r *DSAlertReconciler) SetupWithManager(mgr ctrl.Manager) error {
+ return ctrl.NewControllerManagedBy(mgr).
+ For(&dsv1alpha1.DSAlert{}).
+ Owns(&v1.Deployment{}).
+ Owns(&corev1.Service{}).
+ Owns(&corev1.Pod{}).
+ Complete(r)
+}
+
+func (r *DSAlertReconciler) ensureDSAlertDeleted(ctx context.Context, DSAlert *dsv1alpha1.DSAlert) error {
+ if err := r.Client.Delete(ctx, DSAlert, client.PropagationPolicy(metav1.DeletePropagationOrphan)); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (r *DSAlertReconciler) ensureAlertService(ctx context.Context, cluster *dsv1alpha1.DSAlert) error {
+ // 1. Client service
+ service := &corev1.Service{}
+ namespacedName := types.NamespacedName{Namespace: cluster.Namespace, Name: dsv1alpha1.DsAlertServiceValue}
+ if err := r.Client.Get(ctx, namespacedName, service); err != nil {
+ // Local cache not found
+ logger.Info("get service error")
+ if apierrors.IsNotFound(err) {
+ service = createAlertService(cluster)
+ if err := controllerutil.SetControllerReference(cluster, service, r.Scheme); err != nil {
+ logger.Info("create alert service error")
+ return err
+ }
+ // The remote object may already exist; in that case Create returns an error, and on the next reconcile this branch will not execute
+ if err := r.Client.Create(ctx, service); err != nil {
+ logger.Info("create alert service error1")
+ return err
+ }
+ logger.Info("the alert service had been created")
+ }
+ }
+ return nil
+}
+
+func (r *DSAlertReconciler) ensureAlertDeployment(ctx context.Context, cluster *dsv1alpha1.DSAlert) (bool, error) {
+ deployment := &v1.Deployment{}
+ deploymentNamespaceName := types.NamespacedName{Namespace: cluster.Namespace, Name: dsv1alpha1.DsAlertDeploymentValue}
+ if err := r.Client.Get(ctx, deploymentNamespaceName, deployment); err != nil {
+ if apierrors.IsNotFound(err) {
+ deployment = createAlertDeployment(cluster)
+ }
+ if err := controllerutil.SetControllerReference(cluster, deployment, r.Scheme); err != nil {
+ return true, err
+ }
+ if err := r.Client.Create(ctx, deployment); err == nil {
+ return false, nil
+ } else {
+ return true, err
+ }
+ } else {
+ err := r.updateAlertDeployment(ctx, deployment, cluster)
+ if err != nil {
+ return false, err
+ }
+ }
+
+ return true, nil
+}
+
+// updateAlertDeployment only reconciles the replicas, image repository and version properties.
+func (r *DSAlertReconciler) updateAlertDeployment(ctx context.Context, deployment *v1.Deployment, cluster *dsv1alpha1.DSAlert) error {
+ deployment.Spec.Replicas = int32Ptr(int32(cluster.Spec.Replicas))
+ deployment.Spec.Template.Spec.Containers[0].Image = ImageName(cluster.Spec.Repository, cluster.Spec.Version)
+ if err := r.Client.Update(ctx, deployment); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/controllers/dsmaster_controller.go b/controllers/dsmaster_controller.go
index 559d61b..f863d45 100644
--- a/controllers/dsmaster_controller.go
+++ b/controllers/dsmaster_controller.go
@@ -43,7 +43,6 @@ import (
const (
dsMasterLabel = "ds-master"
dsServiceLabel = "ds-operator-service"
- dsServiceName = "ds-operator-service"
)
var (
@@ -54,7 +53,7 @@ var (
type DSMasterReconciler struct {
client.Client
Scheme *runtime.Scheme
- Recorder record.EventRecorder
+ recorder record.EventRecorder
clusters sync.Map
resyncCh chan event.GenericEvent
}
@@ -79,7 +78,7 @@ func (r *DSMasterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c
if err := r.Client.Get(ctx, req.NamespacedName, cluster); err != nil {
if errors.IsNotFound(err) {
- r.Recorder.Event(cluster, corev1.EventTypeWarning, "dsMaster is not Found", "dsMaster is not Found")
+ r.recorder.Event(cluster, corev1.EventTypeWarning, "dsMaster is not Found", "dsMaster is not Found")
return ctrl.Result{}, nil
}
return ctrl.Result{}, err
@@ -125,7 +124,7 @@ func (r *DSMasterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c
if err := r.Status().Patch(ctx, desired, client.MergeFrom(cluster)); err != nil {
return ctrl.Result{}, err
}
- r.Recorder.Event(cluster, corev1.EventTypeNormal, "the spec status is paused", "do nothing")
+ r.recorder.Event(cluster, corev1.EventTypeNormal, "the spec status is paused", "do nothing")
return ctrl.Result{}, nil
}
@@ -179,7 +178,7 @@ func (r *DSMasterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c
func (r *DSMasterReconciler) SetupWithManager(mgr ctrl.Manager) error {
r.clusters = sync.Map{}
r.resyncCh = make(chan event.GenericEvent)
- r.Recorder = mgr.GetEventRecorderFor("master-controller")
+ r.recorder = mgr.GetEventRecorderFor("master-controller")
filter := &Predicate{}
return ctrl.NewControllerManagedBy(mgr).
@@ -217,7 +216,7 @@ func (r *DSMasterReconciler) ensureScaled(ctx context.Context, cluster *dsv1alph
if len(ms) < cluster.Spec.Replicas {
err = r.createMember(ctx, cluster)
if err != nil {
- r.Recorder.Event(cluster, corev1.EventTypeWarning, "cannot create the new ds-master pod", "the ds-master pod had been created failed")
+ r.recorder.Event(cluster, corev1.EventTypeWarning, "cannot create the new ds-master pod", "the ds-master pod had been created failed")
return true, err
}
// Cluster modified, next reconcile will enter r.ensureMembers()
diff --git a/controllers/dsworker_controller.go b/controllers/dsworker_controller.go
index 00a97af..e16abc7 100644
--- a/controllers/dsworker_controller.go
+++ b/controllers/dsworker_controller.go
@@ -55,7 +55,6 @@ var (
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
-// TODO(user): Modify the Reconcile function to compare the state specified by
// the DSWorker object against the actual cluster state, and then
// perform operations to make the cluster state reflect the state specified by
// the user.
@@ -157,6 +156,7 @@ func (r *DSWorkerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c
func (r *DSWorkerReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&dsv1alpha1.DSWorker{}).
+ Owns(&corev1.Pod{}).
Complete(r)
}
diff --git a/controllers/suite_test.go b/controllers/suite_test.go
index 4e379b7..4baf528 100644
--- a/controllers/suite_test.go
+++ b/controllers/suite_test.go
@@ -65,7 +65,7 @@ var _ = BeforeSuite(func() {
err = dsv1alpha1.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
- //+kubebuilder:scaffold:scheme
+ //+kubebuilder:scaffold:scheme
k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
Expect(err).NotTo(HaveOccurred())
diff --git a/main.go b/main.go
index 9f12997..56cc422 100644
--- a/main.go
+++ b/main.go
@@ -99,6 +99,13 @@ func main() {
setupLog.Error(err, "unable to create controller", "controller", "DSAlert")
os.Exit(1)
}
+ if err = (&controllers.DSApiReconciler{
+ Client: mgr.GetClient(),
+ Scheme: mgr.GetScheme(),
+ }).SetupWithManager(mgr); err != nil {
+ setupLog.Error(err, "unable to create controller", "controller", "DSApi")
+ os.Exit(1)
+ }
//+kubebuilder:scaffold:builder
if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {