Posted to notifications@shardingsphere.apache.org by zh...@apache.org on 2023/04/07 02:19:14 UTC

[shardingsphere-on-cloud] branch main updated: fix: fix proxy controller according to golangci-lint (#299)

This is an automated email from the ASF dual-hosted git repository.

zhonghongsheng pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/shardingsphere-on-cloud.git


The following commit(s) were added to refs/heads/main by this push:
     new c42769d  fix: fix proxy controller according to golangci-lint (#299)
c42769d is described below

commit c42769d72a7fee3f9ad025568235998b16ff3267
Author: liyao <ma...@126.com>
AuthorDate: Fri Apr 7 10:19:09 2023 +0800

    fix: fix proxy controller according to golangci-lint (#299)
    
    * chore: update golangci-lint
    
    Signed-off-by: mlycore <ma...@126.com>
    
    * chore: fix according to golangci-lint
    
    Signed-off-by: mlycore <ma...@126.com>
    
    * refactor: refactor update service with computenode
    
    Signed-off-by: mlycore <ma...@126.com>
    
    * fix: update service
    
    Signed-off-by: mlycore <ma...@126.com>
    
    * refactor: refactor proxy controller reconcile hpa, deployment and configmap according to golangci-lint
    
    Signed-off-by: mlycore <ma...@126.com>
    
    * chore: update commit-msg action
    
    Signed-off-by: mlycore <ma...@126.com>
    
    ---------
    
    Signed-off-by: mlycore <ma...@126.com>
---
 .github/workflows/commit-msg.yml                   |   2 +-
 shardingsphere-operator/.golangci.yml              |   6 +-
 .../pkg/controllers/compute_node_controller.go     | 128 ++++++++++++++-------
 .../pkg/controllers/proxy_controller.go            | 103 +++++++++--------
 .../pkg/reconcile/computenode/configmap.go         |  23 ++--
 .../pkg/reconcile/computenode/service.go           |  46 ++++----
 6 files changed, 186 insertions(+), 122 deletions(-)

diff --git a/.github/workflows/commit-msg.yml b/.github/workflows/commit-msg.yml
index 1b58530..f9fae1c 100644
--- a/.github/workflows/commit-msg.yml
+++ b/.github/workflows/commit-msg.yml
@@ -36,6 +36,6 @@ jobs:
           key: ${{ runner.os }}-lint-commit-message
       - name: Lint commit message
         run: |
-          ! git log --oneline ${{ github.event.pull_request.base.sha }}... \
+            git log --oneline ${{ github.event.pull_request.base.sha }}... \
             | grep -vP '^\w{8} Merge ' \
             | grep -vP '^\w{8} (feat|fix|build|chore|docs|style|refactor|perf|test|ci)(\(\w+(-\w+)?\))?:(\s*).*'
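
The lint step runs git log --oneline over the pull request's commits, drops merge commits, and then drops every subject carrying a Conventional Commits type prefix; this change removes the leading `!` that previously inverted the pipeline's exit status. A minimal sketch (not part of the commit) of the same subject check in Go, without the `^\w{8} ` short-hash prefix that --oneline adds:

    package main

    import (
        "fmt"
        "regexp"
    )

    // The same pattern the workflow greps for, minus the short-hash prefix.
    var conventional = regexp.MustCompile(
        `^(feat|fix|build|chore|docs|style|refactor|perf|test|ci)(\(\w+(-\w+)?\))?:(\s*).*`)

    func main() {
        for _, subject := range []string{
            "fix: fix proxy controller according to golangci-lint (#299)",
            "chore(operator): update golangci-lint",
            "update readme", // no type prefix: rejected
        } {
            fmt.Printf("%-55s %v\n", subject, conventional.MatchString(subject))
        }
    }
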
diff --git a/shardingsphere-operator/.golangci.yml b/shardingsphere-operator/.golangci.yml
index b1a4835..b57c901 100644
--- a/shardingsphere-operator/.golangci.yml
+++ b/shardingsphere-operator/.golangci.yml
@@ -58,7 +58,7 @@ linters:
     - varnamelen
     - gocritic
     #- exhaustruct
-    #- nestif
+    - nestif
     #- wsl
     #- gocognit
 # Refers: https://gist.github.com/maratori/47a4d00457a92aa426dbd48a18776322
@@ -165,10 +165,10 @@ linters-settings:
     # Report pre-allocation suggestions on for loops.
     # Default: false
     for-loops: true
-  #nestif:
+  nestif:
     # Minimal complexity of if statements to report.
     # Default: 5
-  # min-complexity: 4
+    min-complexity: 4
   misspell:
     # Correct spellings using locale preferences for US or UK.
     # Setting locale to US will correct the British spelling of 'colour' to 'color'.
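
The nestif linter is now enabled rather than commented out, with its reporting threshold lowered from the default of 5 to 4; it flags deeply nested if statements, which is what drives the controller refactors below. A rough illustration (not from the repository) of the shape it reports and the early-return shape the commit moves toward:

    package main

    import "fmt"

    // Illustrative only: an if chain of roughly the depth that nestif
    // starts reporting at min-complexity 4.
    func nested(a, b, c, d bool) string {
        if a {
            if b {
                if c {
                    if d {
                        return "deep"
                    }
                }
            }
        }
        return ""
    }

    // The equivalent guard-clause form, which stays below the threshold.
    func flattened(a, b, c, d bool) string {
        if !(a && b && c && d) {
            return ""
        }
        return "deep"
    }

    func main() {
        fmt.Println(nested(true, true, true, true) == flattened(true, true, true, true)) // true
    }
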
diff --git a/shardingsphere-operator/pkg/controllers/compute_node_controller.go b/shardingsphere-operator/pkg/controllers/compute_node_controller.go
index 7d7f862..4069a7a 100644
--- a/shardingsphere-operator/pkg/controllers/compute_node_controller.go
+++ b/shardingsphere-operator/pkg/controllers/compute_node_controller.go
@@ -29,7 +29,7 @@ import (
 
 	"github.com/go-logr/logr"
 	appsv1 "k8s.io/api/apps/v1"
-	v1 "k8s.io/api/core/v1"
+	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -59,9 +59,9 @@ func (r *ComputeNodeReconciler) SetupWithManager(mgr ctrl.Manager) error {
 	return ctrl.NewControllerManagedBy(mgr).
 		For(&v1alpha1.ComputeNode{}).
 		Owns(&appsv1.Deployment{}).
-		Owns(&v1.Pod{}).
-		Owns(&v1.Service{}).
-		Owns(&v1.ConfigMap{}).
+		Owns(&corev1.Pod{}).
+		Owns(&corev1.Service{}).
+		Owns(&corev1.ConfigMap{}).
 		Complete(r)
 }
 
@@ -162,39 +162,85 @@ func (r *ComputeNodeReconciler) createService(ctx context.Context, cn *v1alpha1.
 	return err
 }
 
-func (r *ComputeNodeReconciler) updateService(ctx context.Context, cn *v1alpha1.ComputeNode, cur *v1.Service) error {
-	if cn.Spec.ServiceType == v1.ServiceTypeNodePort {
-		for idx := range cur.Spec.Ports {
-			for i := range cn.Spec.PortBindings {
-				if cur.Spec.Ports[idx].Name == cn.Spec.PortBindings[i].Name {
-					if cn.Spec.PortBindings[i].NodePort == 0 {
-						cn.Spec.PortBindings[i].NodePort = cur.Spec.Ports[idx].NodePort
-						if err := r.Update(ctx, cn); err != nil {
-							return err
-						}
-					}
-					break
-				}
-			}
+func (r *ComputeNodeReconciler) updateService(ctx context.Context, cn *v1alpha1.ComputeNode, cur *corev1.Service) error {
+	// if cn.Spec.ServiceType == v1.ServiceTypeNodePort {
+	// 	for idx := range cur.Spec.Ports {
+	// 		for i := range cn.Spec.PortBindings {
+	// 			if cur.Spec.Ports[idx].Name == cn.Spec.PortBindings[i].Name {
+	// 				if cn.Spec.PortBindings[i].NodePort == 0 {
+	// 					cn.Spec.PortBindings[i].NodePort = cur.Spec.Ports[idx].NodePort
+	// 					if err := r.Update(ctx, cn); err != nil {
+	// 						return err
+	// 					}
+	// 				}
+	// 				break
+	// 			}
+	// 		}
+	// 	}
+	// }
+	// if cn.Spec.ServiceType == v1.ServiceTypeClusterIP {
+	// 	for idx := range cn.Spec.PortBindings {
+	// 		if cn.Spec.PortBindings[idx].NodePort != 0 {
+	// 			cn.Spec.PortBindings[idx].NodePort = 0
+	// 			if err := r.Update(ctx, cn); err != nil {
+	// 				return err
+	// 			}
+	// 			break
+	// 		}
+	// 	}
+	// }
+
+	switch cn.Spec.ServiceType {
+	case corev1.ServiceTypeClusterIP:
+		updateServiceClusterIP(cn.Spec.PortBindings)
+		if err := r.Update(ctx, cn); err != nil {
+			return err
+		}
+	case corev1.ServiceTypeExternalName:
+		fallthrough
+	case corev1.ServiceTypeLoadBalancer:
+		fallthrough
+	case corev1.ServiceTypeNodePort:
+		updateServiceNodePort(cn.Spec.PortBindings, cur.Spec.Ports)
+		if err := r.Update(ctx, cn); err != nil {
+			return err
 		}
 	}
-	if cn.Spec.ServiceType == v1.ServiceTypeClusterIP {
-		for idx := range cn.Spec.PortBindings {
-			if cn.Spec.PortBindings[idx].NodePort != 0 {
-				cn.Spec.PortBindings[idx].NodePort = 0
-				if err := r.Update(ctx, cn); err != nil {
-					return err
+
+	exp := reconcile.UpdateService(cn, cur)
+	return r.Update(ctx, exp)
+}
+
+func updateServiceNodePort(portBindings []v1alpha1.PortBinding, svcports []corev1.ServicePort) {
+	for idx := range svcports {
+		for i := range portBindings {
+			if svcports[idx].Name == portBindings[i].Name {
+				if portBindings[i].NodePort == 0 {
+					portBindings[i].NodePort = svcports[idx].NodePort
+					break
+					// if err := r.Update(ctx, cn); err != nil {
+					// 	return err
+					// }
 				}
 				break
 			}
 		}
 	}
+}
 
-	exp := reconcile.UpdateService(cn, cur)
-	return r.Update(ctx, exp)
+func updateServiceClusterIP(portBindings []v1alpha1.PortBinding) {
+	for idx := range portBindings {
+		if portBindings[idx].NodePort != 0 {
+			portBindings[idx].NodePort = 0
+			// if err := r.Update(ctx, cn); err != nil {
+			// 	return err
+			// }
+			break
+		}
+	}
 }
 
-func (r *ComputeNodeReconciler) getServiceByNamespacedName(ctx context.Context, namespacedName types.NamespacedName) (*v1.Service, bool, error) {
+func (r *ComputeNodeReconciler) getServiceByNamespacedName(ctx context.Context, namespacedName types.NamespacedName) (*corev1.Service, bool, error) {
 	svc, err := r.Service.GetByNamespacedName(ctx, namespacedName)
 	if err != nil {
 		return nil, false, err
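
The NodePort and ClusterIP branches of updateService are now extracted into two helpers driven by a switch on cn.Spec.ServiceType (ExternalName and LoadBalancer fall through to the NodePort path), with the old inline version kept above as a comment. Both helpers mutate the PortBinding slice in place and leave the single r.Update call to the caller. A self-contained sketch of their behavior, with reduced stand-ins for v1alpha1.PortBinding and corev1.ServicePort:

    package main

    import "fmt"

    // Reduced stand-ins, cut down to the fields the helpers touch.
    type PortBinding struct {
        Name     string
        NodePort int32
    }

    type ServicePort struct {
        Name     string
        NodePort int32
    }

    // Mirrors updateServiceNodePort: when the user left NodePort as 0, copy
    // the port the API server allocated back into the ComputeNode spec.
    func updateServiceNodePort(bindings []PortBinding, ports []ServicePort) {
        for i := range ports {
            for j := range bindings {
                if ports[i].Name == bindings[j].Name {
                    if bindings[j].NodePort == 0 {
                        bindings[j].NodePort = ports[i].NodePort
                    }
                    break
                }
            }
        }
    }

    // Mirrors updateServiceClusterIP: a ClusterIP Service carries no
    // NodePort, so the first stale value found is cleared (the committed
    // helper also stops after the first hit).
    func updateServiceClusterIP(bindings []PortBinding) {
        for i := range bindings {
            if bindings[i].NodePort != 0 {
                bindings[i].NodePort = 0
                break
            }
        }
    }

    func main() {
        b := []PortBinding{{Name: "proxy"}}
        updateServiceNodePort(b, []ServicePort{{Name: "proxy", NodePort: 30307}})
        fmt.Println(b[0].NodePort) // 30307
        updateServiceClusterIP(b)
        fmt.Println(b[0].NodePort) // 0
    }
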
@@ -214,12 +260,12 @@ func (r *ComputeNodeReconciler) createConfigMap(ctx context.Context, cn *v1alpha
 	return err
 }
 
-func (r *ComputeNodeReconciler) updateConfigMap(ctx context.Context, cn *v1alpha1.ComputeNode, cm *v1.ConfigMap) error {
+func (r *ComputeNodeReconciler) updateConfigMap(ctx context.Context, cn *v1alpha1.ComputeNode, cm *corev1.ConfigMap) error {
 	exp := reconcile.UpdateConfigMap(cn, cm)
 	return r.Update(ctx, exp)
 }
 
-func (r *ComputeNodeReconciler) getConfigMapByNamespacedName(ctx context.Context, namespacedName types.NamespacedName) (*v1.ConfigMap, bool, error) {
+func (r *ComputeNodeReconciler) getConfigMapByNamespacedName(ctx context.Context, namespacedName types.NamespacedName) (*corev1.ConfigMap, bool, error) {
 	cm, err := r.ConfigMap.GetByNamespacedName(ctx, namespacedName)
 	if err != nil {
 		return nil, false, err
@@ -243,12 +289,12 @@ func (r *ComputeNodeReconciler) reconcileConfigMap(ctx context.Context, cn *v1al
 }
 
 func (r *ComputeNodeReconciler) reconcileStatus(ctx context.Context, cn *v1alpha1.ComputeNode) error {
-	podlist := &v1.PodList{}
+	podlist := &corev1.PodList{}
 	if err := r.List(ctx, podlist, client.InNamespace(cn.Namespace), client.MatchingLabels(cn.Spec.Selector.MatchLabels)); err != nil {
 		return err
 	}
 
-	service := &v1.Service{}
+	service := &corev1.Service{}
 	if err := r.Get(ctx, types.NamespacedName{
 		Namespace: cn.Namespace,
 		Name:      cn.Name,
@@ -271,12 +317,12 @@ func (r *ComputeNodeReconciler) reconcileStatus(ctx context.Context, cn *v1alpha
 	return r.Status().Update(ctx, rt)
 }
 
-func getReadyProxyInstances(podlist *v1.PodList) int32 {
+func getReadyProxyInstances(podlist *corev1.PodList) int32 {
 	var cnt int32
 	for idx := range podlist.Items {
-		if podlist.Items[idx].Status.Phase == v1.PodRunning {
+		if podlist.Items[idx].Status.Phase == corev1.PodRunning {
 			for i := range podlist.Items[idx].Status.Conditions {
-				if podlist.Items[idx].Status.Conditions[i].Type == v1.PodReady && podlist.Items[idx].Status.Conditions[i].Status == v1.ConditionTrue {
+				if podlist.Items[idx].Status.Conditions[i].Type == corev1.PodReady && podlist.Items[idx].Status.Conditions[i].Status == corev1.ConditionTrue {
 					for j := range podlist.Items[idx].Status.ContainerStatuses {
 						if podlist.Items[idx].Status.ContainerStatuses[j].Name == "shardingsphere-proxy" && podlist.Items[idx].Status.ContainerStatuses[j].Ready {
 							cnt++
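
getReadyProxyInstances counts pods that pass three checks: the pod is Running, its PodReady condition is True, and its shardingsphere-proxy container reports Ready. A hypothetical helper (not in the commit) expressing the same per-pod predicate:

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
    )

    // isReadyProxyPod applies the per-pod test that getReadyProxyInstances
    // runs inside its loops.
    func isReadyProxyPod(pod *corev1.Pod) bool {
        if pod.Status.Phase != corev1.PodRunning {
            return false
        }
        for _, cond := range pod.Status.Conditions {
            if cond.Type != corev1.PodReady || cond.Status != corev1.ConditionTrue {
                continue
            }
            for _, cs := range pod.Status.ContainerStatuses {
                if cs.Name == "shardingsphere-proxy" && cs.Ready {
                    return true
                }
            }
        }
        return false
    }

    func main() {
        pod := &corev1.Pod{}
        pod.Status.Phase = corev1.PodRunning
        pod.Status.Conditions = []corev1.PodCondition{{Type: corev1.PodReady, Status: corev1.ConditionTrue}}
        pod.Status.ContainerStatuses = []corev1.ContainerStatus{{Name: "shardingsphere-proxy", Ready: true}}
        fmt.Println(isReadyProxyPod(pod)) // true
    }
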
@@ -332,7 +378,7 @@ func updateNotReadyConditions(conditions []v1alpha1.ComputeNodeCondition, cond *
 	return cur
 }
 
-func clusterCondition(podlist *v1.PodList) v1alpha1.ComputeNodeCondition {
+func clusterCondition(podlist *corev1.PodList) v1alpha1.ComputeNodeCondition {
 	cond := v1alpha1.ComputeNodeCondition{}
 	if len(podlist.Items) == 0 {
 		return cond
@@ -369,22 +415,22 @@ func clusterCondition(podlist *v1.PodList) v1alpha1.ComputeNodeCondition {
 	//FIXME: do not capture ConditionStarted in some cases
 	for idx := range podlist.Items {
 		switch podlist.Items[idx].Status.Phase {
-		case v1.PodSucceeded:
+		case corev1.PodSucceeded:
 			return condSucceed
-		case v1.PodRunning:
+		case corev1.PodRunning:
 			return condStarted
-		case v1.PodUnknown:
+		case corev1.PodUnknown:
 			return condUnknown
-		case v1.PodPending:
+		case corev1.PodPending:
 			return condDeployed
-		case v1.PodFailed:
+		case corev1.PodFailed:
 			return condFailed
 		}
 	}
 	return cond
 }
 
-func reconcileComputeNodeStatus(podlist *v1.PodList, svc *v1.Service) *v1alpha1.ComputeNodeStatus {
+func reconcileComputeNodeStatus(podlist *corev1.PodList, svc *corev1.Service) *v1alpha1.ComputeNodeStatus {
 	status := &v1alpha1.ComputeNodeStatus{}
 
 	status.Replicas = int32(len(podlist.Items))
diff --git a/shardingsphere-operator/pkg/controllers/proxy_controller.go b/shardingsphere-operator/pkg/controllers/proxy_controller.go
index 627fab7..6bd16ff 100644
--- a/shardingsphere-operator/pkg/controllers/proxy_controller.go
+++ b/shardingsphere-operator/pkg/controllers/proxy_controller.go
@@ -111,66 +111,72 @@ func (r *ProxyReconciler) reconcile(ctx context.Context, req ctrl.Request, rt *v
 }
 
 func (r *ProxyReconciler) reconcileDeployment(ctx context.Context, namespacedName types.NamespacedName) (ctrl.Result, error) {
-	ssproxy, err := r.getRuntimeShardingSphereProxy(ctx, namespacedName)
+	proxy, err := r.getRuntimeShardingSphereProxy(ctx, namespacedName)
 	if err != nil {
 		return ctrl.Result{}, err
 	}
 
 	deploy := &appsv1.Deployment{}
+	err = r.Get(ctx, namespacedName, deploy)
 
-	if err := r.Get(ctx, namespacedName, deploy); err != nil {
-		if !apierrors.IsNotFound(err) {
+	if apierrors.IsNotFound(err) {
+		exp := reconcile.NewDeployment(proxy)
+		if err := r.Create(ctx, exp); err != nil {
 			return ctrl.Result{}, err
-		} else {
-			exp := reconcile.NewDeployment(ssproxy)
-			if err := r.Create(ctx, exp); err != nil {
-				return ctrl.Result{}, err
-			}
 		}
-	} else {
-		act := deploy.DeepCopy()
+	}
 
-		exp := reconcile.UpdateDeployment(ssproxy, act)
-		if err != nil {
-			return ctrl.Result{}, err
-		}
-		if err := r.Update(ctx, exp); err != nil {
-			return ctrl.Result{Requeue: true}, err
-		}
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+
+	act := deploy.DeepCopy()
+	exp := reconcile.UpdateDeployment(proxy, act)
+
+	if err := r.Update(ctx, exp); err != nil {
+		return ctrl.Result{Requeue: true}, err
 	}
 	return ctrl.Result{}, nil
 }
 
 func (r *ProxyReconciler) reconcileHPA(ctx context.Context, namespacedName types.NamespacedName) (ctrl.Result, error) {
-	ssproxy, err := r.getRuntimeShardingSphereProxy(ctx, namespacedName)
+	proxy, err := r.getRuntimeShardingSphereProxy(ctx, namespacedName)
 	if err != nil {
 		return ctrl.Result{}, err
 	}
 
+	// Get the HPA
 	hpa := &autoscalingv2beta2.HorizontalPodAutoscaler{}
+	err = r.Get(ctx, namespacedName, hpa)
 
-	if err := r.Get(ctx, namespacedName, hpa); err != nil {
-		if !apierrors.IsNotFound(err) {
-			return ctrl.Result{}, err
-		} else if ssproxy.Spec.AutomaticScaling != nil && ssproxy.Spec.AutomaticScaling.Enable {
-			exp := reconcile.NewHPA(ssproxy)
+	// If the HPA doesn't exist, create it
+	if apierrors.IsNotFound(err) {
+		if proxy.Spec.AutomaticScaling != nil && proxy.Spec.AutomaticScaling.Enable {
+			exp := reconcile.NewHPA(proxy)
 			if err := r.Create(ctx, exp); err != nil {
 				return ctrl.Result{}, err
 			}
-
 		}
-	} else {
-		if ssproxy.Spec.AutomaticScaling == nil || !ssproxy.Spec.AutomaticScaling.Enable {
-			if err := r.Delete(ctx, hpa); err != nil {
-				return ctrl.Result{}, err
-			}
-		} else {
-			act := hpa.DeepCopy()
-			exp := reconcile.UpdateHPA(ssproxy, act)
-			if err := r.Update(ctx, exp); err != nil {
-				return ctrl.Result{}, err
-			}
+		return ctrl.Result{}, nil
+	}
+
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+
+	// If the HPA exists, but we don't want it, delete it
+	if proxy.Spec.AutomaticScaling == nil || !proxy.Spec.AutomaticScaling.Enable {
+		if err := r.Delete(ctx, hpa); err != nil {
+			return ctrl.Result{}, err
 		}
+		return ctrl.Result{}, nil
+	}
+
+	// If the HPA exists and we want it, update it
+	act := hpa.DeepCopy()
+	exp := reconcile.UpdateHPA(proxy, act)
+	if err := r.Update(ctx, exp); err != nil {
+		return ctrl.Result{}, err
 	}
 
 	return ctrl.Result{}, nil
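
The rewritten reconcileHPA replaces nested if/else with a linear sequence of guard clauses: NotFound creates the HPA when autoscaling is enabled, any other Get error returns, an existing-but-unwanted HPA is deleted, and an existing-and-wanted one is updated; reconcileDeployment above and reconcileService below follow the same Get-then-branch shape. A condensed, self-contained sketch of that control flow (reconcileHPASketch, wantHPA, newHPA, and updateHPA are hypothetical stand-ins for the spec check and the reconcile builders):

    package controllers

    import (
        "context"

        autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
        apierrors "k8s.io/apimachinery/pkg/api/errors"
        "k8s.io/apimachinery/pkg/types"
        "sigs.k8s.io/controller-runtime/pkg/client"
    )

    // wantHPA stands in for
    // proxy.Spec.AutomaticScaling != nil && proxy.Spec.AutomaticScaling.Enable.
    func reconcileHPASketch(ctx context.Context, c client.Client, key types.NamespacedName, wantHPA bool) error {
        hpa := &autoscalingv2beta2.HorizontalPodAutoscaler{}
        err := c.Get(ctx, key, hpa)

        if apierrors.IsNotFound(err) {
            if !wantHPA {
                return nil // nothing exists and nothing is wanted
            }
            return c.Create(ctx, newHPA(key)) // create the missing HPA
        }
        if err != nil {
            return err // any other Get error aborts this pass
        }
        if !wantHPA {
            return c.Delete(ctx, hpa) // exists but autoscaling was disabled
        }
        return c.Update(ctx, updateHPA(hpa.DeepCopy())) // exists and wanted
    }

    // Hypothetical builders, standing in for reconcile.NewHPA and
    // reconcile.UpdateHPA.
    func newHPA(key types.NamespacedName) *autoscalingv2beta2.HorizontalPodAutoscaler {
        hpa := &autoscalingv2beta2.HorizontalPodAutoscaler{}
        hpa.Namespace, hpa.Name = key.Namespace, key.Name
        return hpa
    }

    func updateHPA(act *autoscalingv2beta2.HorizontalPodAutoscaler) *autoscalingv2beta2.HorizontalPodAutoscaler {
        return act
    }
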
@@ -183,24 +189,25 @@ func (r *ProxyReconciler) reconcileService(ctx context.Context, namespacedName t
 	}
 
 	service := &v1.Service{}
+	err = r.Get(ctx, namespacedName, service)
 
-	if err := r.Get(ctx, namespacedName, service); err != nil {
-		if !apierrors.IsNotFound(err) {
-			return ctrl.Result{}, err
-		} else {
-			exp := reconcile.NewService(ssproxy)
-			if err := r.Create(ctx, exp); err != nil {
-				return ctrl.Result{}, err
-			}
-		}
-	} else {
-		act := service.DeepCopy()
-		exp := reconcile.UpdateService(ssproxy, act)
-		if err := r.Update(ctx, exp); err != nil {
+	if apierrors.IsNotFound(err) {
+		exp := reconcile.NewService(ssproxy)
+		if err := r.Create(ctx, exp); err != nil {
 			return ctrl.Result{}, err
 		}
 	}
 
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+
+	act := service.DeepCopy()
+	exp := reconcile.UpdateService(ssproxy, act)
+	if err := r.Update(ctx, exp); err != nil {
+		return ctrl.Result{}, err
+	}
+
 	return ctrl.Result{}, nil
 }
 
diff --git a/shardingsphere-operator/pkg/reconcile/computenode/configmap.go b/shardingsphere-operator/pkg/reconcile/computenode/configmap.go
index f59ccb3..3735541 100644
--- a/shardingsphere-operator/pkg/reconcile/computenode/configmap.go
+++ b/shardingsphere-operator/pkg/reconcile/computenode/configmap.go
@@ -59,15 +59,10 @@ func NewConfigMap(cn *v1alpha1.ComputeNode) *v1.ConfigMap {
 	// NOTE: ShardingSphere Proxy 5.3.0 needs a server.yaml no matter if it is empty
 	if !reflect.DeepEqual(cn.Spec.Bootstrap.ServerConfig, v1alpha1.ServerConfig{}) {
 		servconf := cn.Spec.Bootstrap.ServerConfig.DeepCopy()
-		if cn.Spec.Bootstrap.ServerConfig.Mode.Type == v1alpha1.ModeTypeCluster {
-			if len(cluster) > 0 {
-				if err := json.Unmarshal([]byte(cluster), &servconf.Mode.Repository); err != nil {
-					return &v1.ConfigMap{}
-				}
-			}
-		}
-		if y, err := yaml.Marshal(servconf); err == nil {
-			builder.SetServerConfig(string(y))
+		if y, err := updateConfigMapServerConf(cluster, servconf, cn); err == nil {
+			builder.SetServerConfig(y)
+		} else {
+			return &v1.ConfigMap{}
 		}
 	} else {
 		builder.SetServerConfig("# Empty file is needed")
@@ -84,6 +79,16 @@ func NewConfigMap(cn *v1alpha1.ComputeNode) *v1.ConfigMap {
 	return builder.Build()
 }
 
+func updateConfigMapServerConf(cluster string, servconf *v1alpha1.ServerConfig, cn *v1alpha1.ComputeNode) (string, error) {
+	if cn.Spec.Bootstrap.ServerConfig.Mode.Type == v1alpha1.ModeTypeCluster && len(cluster) > 0 {
+		if err := json.Unmarshal([]byte(cluster), &servconf.Mode.Repository); err != nil {
+			return "", err
+		}
+	}
+	y, err := yaml.Marshal(servconf)
+	return string(y), err
+}
+
 // ConfigMapBuilder is a builder for ConfigMap by ComputeNode
 type ConfigMapBuilder interface {
 	SetName(name string) ConfigMapBuilder
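
The extracted updateConfigMapServerConf flattens two levels of nesting and surfaces the json.Unmarshal error to NewConfigMap, which now returns the empty ConfigMap explicitly instead of swallowing the failure inside the nested ifs. A self-contained sketch of the same flow with reduced stand-in types (assuming gopkg.in/yaml.v2 for the YAML rendering; the operator's actual YAML package may differ):

    package main

    import (
        "encoding/json"
        "fmt"

        "gopkg.in/yaml.v2"
    )

    // Reduced stand-ins for the operator's v1alpha1.ServerConfig types.
    type Repository struct {
        Type  string            `json:"type" yaml:"type"`
        Props map[string]string `json:"props" yaml:"props"`
    }

    type Mode struct {
        Type       string     `yaml:"type"`
        Repository Repository `yaml:"repository"`
    }

    type ServerConfig struct {
        Mode Mode `yaml:"mode"`
    }

    // Mirrors updateConfigMapServerConf: in cluster mode, overlay the
    // cluster JSON onto the repository section, then render the whole
    // server config as YAML; errors propagate to the caller.
    func renderServerConf(cluster string, conf *ServerConfig) (string, error) {
        if conf.Mode.Type == "Cluster" && len(cluster) > 0 {
            if err := json.Unmarshal([]byte(cluster), &conf.Mode.Repository); err != nil {
                return "", err
            }
        }
        y, err := yaml.Marshal(conf)
        return string(y), err
    }

    func main() {
        conf := &ServerConfig{Mode: Mode{Type: "Cluster"}}
        cluster := `{"type":"ZooKeeper","props":{"server-lists":"zk:2181"}}`
        y, err := renderServerConf(cluster, conf)
        fmt.Println(y, err)
    }
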
diff --git a/shardingsphere-operator/pkg/reconcile/computenode/service.go b/shardingsphere-operator/pkg/reconcile/computenode/service.go
index 6fa1ccf..276826d 100644
--- a/shardingsphere-operator/pkg/reconcile/computenode/service.go
+++ b/shardingsphere-operator/pkg/reconcile/computenode/service.go
@@ -129,29 +129,35 @@ func DefaultService(meta metav1.Object, gvk schema.GroupVersionKind) *corev1.Ser
 }
 
 // UpdateService update Service
-func UpdateService(cn *v1alpha1.ComputeNode, cur *corev1.Service) *corev1.Service {
-	exp := &corev1.Service{}
-	exp.ObjectMeta = cur.ObjectMeta
-	exp.Labels = cur.Labels
-	exp.Annotations = cur.Annotations
-	exp.Spec = NewService(cn).Spec
-	exp.Spec.ClusterIP = cur.Spec.ClusterIP
-	exp.Spec.ClusterIPs = cur.Spec.ClusterIPs
+func UpdateService(cn *v1alpha1.ComputeNode, svc *corev1.Service) *corev1.Service {
+	exp := NewService(cn)
+	exp.ObjectMeta = svc.ObjectMeta
+	exp.Spec.ClusterIP = svc.Spec.ClusterIP
+	exp.Spec.ClusterIPs = svc.Spec.ClusterIPs
 	if cn.Spec.ServiceType == corev1.ServiceTypeNodePort {
-		for pb := range cn.Spec.PortBindings {
-			for p := range cur.Spec.Ports {
-				if cn.Spec.PortBindings[pb].Name == cur.Spec.Ports[p].Name {
-					if cur.Spec.Ports[p].NodePort != 0 {
-						for pt := range exp.Spec.Ports {
-							if exp.Spec.Ports[pt].Name == cur.Spec.Ports[p].Name {
-								exp.Spec.Ports[pt].NodePort = cur.Spec.Ports[p].NodePort
-							}
-						}
-					}
+		exp.Spec.Ports = updateNodePorts(cn.Spec.PortBindings, svc.Spec.Ports)
+	}
+
+	return exp
+}
+
+func updateNodePorts(portbindings []v1alpha1.PortBinding, svcports []corev1.ServicePort) []corev1.ServicePort {
+	ports := []corev1.ServicePort{}
+	for pb := range portbindings {
+		for sp := range svcports {
+			if portbindings[pb].Name == svcports[sp].Name {
+				port := corev1.ServicePort{
+					Name:       portbindings[pb].Name,
+					TargetPort: intstr.FromInt(int(portbindings[pb].ContainerPort)),
+					Port:       portbindings[pb].ServicePort,
+					Protocol:   portbindings[pb].Protocol,
+				}
+				if svcports[sp].NodePort != 0 {
+					port.NodePort = svcports[sp].NodePort
 				}
+				ports = append(ports, port)
 			}
 		}
 	}
-
-	return exp
+	return ports
 }