Posted to reviews@yunikorn.apache.org by "FrankYang0529 (via GitHub)" <gi...@apache.org> on 2023/06/04 09:21:37 UTC

[GitHub] [yunikorn-k8shim] FrankYang0529 opened a new pull request, #608: [YUNIKORN-1471] add preemption e2e test

FrankYang0529 opened a new pull request, #608:
URL: https://github.com/apache/yunikorn-k8shim/pull/608

   ### What is this PR for?
   
   Add preemption e2e tests.
   
   ### What type of PR is it?
   * [ ] - Bug Fix
   * [X] - Improvement
   * [ ] - Feature
   * [ ] - Documentation
   * [ ] - Hot Fix
   * [ ] - Refactoring
   
   ### Todos
   * [ ] - Task
   
   ### What is the Jira issue?
   https://issues.apache.org/jira/browse/YUNIKORN-1471
   
   ### How should this be tested?
   
   Run `make e2e_test`.
   
   ### Screenshots (if appropriate)
   
   ### Questions:
   * [ ] - The license files need updating.
   * [ ] - There are breaking changes for older versions.
   * [ ] - It needs documentation.
   


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: reviews-unsubscribe@yunikorn.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [yunikorn-k8shim] FrankYang0529 commented on a diff in pull request #608: [YUNIKORN-1471] add preemption e2e test

Posted by "FrankYang0529 (via GitHub)" <gi...@apache.org>.
FrankYang0529 commented on code in PR #608:
URL: https://github.com/apache/yunikorn-k8shim/pull/608#discussion_r1221682639


##########
test/e2e/preemption/preemption_test.go:
##########
@@ -0,0 +1,321 @@
+/*
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package preemption_test
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"
+	v1 "k8s.io/api/core/v1"
+
+	"github.com/apache/yunikorn-k8shim/pkg/common/constants"
+	tests "github.com/apache/yunikorn-k8shim/test/e2e"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/configmanager"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/common"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/k8s"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/yunikorn"
+)
+
+var kClient k8s.KubeCtl
+var restClient yunikorn.RClient
+var ns *v1.Namespace
+var dev = "dev" + common.RandSeq(5)
+var oldConfigMap = new(v1.ConfigMap)
+var annotation = "ann-" + common.RandSeq(10)
+
+// Nodes
+var Worker = ""
+var WorkerMemRes int64
+var sleepPodMemLimit int64
+var taintKey = "e2e_test_preemption"
+var nodesToTaint []string
+
+// Queues
+var preemptionQueueYamlFormat = `
+partitions:
+  - name: default
+    queues:
+      - name: root
+        submitacl: '*'
+        queues:
+          - name: sandbox1
+            submitacl: '*'
+            properties:
+              preemption.delay: 1s
+            resources:
+              guaranteed:
+                memory: %dM
+          - name: sandbox2
+            submitacl: '*'
+            properties:
+              preemption.policy: %s
+              preemption.delay: 1s
+            resources:
+              guaranteed:
+                memory: %dM
+`
+
+var _ = ginkgo.BeforeSuite(func() {
+	// Initializing kubectl client
+	kClient = k8s.KubeCtl{}
+	Ω(kClient.SetClient()).To(gomega.BeNil())
+	// Initializing rest client
+	restClient = yunikorn.RClient{}
+	Ω(restClient).NotTo(gomega.BeNil())
+
+	annotation = "ann-" + common.RandSeq(10)
+	yunikorn.EnsureYuniKornConfigsPresent()
+	yunikorn.UpdateConfigMapWrapper(oldConfigMap, "", annotation)
+
+	ginkgo.By("Port-forward the scheduler pod")
+	var err = kClient.PortForwardYkSchedulerPod()
+	Ω(err).NotTo(gomega.HaveOccurred())
+
+	ginkgo.By("create development namespace")
+	ns, err = kClient.CreateNamespace(dev, nil)
+	gomega.Ω(err).NotTo(gomega.HaveOccurred())
+	gomega.Ω(ns.Status.Phase).To(gomega.Equal(v1.NamespaceActive))
+
+	var nodes *v1.NodeList
+	nodes, err = kClient.GetNodes()
+	Ω(err).NotTo(gomega.HaveOccurred())
+	Ω(len(nodes.Items)).NotTo(gomega.BeZero(), "Nodes can't be empty")
+
+	// Extract node allocatable resources
+	for _, node := range nodes.Items {
+		// skip master if it's marked as such
+		node := node
+		if k8s.IsMasterNode(&node) || !k8s.IsComputeNode(&node) {
+			continue
+		}
+		if Worker == "" {
+			Worker = node.Name
+		} else {
+			nodesToTaint = append(nodesToTaint, node.Name)
+		}
+	}
+	Ω(Worker).NotTo(gomega.BeEmpty(), "Worker node not found")
+
+	ginkgo.By("Tainting some nodes..")
+	err = kClient.TaintNodes(nodesToTaint, taintKey, "value", v1.TaintEffectNoSchedule)
+	Ω(err).NotTo(gomega.HaveOccurred())
+
+	nodesDAOInfo, err := restClient.GetNodes(constants.DefaultPartition)
+	Ω(err).NotTo(gomega.HaveOccurred())
+	Ω(nodesDAOInfo).NotTo(gomega.BeNil())
+
+	for _, node := range *nodesDAOInfo {
+		if node.NodeID == Worker {
+			WorkerMemRes = node.Available["memory"]
+		}
+	}
+	WorkerMemRes /= (1000 * 1000) // change to M
+	fmt.Fprintf(ginkgo.GinkgoWriter, "Worker node %s available memory %dM\n", Worker, WorkerMemRes)
+
+	sleepPodMemLimit = int64(float64(WorkerMemRes) / 3)
+	Ω(sleepPodMemLimit).NotTo(gomega.BeZero(), "Sleep pod memory limit cannot be zero")
+	fmt.Fprintf(ginkgo.GinkgoWriter, "Sleep pod limit memory %dM\n", sleepPodMemLimit)
+})
+
+var _ = ginkgo.AfterSuite(func() {
+
+	ginkgo.By("Untainting some nodes")
+	err := kClient.UntaintNodes(nodesToTaint, taintKey)
+	Ω(err).NotTo(gomega.HaveOccurred(), "Could not remove taint from nodes "+strings.Join(nodesToTaint, ","))
+
+	ginkgo.By("Check Yunikorn's health")
+	checks, err := yunikorn.GetFailedHealthChecks()
+	Ω(err).NotTo(gomega.HaveOccurred())
+	Ω(checks).To(gomega.Equal(""), checks)
+
+	testDescription := ginkgo.CurrentSpecReport()
+	if testDescription.Failed() {
+		tests.LogTestClusterInfoWrapper(testDescription.FailureMessage(), []string{ns.Name})
+		tests.LogYunikornContainer(testDescription.FailureMessage())
+	}
+	ginkgo.By("Tearing down namespace: " + ns.Name)
+	err = kClient.TearDownNamespace(ns.Name)
+	Ω(err).NotTo(gomega.HaveOccurred())
+
+	yunikorn.RestoreConfigMapWrapper(oldConfigMap, annotation)
+})
+
+var _ = ginkgo.Describe("Preemption", func() {
+	ginkgo.It("Verify_basic_preemption", func() {
+		ginkgo.By("A queue uses resource more than the guaranteed value even after removing one of the pods. The cluster doesn't have enough resource to deploy a pod in another queue which uses resource less than the guaranteed value.")
+		// update config

Review Comment:
   Updated it to use `UpdateCustomConfigMapWrapper` in each test case and `RestoreConfigMapWrapper` in `AfterEach`.
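   For context, a minimal sketch of that flow, reusing the imports and globals of the quoted test file. The exact `UpdateCustomConfigMapWrapper` signature isn't shown in this thread, so the raw ConfigMap update from the diff stands in for it here; everything below is illustrative only.

   ```go
   // Sketch: per-spec queue config update, with a restore in AfterEach.
   ginkgo.It("Verify_basic_preemption", func() {
   	cm, err := kClient.GetConfigMap(constants.ConfigMapName, configmanager.YuniKornTestConfig.YkNamespace)
   	gomega.Ω(err).NotTo(gomega.HaveOccurred())
   	cm.Data[configmanager.DefaultPolicyGroup] = fmt.Sprintf(preemptionQueueYamlFormat, sleepPodMemLimit, "default", sleepPodMemLimit)
   	_, err = kClient.UpdateConfigMap(cm, configmanager.YuniKornTestConfig.YkNamespace)
   	gomega.Ω(err).NotTo(gomega.HaveOccurred())
   	// ... deploy the sleep pods and assert preemption ...
   })

   ginkgo.AfterEach(func() {
   	// Put back the ConfigMap saved in BeforeSuite so the next spec starts clean.
   	yunikorn.RestoreConfigMapWrapper(oldConfigMap, annotation)
   })
   ```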





[GitHub] [yunikorn-k8shim] FrankYang0529 commented on a diff in pull request #608: [YUNIKORN-1471] add preemption e2e test

Posted by "FrankYang0529 (via GitHub)" <gi...@apache.org>.
FrankYang0529 commented on code in PR #608:
URL: https://github.com/apache/yunikorn-k8shim/pull/608#discussion_r1221683694


##########
test/e2e/preemption/preemption_test.go:
##########
@@ -0,0 +1,321 @@
+/*
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package preemption_test
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"
+	v1 "k8s.io/api/core/v1"
+
+	"github.com/apache/yunikorn-k8shim/pkg/common/constants"
+	tests "github.com/apache/yunikorn-k8shim/test/e2e"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/configmanager"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/common"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/k8s"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/yunikorn"
+)
+
+var kClient k8s.KubeCtl
+var restClient yunikorn.RClient
+var ns *v1.Namespace
+var dev = "dev" + common.RandSeq(5)
+var oldConfigMap = new(v1.ConfigMap)
+var annotation = "ann-" + common.RandSeq(10)
+
+// Nodes
+var Worker = ""
+var WorkerMemRes int64
+var sleepPodMemLimit int64
+var taintKey = "e2e_test_preemption"
+var nodesToTaint []string
+
+// Queues
+var preemptionQueueYamlFormat = `
+partitions:
+  - name: default
+    queues:
+      - name: root
+        submitacl: '*'
+        queues:
+          - name: sandbox1
+            submitacl: '*'
+            properties:
+              preemption.delay: 1s
+            resources:
+              guaranteed:
+                memory: %dM
+          - name: sandbox2
+            submitacl: '*'
+            properties:
+              preemption.policy: %s
+              preemption.delay: 1s
+            resources:
+              guaranteed:
+                memory: %dM
+`
+
+var _ = ginkgo.BeforeSuite(func() {
+	// Initializing kubectl client
+	kClient = k8s.KubeCtl{}
+	Ω(kClient.SetClient()).To(gomega.BeNil())
+	// Initializing rest client
+	restClient = yunikorn.RClient{}
+	Ω(restClient).NotTo(gomega.BeNil())
+
+	annotation = "ann-" + common.RandSeq(10)
+	yunikorn.EnsureYuniKornConfigsPresent()
+	yunikorn.UpdateConfigMapWrapper(oldConfigMap, "", annotation)
+
+	ginkgo.By("Port-forward the scheduler pod")
+	var err = kClient.PortForwardYkSchedulerPod()
+	Ω(err).NotTo(gomega.HaveOccurred())
+
+	ginkgo.By("create development namespace")
+	ns, err = kClient.CreateNamespace(dev, nil)
+	gomega.Ω(err).NotTo(gomega.HaveOccurred())
+	gomega.Ω(ns.Status.Phase).To(gomega.Equal(v1.NamespaceActive))
+
+	var nodes *v1.NodeList
+	nodes, err = kClient.GetNodes()
+	Ω(err).NotTo(gomega.HaveOccurred())
+	Ω(len(nodes.Items)).NotTo(gomega.BeZero(), "Nodes can't be empty")
+
+	// Extract node allocatable resources
+	for _, node := range nodes.Items {
+		// skip master if it's marked as such
+		node := node
+		if k8s.IsMasterNode(&node) || !k8s.IsComputeNode(&node) {
+			continue
+		}
+		if Worker == "" {
+			Worker = node.Name
+		} else {
+			nodesToTaint = append(nodesToTaint, node.Name)
+		}
+	}
+	Ω(Worker).NotTo(gomega.BeEmpty(), "Worker node not found")
+
+	ginkgo.By("Tainting some nodes..")
+	err = kClient.TaintNodes(nodesToTaint, taintKey, "value", v1.TaintEffectNoSchedule)
+	Ω(err).NotTo(gomega.HaveOccurred())
+
+	nodesDAOInfo, err := restClient.GetNodes(constants.DefaultPartition)
+	Ω(err).NotTo(gomega.HaveOccurred())
+	Ω(nodesDAOInfo).NotTo(gomega.BeNil())
+
+	for _, node := range *nodesDAOInfo {
+		if node.NodeID == Worker {
+			WorkerMemRes = node.Available["memory"]
+		}
+	}
+	WorkerMemRes /= (1000 * 1000) // change to M
+	fmt.Fprintf(ginkgo.GinkgoWriter, "Worker node %s available memory %dM\n", Worker, WorkerMemRes)
+
+	sleepPodMemLimit = int64(float64(WorkerMemRes) / 3)
+	Ω(sleepPodMemLimit).NotTo(gomega.BeZero(), "Sleep pod memory limit cannot be zero")
+	fmt.Fprintf(ginkgo.GinkgoWriter, "Sleep pod limit memory %dM\n", sleepPodMemLimit)
+})
+
+var _ = ginkgo.AfterSuite(func() {
+
+	ginkgo.By("Untainting some nodes")
+	err := kClient.UntaintNodes(nodesToTaint, taintKey)
+	Ω(err).NotTo(gomega.HaveOccurred(), "Could not remove taint from nodes "+strings.Join(nodesToTaint, ","))
+
+	ginkgo.By("Check Yunikorn's health")
+	checks, err := yunikorn.GetFailedHealthChecks()
+	Ω(err).NotTo(gomega.HaveOccurred())
+	Ω(checks).To(gomega.Equal(""), checks)
+
+	testDescription := ginkgo.CurrentSpecReport()
+	if testDescription.Failed() {
+		tests.LogTestClusterInfoWrapper(testDescription.FailureMessage(), []string{ns.Name})
+		tests.LogYunikornContainer(testDescription.FailureMessage())
+	}
+	ginkgo.By("Tearing down namespace: " + ns.Name)
+	err = kClient.TearDownNamespace(ns.Name)
+	Ω(err).NotTo(gomega.HaveOccurred())
+
+	yunikorn.RestoreConfigMapWrapper(oldConfigMap, annotation)
+})
+
+var _ = ginkgo.Describe("Preemption", func() {
+	ginkgo.It("Verify_basic_preemption", func() {
+		ginkgo.By("A queue uses resource more than the guaranteed value even after removing one of the pods. The cluster doesn't have enough resource to deploy a pod in another queue which uses resource less than the guaranteed value.")
+		// update config
+		ginkgo.By("Update root.sandbox1 and root.sandbox2 with guaranteed memory " + strconv.FormatInt(sleepPodMemLimit, 10) + "M")
+		cm, err := kClient.GetConfigMap(constants.ConfigMapName, configmanager.YuniKornTestConfig.YkNamespace)
+		Ω(err).NotTo(HaveOccurred())
+		cm.Data[configmanager.DefaultPolicyGroup] = fmt.Sprintf(preemptionQueueYamlFormat, sleepPodMemLimit, "default", sleepPodMemLimit)
+		_, err = kClient.UpdateConfigMap(cm, configmanager.YuniKornTestConfig.YkNamespace)
+		Ω(err).NotTo(HaveOccurred())
+
+		// Define sleepPod
+		sleepPod1Configs := k8s.SleepPodConfig{Name: "sleepjob1", NS: dev, Mem: sleepPodMemLimit, Time: 600, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod2Configs := k8s.SleepPodConfig{Name: "sleepjob2", NS: dev, Mem: sleepPodMemLimit, Time: 600, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod3Configs := k8s.SleepPodConfig{Name: "sleepjob3", NS: dev, Mem: sleepPodMemLimit, Time: 600, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod4Configs := k8s.SleepPodConfig{Name: "sleepjob4", NS: dev, Mem: sleepPodMemLimit, Time: 600, Optedout: true, Labels: map[string]string{"queue": "root.sandbox2"}}

Review Comment:
   Add a new function `createSandbox1SleepPodCofigs`.
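   A minimal sketch of what such a helper could look like, assuming it mirrors the pod configs quoted above; the name comes from this comment and the fields from the diff, but the actual implementation in the PR may differ.

   ```go
   // Hypothetical helper building the root.sandbox1 sleep pod configs.
   func createSandbox1SleepPodCofigs(count, seconds int) []k8s.SleepPodConfig {
   	configs := make([]k8s.SleepPodConfig, 0, count)
   	for i := 1; i <= count; i++ {
   		configs = append(configs, k8s.SleepPodConfig{
   			Name:     fmt.Sprintf("sleepjob%d", i),
   			NS:       dev,
   			Mem:      sleepPodMemLimit,
   			Time:     seconds,
   			Optedout: true,
   			Labels:   map[string]string{"queue": "root.sandbox1"},
   		})
   	}
   	return configs
   }
   ```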





[GitHub] [yunikorn-k8shim] pbacsko commented on a diff in pull request #608: [YUNIKORN-1471] add preemption e2e test

Posted by "pbacsko (via GitHub)" <gi...@apache.org>.
pbacsko commented on code in PR #608:
URL: https://github.com/apache/yunikorn-k8shim/pull/608#discussion_r1219837721


##########
test/e2e/preemption/preemption_test.go:
##########
@@ -0,0 +1,321 @@
+/*
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package preemption_test
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"
+	v1 "k8s.io/api/core/v1"
+
+	"github.com/apache/yunikorn-k8shim/pkg/common/constants"
+	tests "github.com/apache/yunikorn-k8shim/test/e2e"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/configmanager"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/common"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/k8s"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/yunikorn"
+)
+
+var kClient k8s.KubeCtl
+var restClient yunikorn.RClient
+var ns *v1.Namespace
+var dev = "dev" + common.RandSeq(5)
+var oldConfigMap = new(v1.ConfigMap)
+var annotation = "ann-" + common.RandSeq(10)
+
+// Nodes
+var Worker = ""
+var WorkerMemRes int64
+var sleepPodMemLimit int64
+var taintKey = "e2e_test_preemption"
+var nodesToTaint []string
+
+// Queues
+var preemptionQueueYamlFormat = `
+partitions:
+  - name: default
+    queues:
+      - name: root
+        submitacl: '*'
+        queues:
+          - name: sandbox1
+            submitacl: '*'
+            properties:
+              preemption.delay: 1s
+            resources:
+              guaranteed:
+                memory: %dM
+          - name: sandbox2
+            submitacl: '*'
+            properties:
+              preemption.policy: %s
+              preemption.delay: 1s
+            resources:
+              guaranteed:
+                memory: %dM
+`
+
+var _ = ginkgo.BeforeSuite(func() {
+	// Initializing kubectl client
+	kClient = k8s.KubeCtl{}
+	Ω(kClient.SetClient()).To(gomega.BeNil())
+	// Initializing rest client
+	restClient = yunikorn.RClient{}
+	Ω(restClient).NotTo(gomega.BeNil())
+
+	annotation = "ann-" + common.RandSeq(10)
+	yunikorn.EnsureYuniKornConfigsPresent()
+	yunikorn.UpdateConfigMapWrapper(oldConfigMap, "", annotation)
+
+	ginkgo.By("Port-forward the scheduler pod")
+	var err = kClient.PortForwardYkSchedulerPod()
+	Ω(err).NotTo(gomega.HaveOccurred())
+
+	ginkgo.By("create development namespace")
+	ns, err = kClient.CreateNamespace(dev, nil)
+	gomega.Ω(err).NotTo(gomega.HaveOccurred())
+	gomega.Ω(ns.Status.Phase).To(gomega.Equal(v1.NamespaceActive))
+
+	var nodes *v1.NodeList
+	nodes, err = kClient.GetNodes()
+	Ω(err).NotTo(gomega.HaveOccurred())
+	Ω(len(nodes.Items)).NotTo(gomega.BeZero(), "Nodes can't be empty")
+
+	// Extract node allocatable resources
+	for _, node := range nodes.Items {
+		// skip master if it's marked as such
+		node := node
+		if k8s.IsMasterNode(&node) || !k8s.IsComputeNode(&node) {
+			continue
+		}
+		if Worker == "" {
+			Worker = node.Name
+		} else {
+			nodesToTaint = append(nodesToTaint, node.Name)
+		}
+	}
+	Ω(Worker).NotTo(gomega.BeEmpty(), "Worker node not found")
+
+	ginkgo.By("Tainting some nodes..")
+	err = kClient.TaintNodes(nodesToTaint, taintKey, "value", v1.TaintEffectNoSchedule)
+	Ω(err).NotTo(gomega.HaveOccurred())
+
+	nodesDAOInfo, err := restClient.GetNodes(constants.DefaultPartition)
+	Ω(err).NotTo(gomega.HaveOccurred())
+	Ω(nodesDAOInfo).NotTo(gomega.BeNil())
+
+	for _, node := range *nodesDAOInfo {
+		if node.NodeID == Worker {
+			WorkerMemRes = node.Available["memory"]
+		}
+	}
+	WorkerMemRes /= (1000 * 1000) // change to M
+	fmt.Fprintf(ginkgo.GinkgoWriter, "Worker node %s available memory %dM\n", Worker, WorkerMemRes)
+
+	sleepPodMemLimit = int64(float64(WorkerMemRes) / 3)
+	Ω(sleepPodMemLimit).NotTo(gomega.BeZero(), "Sleep pod memory limit cannot be zero")
+	fmt.Fprintf(ginkgo.GinkgoWriter, "Sleep pod limit memory %dM\n", sleepPodMemLimit)
+})
+
+var _ = ginkgo.AfterSuite(func() {
+
+	ginkgo.By("Untainting some nodes")
+	err := kClient.UntaintNodes(nodesToTaint, taintKey)
+	Ω(err).NotTo(gomega.HaveOccurred(), "Could not remove taint from nodes "+strings.Join(nodesToTaint, ","))
+
+	ginkgo.By("Check Yunikorn's health")
+	checks, err := yunikorn.GetFailedHealthChecks()
+	Ω(err).NotTo(gomega.HaveOccurred())
+	Ω(checks).To(gomega.Equal(""), checks)
+
+	testDescription := ginkgo.CurrentSpecReport()
+	if testDescription.Failed() {
+		tests.LogTestClusterInfoWrapper(testDescription.FailureMessage(), []string{ns.Name})
+		tests.LogYunikornContainer(testDescription.FailureMessage())
+	}
+	ginkgo.By("Tearing down namespace: " + ns.Name)
+	err = kClient.TearDownNamespace(ns.Name)
+	Ω(err).NotTo(gomega.HaveOccurred())
+
+	yunikorn.RestoreConfigMapWrapper(oldConfigMap, annotation)
+})
+
+var _ = ginkgo.Describe("Preemption", func() {
+	ginkgo.It("Verify_basic_preemption", func() {
+		ginkgo.By("A queue uses resource more than the guaranteed value even after removing one of the pods. The cluster doesn't have enough resource to deploy a pod in another queue which uses resource less than the guaranteed value.")
+		// update config

Review Comment:
   Either update the config in BeforeSuite and reset it in AfterSuite, or do it in BeforeEach/AfterEach. If you want to do this in every test case, then also perform a reset step.

   Right now it's problematic because you don't perform any restoration. If the config is the same for every test, I suggest BeforeSuite/AfterSuite. There are examples of config saving/restoration in other suites; check those out.
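   As a sketch, the BeforeSuite/AfterSuite variant could look like the following, assuming the queue config really is identical for every spec (a fragment reusing the globals of the quoted file; placement and names are illustrative):

   ```go
   var _ = ginkgo.BeforeSuite(func() {
   	// ... existing setup from the quoted file ...
   	// Write the preemption queue config once for the whole suite.
   	cm, err := kClient.GetConfigMap(constants.ConfigMapName, configmanager.YuniKornTestConfig.YkNamespace)
   	gomega.Ω(err).NotTo(gomega.HaveOccurred())
   	cm.Data[configmanager.DefaultPolicyGroup] = fmt.Sprintf(preemptionQueueYamlFormat, sleepPodMemLimit, "default", sleepPodMemLimit)
   	_, err = kClient.UpdateConfigMap(cm, configmanager.YuniKornTestConfig.YkNamespace)
   	gomega.Ω(err).NotTo(gomega.HaveOccurred())
   })

   var _ = ginkgo.AfterSuite(func() {
   	// The quoted AfterSuite already ends with this call; it restores the ConfigMap
   	// saved by UpdateConfigMapWrapper in BeforeSuite, which is the reset step.
   	yunikorn.RestoreConfigMapWrapper(oldConfigMap, annotation)
   })
   ```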





[GitHub] [yunikorn-k8shim] pbacsko commented on a diff in pull request #608: [YUNIKORN-1471] add preemption e2e test

Posted by "pbacsko (via GitHub)" <gi...@apache.org>.
pbacsko commented on code in PR #608:
URL: https://github.com/apache/yunikorn-k8shim/pull/608#discussion_r1218206968


##########
test/e2e/preemption/preemption_test.go:
##########
@@ -0,0 +1,337 @@
+/*
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package preemption_test
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"
+	v1 "k8s.io/api/core/v1"
+	k8serrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"github.com/apache/yunikorn-k8shim/pkg/common/constants"
+	tests "github.com/apache/yunikorn-k8shim/test/e2e"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/configmanager"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/common"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/k8s"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/yunikorn"
+)
+
+var kClient k8s.KubeCtl
+var restClient yunikorn.RClient
+var ns *v1.Namespace
+var dev = "dev" + common.RandSeq(5)
+var oldConfigMap = new(v1.ConfigMap)
+var annotation = "ann-" + common.RandSeq(10)
+
+// Nodes
+var Worker = ""
+var WorkerMemRes int64
+var sleepPodMemLimit int64
+var taintKey = "e2e_test_preemption"
+var nodesToTaint []string
+
+// Queues
+var preemptionQueueYamlFormat = `
+partitions:
+  - name: default
+    queues:
+      - name: root
+        submitacl: '*'
+        queues:
+          - name: sandbox1
+            submitacl: '*'
+            properties:
+              preemption.delay: 1s
+            resources:
+              guaranteed:
+                memory: %dM
+          - name: sandbox2
+            submitacl: '*'
+            properties:
+              preemption.policy: %s
+              preemption.delay: 1s
+            resources:
+              guaranteed:
+                memory: %dM
+`
+
+var _ = ginkgo.BeforeSuite(func() {
+	// Initializing kubectl client
+	kClient = k8s.KubeCtl{}
+	Ω(kClient.SetClient()).To(gomega.BeNil())
+	// Initializing rest client
+	restClient = yunikorn.RClient{}
+	Ω(restClient).NotTo(gomega.BeNil())
+
+	annotation = "ann-" + common.RandSeq(10)
+	yunikorn.EnsureYuniKornConfigsPresent()
+	yunikorn.UpdateConfigMapWrapper(oldConfigMap, "", annotation)
+
+	ginkgo.By("Port-forward the scheduler pod")
+	var err = kClient.PortForwardYkSchedulerPod()
+	Ω(err).NotTo(gomega.HaveOccurred())
+
+	ginkgo.By("create development namespace")
+	ns, err = kClient.CreateNamespace(dev, nil)
+	gomega.Ω(err).NotTo(gomega.HaveOccurred())
+	gomega.Ω(ns.Status.Phase).To(gomega.Equal(v1.NamespaceActive))
+
+	var nodes *v1.NodeList
+	nodes, err = kClient.GetNodes()
+	Ω(err).NotTo(gomega.HaveOccurred())
+	Ω(len(nodes.Items)).NotTo(gomega.BeZero(), "Nodes can't be empty")
+
+	// Extract node allocatable resources
+	for _, node := range nodes.Items {
+		// skip master if it's marked as such
+		node := node
+		if k8s.IsMasterNode(&node) || !k8s.IsComputeNode(&node) {
+			continue
+		}
+		if Worker == "" {
+			Worker = node.Name
+		} else {
+			nodesToTaint = append(nodesToTaint, node.Name)
+		}
+	}
+	Ω(Worker).NotTo(gomega.BeEmpty(), "Worker node not found")
+
+	ginkgo.By("Tainting some nodes..")
+	for _, nodeName := range nodesToTaint {
+		ginkgo.By("Tainting node " + nodeName)
+		err = kClient.TaintNode(nodeName, taintKey, "value", v1.TaintEffectNoSchedule)
+		Ω(err).NotTo(gomega.HaveOccurred())
+	}
+
+	nodesDAOInfo, err := restClient.GetNodes(constants.DefaultPartition)
+	Ω(err).NotTo(gomega.HaveOccurred())
+	Ω(nodesDAOInfo).NotTo(gomega.BeNil())
+
+	for _, node := range *nodesDAOInfo {
+		if node.NodeID == Worker {
+			WorkerMemRes = node.Available["memory"]
+		}
+	}
+	sleepPodMemLimit = int64(float64(WorkerMemRes)/3) / (1000 * 1000)
+	Ω(sleepPodMemLimit).NotTo(gomega.BeZero(), "Sleep pod memory limit cannot be zero")
+})
+
+var _ = ginkgo.AfterSuite(func() {
+
+	ginkgo.By("Untainting some nodes")
+	for _, nodeName := range nodesToTaint {
+		ginkgo.By("Untainting node " + nodeName)
+		err := kClient.UntaintNode(nodeName, taintKey)
+		Ω(err).NotTo(gomega.HaveOccurred(), "Could not remove taint from node "+nodeName)
+	}
+
+	ginkgo.By("Check Yunikorn's health")
+	checks, err := yunikorn.GetFailedHealthChecks()
+	Ω(err).NotTo(gomega.HaveOccurred())
+	Ω(checks).To(gomega.Equal(""), checks)
+
+	testDescription := ginkgo.CurrentSpecReport()
+	if testDescription.Failed() {
+		tests.LogTestClusterInfoWrapper(testDescription.FailureMessage(), []string{ns.Name})
+		tests.LogYunikornContainer(testDescription.FailureMessage())
+	}
+	ginkgo.By("Tearing down namespace: " + ns.Name)
+	err = kClient.TearDownNamespace(ns.Name)
+	Ω(err).NotTo(gomega.HaveOccurred())
+
+	yunikorn.RestoreConfigMapWrapper(oldConfigMap, annotation)
+})
+
+var _ = ginkgo.Describe("Preemption", func() {
+	ginkgo.It("Verify_basic_preemption", func() {
+		ginkgo.By("A queue uses resource more than the guaranteed value even after removing one of the pods. The cluster doesn't have enough resource to deploy a pod in another queue which uses resource less than the guaranteed value.")
+		// update config
+		cm, err := kClient.GetConfigMap(constants.ConfigMapName, configmanager.YuniKornTestConfig.YkNamespace)
+		Ω(err).NotTo(HaveOccurred())
+		cm.Data[configmanager.DefaultPolicyGroup] = fmt.Sprintf(preemptionQueueYamlFormat, sleepPodMemLimit, "default", sleepPodMemLimit)
+		_, err = kClient.UpdateConfigMap(cm, configmanager.YuniKornTestConfig.YkNamespace)
+		Ω(err).NotTo(HaveOccurred())
+
+		// Define sleepPod
+		sleepPod1Configs := k8s.SleepPodConfig{Name: "sleepjob1", NS: dev, Mem: sleepPodMemLimit, Time: 600, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod2Configs := k8s.SleepPodConfig{Name: "sleepjob2", NS: dev, Mem: sleepPodMemLimit, Time: 600, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod3Configs := k8s.SleepPodConfig{Name: "sleepjob3", NS: dev, Mem: sleepPodMemLimit, Time: 600, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod4Configs := k8s.SleepPodConfig{Name: "sleepjob4", NS: dev, Mem: sleepPodMemLimit, Time: 600, Optedout: true, Labels: map[string]string{"queue": "root.sandbox2"}}
+
+		for _, config := range []k8s.SleepPodConfig{sleepPod1Configs, sleepPod2Configs, sleepPod3Configs, sleepPod4Configs} {
+			ginkgo.By("Deploy the sleep pod " + config.Name + " to the development namespace")
+			sleepObj, podErr := k8s.InitSleepPod(config)
+			Ω(podErr).NotTo(gomega.HaveOccurred())
+			sleepRespPod, podErr := kClient.CreatePod(sleepObj, dev)
+			gomega.Ω(podErr).NotTo(gomega.HaveOccurred())
+
+			// Wait for pod to move to running state
+			podErr = kClient.WaitForPodBySelectorRunning(dev,
+				fmt.Sprintf("app=%s", sleepRespPod.ObjectMeta.Labels["app"]),
+				60)
+			gomega.Ω(podErr).NotTo(gomega.HaveOccurred())
+		}
+
+		// assert one of the pods in root.sandbox1 is preempted
+		ginkgo.By("One of the pods in root.sanbox1 is preempted")
+		sandbox1RunningPodsCnt := 0
+		pods, err := kClient.ListPodsByLabelSelector(dev, "queue=root.sandbox1")
+		gomega.Ω(err).NotTo(gomega.HaveOccurred())
+		for _, pod := range pods.Items {
+			if pod.DeletionTimestamp != nil {
+				continue
+			}
+			if pod.Status.Phase == v1.PodRunning {
+				sandbox1RunningPodsCnt++
+			}
+		}
+		Ω(sandbox1RunningPodsCnt).To(gomega.Equal(2), "One of the pods in root.sandbox1 should be preempted")
+	})
+
+	ginkgo.It("Verify_no_preemption_on_resources_less_than_guaranteed_value", func() {
+		ginkgo.By("A queue uses resource less than the guaranteed value can't be preempted.")
+		// update config
+		cm, err := kClient.GetConfigMap(constants.ConfigMapName, configmanager.YuniKornTestConfig.YkNamespace)
+		Ω(err).NotTo(HaveOccurred())
+		cm.Data[configmanager.DefaultPolicyGroup] = fmt.Sprintf(preemptionQueueYamlFormat, WorkerMemRes, "default", WorkerMemRes)
+		_, err = kClient.UpdateConfigMap(cm, configmanager.YuniKornTestConfig.YkNamespace)
+		Ω(err).NotTo(HaveOccurred())
+
+		// Define sleepPod
+		sleepPod1Configs := k8s.SleepPodConfig{Name: "sleepjob1", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod2Configs := k8s.SleepPodConfig{Name: "sleepjob2", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod3Configs := k8s.SleepPodConfig{Name: "sleepjob3", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod4Configs := k8s.SleepPodConfig{Name: "sleepjob4", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox2"}}
+
+		// Deploy pods in root.sandbox1
+		for _, config := range []k8s.SleepPodConfig{sleepPod1Configs, sleepPod2Configs, sleepPod3Configs} {
+			ginkgo.By("Deploy the sleep pod " + config.Name + " to the development namespace")
+			sleepObj, podErr := k8s.InitSleepPod(config)
+			Ω(podErr).NotTo(gomega.HaveOccurred())
+			sleepRespPod, podErr := kClient.CreatePod(sleepObj, dev)
+			gomega.Ω(podErr).NotTo(gomega.HaveOccurred())
+
+			// Wait for pod to move to running state
+			podErr = kClient.WaitForPodBySelectorRunning(dev,
+				fmt.Sprintf("app=%s", sleepRespPod.ObjectMeta.Labels["app"]),
+				30)
+			gomega.Ω(podErr).NotTo(gomega.HaveOccurred())
+		}
+
+		// Deploy sleepjob4 pod in root.sandbox2
+		ginkgo.By("Deploy the sleep pod " + sleepPod4Configs.Name + " to the development namespace")
+		sleepObj, podErr := k8s.InitSleepPod(sleepPod4Configs)
+		Ω(podErr).NotTo(gomega.HaveOccurred())
+		sleepRespPod4, err := kClient.CreatePod(sleepObj, dev)
+		gomega.Ω(err).NotTo(gomega.HaveOccurred())
+
+		// sleepjob4 pod can't be scheduled before pods in root.sandbox1 are succeeded
+		ginkgo.By("The sleep pod " + sleepPod4Configs.Name + " can't be scheduled")
+		err = kClient.WaitForPodUnschedulable(sleepRespPod4, time.Duration(60)*time.Second)
+		gomega.Ω(err).NotTo(gomega.HaveOccurred())
+
+		// pods in root.sandbox1 can be succeeded
+		ginkgo.By("The pods in root.sandbox1 can be succeeded")
+		for _, config := range []k8s.SleepPodConfig{sleepPod1Configs, sleepPod2Configs, sleepPod3Configs} {
+			err = kClient.WaitForPodScheduled(dev, config.Name, time.Duration(30)*time.Second)
+			gomega.Ω(err).NotTo(gomega.HaveOccurred())
+		}
+	})
+
+	ginkgo.It("Verify_no_preemption_outside_fence", func() {
+		ginkgo.By("The preemption can't go outside the fence.")
+		// update config
+		cm, err := kClient.GetConfigMap(constants.ConfigMapName, configmanager.YuniKornTestConfig.YkNamespace)
+		Ω(err).NotTo(HaveOccurred())
+		cm.Data[configmanager.DefaultPolicyGroup] = fmt.Sprintf(preemptionQueueYamlFormat, sleepPodMemLimit, "fence", sleepPodMemLimit)
+		_, err = kClient.UpdateConfigMap(cm, configmanager.YuniKornTestConfig.YkNamespace)
+		Ω(err).NotTo(HaveOccurred())
+
+		// Define sleepPod
+		sleepPod1Configs := k8s.SleepPodConfig{Name: "sleepjob1", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod2Configs := k8s.SleepPodConfig{Name: "sleepjob2", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod3Configs := k8s.SleepPodConfig{Name: "sleepjob3", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod4Configs := k8s.SleepPodConfig{Name: "sleepjob4", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox2"}}
+
+		// Deploy pods in root.sandbox1
+		for _, config := range []k8s.SleepPodConfig{sleepPod1Configs, sleepPod2Configs, sleepPod3Configs} {
+			ginkgo.By("Deploy the sleep pod " + config.Name + " to the development namespace")
+			sleepObj, podErr := k8s.InitSleepPod(config)
+			Ω(podErr).NotTo(gomega.HaveOccurred())
+			sleepRespPod, podErr := kClient.CreatePod(sleepObj, dev)
+			gomega.Ω(podErr).NotTo(gomega.HaveOccurred())
+
+			// Wait for pod to move to running state
+			podErr = kClient.WaitForPodBySelectorRunning(dev,
+				fmt.Sprintf("app=%s", sleepRespPod.ObjectMeta.Labels["app"]),
+				30)
+			gomega.Ω(podErr).NotTo(gomega.HaveOccurred())
+		}
+
+		// Deploy sleepjob4 pod in root.sandbox2
+		ginkgo.By("Deploy the sleep pod " + sleepPod4Configs.Name + " to the development namespace")
+		sleepObj, podErr := k8s.InitSleepPod(sleepPod4Configs)
+		Ω(podErr).NotTo(gomega.HaveOccurred())
+		sleepRespPod4, err := kClient.CreatePod(sleepObj, dev)
+		gomega.Ω(err).NotTo(gomega.HaveOccurred())
+
+		// sleepjob4 pod can't be scheduled before pods in root.sandbox1 are succeeded
+		ginkgo.By("The sleep pod " + sleepPod4Configs.Name + " can't be scheduled")
+		err = kClient.WaitForPodUnschedulable(sleepRespPod4, time.Duration(60)*time.Second)
+		gomega.Ω(err).NotTo(gomega.HaveOccurred())
+
+		// pods in root.sandbox1 can be succeeded
+		ginkgo.By("The pods in root.sandbox1 can be succeeded")
+		for _, config := range []k8s.SleepPodConfig{sleepPod1Configs, sleepPod2Configs, sleepPod3Configs} {
+			err = kClient.WaitForPodScheduled(dev, config.Name, time.Duration(30)*time.Second)
+			gomega.Ω(err).NotTo(gomega.HaveOccurred())
+		}
+	})
+
+	ginkgo.AfterEach(func() {
+
+		// Delete all sleep pods
+		ginkgo.By("Delete all sleep pods")
+		pods, err := kClient.GetPodNamesFromNS(ns.Name)
+		if err == nil {
+			for _, each := range pods {
+				if strings.Contains(each, "sleep") {
+					ginkgo.By("Deleting sleep pod: " + each)
+					err = kClient.DeletePod(each, ns.Name)
+					if err != nil {
+						if statusErr, ok := err.(*k8serrors.StatusError); ok {
+							if statusErr.ErrStatus.Reason == metav1.StatusReasonNotFound {
+								fmt.Fprintf(ginkgo.GinkgoWriter, "Failed to delete pod %s - reason is %s, it "+
+									"has been deleted in the meantime\n", each, statusErr.ErrStatus.Reason)
+								continue
+							}
+						}
+					}
+				}
+			}
+		} else {
+			fmt.Fprintf(ginkgo.GinkgoWriter, "Failed to get pods from namespace %s - reason is %s\n", ns.Name, err.Error())
+		}

Review Comment:
   We have `KubeCtl.DeletePods()`; can't we just re-use that? This code is convoluted, and this level of nesting is not easy to read.
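   For illustration, the cleanup could then collapse to something like this; the exact `KubeCtl.DeletePods` signature isn't shown in this thread, so the namespace-wide call and error return below are assumptions:

   ```go
   ginkgo.AfterEach(func() {
   	ginkgo.By("Delete all sleep pods")
   	// Assumed signature: delete every pod in the given namespace, returning an error.
   	if err := kClient.DeletePods(ns.Name); err != nil {
   		fmt.Fprintf(ginkgo.GinkgoWriter, "Failed to delete pods in namespace %s - reason is %s\n", ns.Name, err.Error())
   	}
   })
   ```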





[GitHub] [yunikorn-k8shim] pbacsko commented on a diff in pull request #608: [YUNIKORN-1471] add preemption e2e test

Posted by "pbacsko (via GitHub)" <gi...@apache.org>.
pbacsko commented on code in PR #608:
URL: https://github.com/apache/yunikorn-k8shim/pull/608#discussion_r1219839459


##########
test/e2e/preemption/preemption_test.go:
##########
@@ -0,0 +1,321 @@
+/*
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package preemption_test
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"
+	v1 "k8s.io/api/core/v1"
+
+	"github.com/apache/yunikorn-k8shim/pkg/common/constants"
+	tests "github.com/apache/yunikorn-k8shim/test/e2e"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/configmanager"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/common"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/k8s"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/yunikorn"
+)
+
+var kClient k8s.KubeCtl
+var restClient yunikorn.RClient
+var ns *v1.Namespace
+var dev = "dev" + common.RandSeq(5)
+var oldConfigMap = new(v1.ConfigMap)
+var annotation = "ann-" + common.RandSeq(10)
+
+// Nodes
+var Worker = ""
+var WorkerMemRes int64
+var sleepPodMemLimit int64
+var taintKey = "e2e_test_preemption"
+var nodesToTaint []string
+
+// Queues
+var preemptionQueueYamlFormat = `
+partitions:
+  - name: default
+    queues:
+      - name: root
+        submitacl: '*'
+        queues:
+          - name: sandbox1
+            submitacl: '*'
+            properties:
+              preemption.delay: 1s
+            resources:
+              guaranteed:
+                memory: %dM
+          - name: sandbox2
+            submitacl: '*'
+            properties:
+              preemption.policy: %s
+              preemption.delay: 1s
+            resources:
+              guaranteed:
+                memory: %dM
+`
+
+var _ = ginkgo.BeforeSuite(func() {
+	// Initializing kubectl client
+	kClient = k8s.KubeCtl{}
+	Ω(kClient.SetClient()).To(gomega.BeNil())
+	// Initializing rest client
+	restClient = yunikorn.RClient{}
+	Ω(restClient).NotTo(gomega.BeNil())
+
+	annotation = "ann-" + common.RandSeq(10)
+	yunikorn.EnsureYuniKornConfigsPresent()
+	yunikorn.UpdateConfigMapWrapper(oldConfigMap, "", annotation)
+
+	ginkgo.By("Port-forward the scheduler pod")
+	var err = kClient.PortForwardYkSchedulerPod()
+	Ω(err).NotTo(gomega.HaveOccurred())
+
+	ginkgo.By("create development namespace")
+	ns, err = kClient.CreateNamespace(dev, nil)
+	gomega.Ω(err).NotTo(gomega.HaveOccurred())
+	gomega.Ω(ns.Status.Phase).To(gomega.Equal(v1.NamespaceActive))
+
+	var nodes *v1.NodeList
+	nodes, err = kClient.GetNodes()
+	Ω(err).NotTo(gomega.HaveOccurred())
+	Ω(len(nodes.Items)).NotTo(gomega.BeZero(), "Nodes can't be empty")
+
+	// Extract node allocatable resources
+	for _, node := range nodes.Items {
+		// skip master if it's marked as such
+		node := node
+		if k8s.IsMasterNode(&node) || !k8s.IsComputeNode(&node) {
+			continue
+		}
+		if Worker == "" {
+			Worker = node.Name
+		} else {
+			nodesToTaint = append(nodesToTaint, node.Name)
+		}
+	}
+	Ω(Worker).NotTo(gomega.BeEmpty(), "Worker node not found")
+
+	ginkgo.By("Tainting some nodes..")
+	err = kClient.TaintNodes(nodesToTaint, taintKey, "value", v1.TaintEffectNoSchedule)
+	Ω(err).NotTo(gomega.HaveOccurred())
+
+	nodesDAOInfo, err := restClient.GetNodes(constants.DefaultPartition)
+	Ω(err).NotTo(gomega.HaveOccurred())
+	Ω(nodesDAOInfo).NotTo(gomega.BeNil())
+
+	for _, node := range *nodesDAOInfo {
+		if node.NodeID == Worker {
+			WorkerMemRes = node.Available["memory"]
+		}
+	}
+	WorkerMemRes /= (1000 * 1000) // change to M
+	fmt.Fprintf(ginkgo.GinkgoWriter, "Worker node %s available memory %dM\n", Worker, WorkerMemRes)
+
+	sleepPodMemLimit = int64(float64(WorkerMemRes) / 3)
+	Ω(sleepPodMemLimit).NotTo(gomega.BeZero(), "Sleep pod memory limit cannot be zero")
+	fmt.Fprintf(ginkgo.GinkgoWriter, "Sleep pod limit memory %dM\n", sleepPodMemLimit)
+})
+
+var _ = ginkgo.AfterSuite(func() {
+
+	ginkgo.By("Untainting some nodes")
+	err := kClient.UntaintNodes(nodesToTaint, taintKey)
+	Ω(err).NotTo(gomega.HaveOccurred(), "Could not remove taint from nodes "+strings.Join(nodesToTaint, ","))
+
+	ginkgo.By("Check Yunikorn's health")
+	checks, err := yunikorn.GetFailedHealthChecks()
+	Ω(err).NotTo(gomega.HaveOccurred())
+	Ω(checks).To(gomega.Equal(""), checks)
+
+	testDescription := ginkgo.CurrentSpecReport()
+	if testDescription.Failed() {
+		tests.LogTestClusterInfoWrapper(testDescription.FailureMessage(), []string{ns.Name})
+		tests.LogYunikornContainer(testDescription.FailureMessage())
+	}
+	ginkgo.By("Tearing down namespace: " + ns.Name)
+	err = kClient.TearDownNamespace(ns.Name)
+	Ω(err).NotTo(gomega.HaveOccurred())
+
+	yunikorn.RestoreConfigMapWrapper(oldConfigMap, annotation)
+})
+
+var _ = ginkgo.Describe("Preemption", func() {
+	ginkgo.It("Verify_basic_preemption", func() {
+		ginkgo.By("A queue uses resource more than the guaranteed value even after removing one of the pods. The cluster doesn't have enough resource to deploy a pod in another queue which uses resource less than the guaranteed value.")
+		// update config
+		ginkgo.By("Update root.sandbox1 and root.sandbox2 with guaranteed memory " + strconv.FormatInt(sleepPodMemLimit, 10) + "M")
+		cm, err := kClient.GetConfigMap(constants.ConfigMapName, configmanager.YuniKornTestConfig.YkNamespace)
+		Ω(err).NotTo(HaveOccurred())
+		cm.Data[configmanager.DefaultPolicyGroup] = fmt.Sprintf(preemptionQueueYamlFormat, sleepPodMemLimit, "default", sleepPodMemLimit)
+		_, err = kClient.UpdateConfigMap(cm, configmanager.YuniKornTestConfig.YkNamespace)
+		Ω(err).NotTo(HaveOccurred())
+
+		// Define sleepPod
+		sleepPod1Configs := k8s.SleepPodConfig{Name: "sleepjob1", NS: dev, Mem: sleepPodMemLimit, Time: 600, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod2Configs := k8s.SleepPodConfig{Name: "sleepjob2", NS: dev, Mem: sleepPodMemLimit, Time: 600, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod3Configs := k8s.SleepPodConfig{Name: "sleepjob3", NS: dev, Mem: sleepPodMemLimit, Time: 600, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod4Configs := k8s.SleepPodConfig{Name: "sleepjob4", NS: dev, Mem: sleepPodMemLimit, Time: 600, Optedout: true, Labels: map[string]string{"queue": "root.sandbox2"}}

Review Comment:
   Pod initialization is almost the same in every test, except for the `Time` part; think about extracting this into a function.
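   For example, a small constructor along these lines would leave only the name, queue and duration at the call sites (a sketch only; the names are illustrative and `Time` is assumed to be an int):

   ```go
   // Hypothetical helper: every field except name, queue and duration is shared.
   func newSleepPodConfig(name, queue string, seconds int) k8s.SleepPodConfig {
   	return k8s.SleepPodConfig{
   		Name:     name,
   		NS:       dev,
   		Mem:      sleepPodMemLimit,
   		Time:     seconds,
   		Optedout: true,
   		Labels:   map[string]string{"queue": queue},
   	}
   }
   ```

   Call sites would then reduce to, e.g., `newSleepPodConfig("sleepjob1", "root.sandbox1", 600)`.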



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: reviews-unsubscribe@yunikorn.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [yunikorn-k8shim] pbacsko closed pull request #608: [YUNIKORN-1471] add preemption e2e test

Posted by "pbacsko (via GitHub)" <gi...@apache.org>.
pbacsko closed pull request #608: [YUNIKORN-1471] add preemption e2e test
URL: https://github.com/apache/yunikorn-k8shim/pull/608




[GitHub] [yunikorn-k8shim] pbacsko commented on a diff in pull request #608: [YUNIKORN-1471] add preemption e2e test

Posted by "pbacsko (via GitHub)" <gi...@apache.org>.
pbacsko commented on code in PR #608:
URL: https://github.com/apache/yunikorn-k8shim/pull/608#discussion_r1218209617


##########
test/e2e/preemption/preemption_test.go:
##########
@@ -0,0 +1,337 @@
+/*
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package preemption_test
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"
+	v1 "k8s.io/api/core/v1"
+	k8serrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"github.com/apache/yunikorn-k8shim/pkg/common/constants"
+	tests "github.com/apache/yunikorn-k8shim/test/e2e"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/configmanager"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/common"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/k8s"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/yunikorn"
+)
+
+var kClient k8s.KubeCtl
+var restClient yunikorn.RClient
+var ns *v1.Namespace
+var dev = "dev" + common.RandSeq(5)
+var oldConfigMap = new(v1.ConfigMap)
+var annotation = "ann-" + common.RandSeq(10)
+
+// Nodes
+var Worker = ""
+var WorkerMemRes int64
+var sleepPodMemLimit int64
+var taintKey = "e2e_test_preemption"
+var nodesToTaint []string
+
+// Queues
+var preemptionQueueYamlFormat = `
+partitions:
+  - name: default
+    queues:
+      - name: root
+        submitacl: '*'
+        queues:
+          - name: sandbox1
+            submitacl: '*'
+            properties:
+              preemption.delay: 1s
+            resources:
+              guaranteed:
+                memory: %dM
+          - name: sandbox2
+            submitacl: '*'
+            properties:
+              preemption.policy: %s
+              preemption.delay: 1s
+            resources:
+              guaranteed:
+                memory: %dM
+`
+
+var _ = ginkgo.BeforeSuite(func() {
+	// Initializing kubectl client
+	kClient = k8s.KubeCtl{}
+	Ω(kClient.SetClient()).To(gomega.BeNil())
+	// Initializing rest client
+	restClient = yunikorn.RClient{}
+	Ω(restClient).NotTo(gomega.BeNil())
+
+	annotation = "ann-" + common.RandSeq(10)
+	yunikorn.EnsureYuniKornConfigsPresent()
+	yunikorn.UpdateConfigMapWrapper(oldConfigMap, "", annotation)
+
+	ginkgo.By("Port-forward the scheduler pod")
+	var err = kClient.PortForwardYkSchedulerPod()
+	Ω(err).NotTo(gomega.HaveOccurred())
+
+	ginkgo.By("create development namespace")
+	ns, err = kClient.CreateNamespace(dev, nil)
+	gomega.Ω(err).NotTo(gomega.HaveOccurred())
+	gomega.Ω(ns.Status.Phase).To(gomega.Equal(v1.NamespaceActive))
+
+	var nodes *v1.NodeList
+	nodes, err = kClient.GetNodes()
+	Ω(err).NotTo(gomega.HaveOccurred())
+	Ω(len(nodes.Items)).NotTo(gomega.BeZero(), "Nodes can't be empty")
+
+	// Extract node allocatable resources
+	for _, node := range nodes.Items {
+		// skip master if it's marked as such
+		node := node
+		if k8s.IsMasterNode(&node) || !k8s.IsComputeNode(&node) {
+			continue
+		}
+		if Worker == "" {
+			Worker = node.Name
+		} else {
+			nodesToTaint = append(nodesToTaint, node.Name)
+		}
+	}
+	Ω(Worker).NotTo(gomega.BeEmpty(), "Worker node not found")
+
+	ginkgo.By("Tainting some nodes..")
+	for _, nodeName := range nodesToTaint {
+		ginkgo.By("Tainting node " + nodeName)
+		err = kClient.TaintNode(nodeName, taintKey, "value", v1.TaintEffectNoSchedule)
+		Ω(err).NotTo(gomega.HaveOccurred())
+	}
+
+	nodesDAOInfo, err := restClient.GetNodes(constants.DefaultPartition)
+	Ω(err).NotTo(gomega.HaveOccurred())
+	Ω(nodesDAOInfo).NotTo(gomega.BeNil())
+
+	for _, node := range *nodesDAOInfo {
+		if node.NodeID == Worker {
+			WorkerMemRes = node.Available["memory"]
+		}
+	}
+	sleepPodMemLimit = int64(float64(WorkerMemRes)/3) / (1000 * 1000)
+	Ω(sleepPodMemLimit).NotTo(gomega.BeZero(), "Sleep pod memory limit cannot be zero")
+})
+
+var _ = ginkgo.AfterSuite(func() {
+
+	ginkgo.By("Untainting some nodes")
+	for _, nodeName := range nodesToTaint {
+		ginkgo.By("Untainting node " + nodeName)
+		err := kClient.UntaintNode(nodeName, taintKey)
+		Ω(err).NotTo(gomega.HaveOccurred(), "Could not remove taint from node "+nodeName)
+	}
+
+	ginkgo.By("Check Yunikorn's health")
+	checks, err := yunikorn.GetFailedHealthChecks()
+	Ω(err).NotTo(gomega.HaveOccurred())
+	Ω(checks).To(gomega.Equal(""), checks)
+
+	testDescription := ginkgo.CurrentSpecReport()
+	if testDescription.Failed() {
+		tests.LogTestClusterInfoWrapper(testDescription.FailureMessage(), []string{ns.Name})
+		tests.LogYunikornContainer(testDescription.FailureMessage())
+	}
+	ginkgo.By("Tearing down namespace: " + ns.Name)
+	err = kClient.TearDownNamespace(ns.Name)
+	Ω(err).NotTo(gomega.HaveOccurred())
+
+	yunikorn.RestoreConfigMapWrapper(oldConfigMap, annotation)
+})
+
+var _ = ginkgo.Describe("Preemption", func() {
+	ginkgo.It("Verify_basic_preemption", func() {
+		ginkgo.By("A queue uses resource more than the guaranteed value even after removing one of the pods. The cluster doesn't have enough resource to deploy a pod in another queue which uses resource less than the guaranteed value.")
+		// update config
+		cm, err := kClient.GetConfigMap(constants.ConfigMapName, configmanager.YuniKornTestConfig.YkNamespace)
+		Ω(err).NotTo(HaveOccurred())
+		cm.Data[configmanager.DefaultPolicyGroup] = fmt.Sprintf(preemptionQueueYamlFormat, sleepPodMemLimit, "default", sleepPodMemLimit)
+		_, err = kClient.UpdateConfigMap(cm, configmanager.YuniKornTestConfig.YkNamespace)
+		Ω(err).NotTo(HaveOccurred())
+
+		// Define sleepPod
+		sleepPod1Configs := k8s.SleepPodConfig{Name: "sleepjob1", NS: dev, Mem: sleepPodMemLimit, Time: 600, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod2Configs := k8s.SleepPodConfig{Name: "sleepjob2", NS: dev, Mem: sleepPodMemLimit, Time: 600, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod3Configs := k8s.SleepPodConfig{Name: "sleepjob3", NS: dev, Mem: sleepPodMemLimit, Time: 600, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod4Configs := k8s.SleepPodConfig{Name: "sleepjob4", NS: dev, Mem: sleepPodMemLimit, Time: 600, Optedout: true, Labels: map[string]string{"queue": "root.sandbox2"}}
+
+		for _, config := range []k8s.SleepPodConfig{sleepPod1Configs, sleepPod2Configs, sleepPod3Configs, sleepPod4Configs} {
+			ginkgo.By("Deploy the sleep pod " + config.Name + " to the development namespace")
+			sleepObj, podErr := k8s.InitSleepPod(config)
+			Ω(podErr).NotTo(gomega.HaveOccurred())
+			sleepRespPod, podErr := kClient.CreatePod(sleepObj, dev)
+			gomega.Ω(podErr).NotTo(gomega.HaveOccurred())
+
+			// Wait for pod to move to running state
+			podErr = kClient.WaitForPodBySelectorRunning(dev,
+				fmt.Sprintf("app=%s", sleepRespPod.ObjectMeta.Labels["app"]),
+				60)
+			gomega.Ω(podErr).NotTo(gomega.HaveOccurred())
+		}
+
+		// assert one of the pods in root.sandbox1 is preempted
+		ginkgo.By("One of the pods in root.sanbox1 is preempted")
+		sandbox1RunningPodsCnt := 0
+		pods, err := kClient.ListPodsByLabelSelector(dev, "queue=root.sandbox1")
+		gomega.Ω(err).NotTo(gomega.HaveOccurred())
+		for _, pod := range pods.Items {
+			if pod.DeletionTimestamp != nil {
+				continue
+			}
+			if pod.Status.Phase == v1.PodRunning {
+				sandbox1RunningPodsCnt++
+			}
+		}
+		Ω(sandbox1RunningPodsCnt).To(gomega.Equal(2), "One of the pods in root.sandbox1 should be preempted")
+	})
+
+	ginkgo.It("Verify_no_preemption_on_resources_less_than_guaranteed_value", func() {
+		ginkgo.By("A queue uses resource less than the guaranteed value can't be preempted.")
+		// update config
+		cm, err := kClient.GetConfigMap(constants.ConfigMapName, configmanager.YuniKornTestConfig.YkNamespace)
+		Ω(err).NotTo(HaveOccurred())
+		cm.Data[configmanager.DefaultPolicyGroup] = fmt.Sprintf(preemptionQueueYamlFormat, WorkerMemRes, "default", WorkerMemRes)
+		_, err = kClient.UpdateConfigMap(cm, configmanager.YuniKornTestConfig.YkNamespace)
+		Ω(err).NotTo(HaveOccurred())
+
+		// Define sleepPod
+		sleepPod1Configs := k8s.SleepPodConfig{Name: "sleepjob1", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod2Configs := k8s.SleepPodConfig{Name: "sleepjob2", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod3Configs := k8s.SleepPodConfig{Name: "sleepjob3", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod4Configs := k8s.SleepPodConfig{Name: "sleepjob4", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox2"}}
+
+		// Deploy pods in root.sandbox1
+		for _, config := range []k8s.SleepPodConfig{sleepPod1Configs, sleepPod2Configs, sleepPod3Configs} {
+			ginkgo.By("Deploy the sleep pod " + config.Name + " to the development namespace")
+			sleepObj, podErr := k8s.InitSleepPod(config)
+			Ω(podErr).NotTo(gomega.HaveOccurred())
+			sleepRespPod, podErr := kClient.CreatePod(sleepObj, dev)
+			gomega.Ω(podErr).NotTo(gomega.HaveOccurred())
+
+			// Wait for pod to move to running state
+			podErr = kClient.WaitForPodBySelectorRunning(dev,
+				fmt.Sprintf("app=%s", sleepRespPod.ObjectMeta.Labels["app"]),
+				30)
+			gomega.Ω(podErr).NotTo(gomega.HaveOccurred())
+		}
+
+		// Deploy sleepjob4 pod in root.sandbox2
+		ginkgo.By("Deploy the sleep pod " + sleepPod4Configs.Name + " to the development namespace")
+		sleepObj, podErr := k8s.InitSleepPod(sleepPod4Configs)
+		Ω(podErr).NotTo(gomega.HaveOccurred())
+		sleepRespPod4, err := kClient.CreatePod(sleepObj, dev)
+		gomega.Ω(err).NotTo(gomega.HaveOccurred())
+
+		// sleepjob4 pod can't be scheduled before pods in root.sandbox1 are succeeded
+		ginkgo.By("The sleep pod " + sleepPod4Configs.Name + " can't be scheduled")
+		err = kClient.WaitForPodUnschedulable(sleepRespPod4, time.Duration(60)*time.Second)
+		gomega.Ω(err).NotTo(gomega.HaveOccurred())
+
+		// pods in root.sandbox1 can be succeeded
+		ginkgo.By("The pods in root.sandbox1 can be succeeded")
+		for _, config := range []k8s.SleepPodConfig{sleepPod1Configs, sleepPod2Configs, sleepPod3Configs} {
+			err = kClient.WaitForPodScheduled(dev, config.Name, time.Duration(30)*time.Second)
+			gomega.Ω(err).NotTo(gomega.HaveOccurred())
+		}
+	})
+
+	ginkgo.It("Verify_no_preemption_outside_fence", func() {
+		ginkgo.By("The preemption can't go outside the fence.")
+		// update config
+		cm, err := kClient.GetConfigMap(constants.ConfigMapName, configmanager.YuniKornTestConfig.YkNamespace)
+		Ω(err).NotTo(HaveOccurred())
+		cm.Data[configmanager.DefaultPolicyGroup] = fmt.Sprintf(preemptionQueueYamlFormat, sleepPodMemLimit, "fence", sleepPodMemLimit)
+		_, err = kClient.UpdateConfigMap(cm, configmanager.YuniKornTestConfig.YkNamespace)
+		Ω(err).NotTo(HaveOccurred())
+
+		// Define sleepPod
+		sleepPod1Configs := k8s.SleepPodConfig{Name: "sleepjob1", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod2Configs := k8s.SleepPodConfig{Name: "sleepjob2", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod3Configs := k8s.SleepPodConfig{Name: "sleepjob3", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod4Configs := k8s.SleepPodConfig{Name: "sleepjob4", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox2"}}
+
+		// Deploy pods in root.sandbox1
+		for _, config := range []k8s.SleepPodConfig{sleepPod1Configs, sleepPod2Configs, sleepPod3Configs} {
+			ginkgo.By("Deploy the sleep pod " + config.Name + " to the development namespace")
+			sleepObj, podErr := k8s.InitSleepPod(config)
+			Ω(podErr).NotTo(gomega.HaveOccurred())
+			sleepRespPod, podErr := kClient.CreatePod(sleepObj, dev)
+			gomega.Ω(podErr).NotTo(gomega.HaveOccurred())
+
+			// Wait for pod to move to running state
+			podErr = kClient.WaitForPodBySelectorRunning(dev,
+				fmt.Sprintf("app=%s", sleepRespPod.ObjectMeta.Labels["app"]),
+				30)
+			gomega.Ω(podErr).NotTo(gomega.HaveOccurred())
+		}
+
+		// Deploy sleepjob4 pod in root.sandbox2
+		ginkgo.By("Deploy the sleep pod " + sleepPod4Configs.Name + " to the development namespace")
+		sleepObj, podErr := k8s.InitSleepPod(sleepPod4Configs)
+		Ω(podErr).NotTo(gomega.HaveOccurred())
+		sleepRespPod4, err := kClient.CreatePod(sleepObj, dev)
+		gomega.Ω(err).NotTo(gomega.HaveOccurred())
+
+		// sleepjob4 pod can't be scheduled before pods in root.sandbox1 are succeeded
+		ginkgo.By("The sleep pod " + sleepPod4Configs.Name + " can't be scheduled")
+		err = kClient.WaitForPodUnschedulable(sleepRespPod4, time.Duration(60)*time.Second)
+		gomega.Ω(err).NotTo(gomega.HaveOccurred())
+
+		// pods in root.sandbox1 can be succeeded
+		ginkgo.By("The pods in root.sandbox1 can be succeeded")
+		for _, config := range []k8s.SleepPodConfig{sleepPod1Configs, sleepPod2Configs, sleepPod3Configs} {
+			err = kClient.WaitForPodScheduled(dev, config.Name, time.Duration(30)*time.Second)
+			gomega.Ω(err).NotTo(gomega.HaveOccurred())
+		}
+	})
+
+	ginkgo.AfterEach(func() {
+
+		// Delete all sleep pods
+		ginkgo.By("Delete all sleep pods")
+		pods, err := kClient.GetPodNamesFromNS(ns.Name)
+		if err == nil {

Review Comment:
   In situations like this, I very much prefer the following:
   ```
   if err != nil {
       fmt.Fprintf(ginkgo.GinkgoWriter, "Failed to get pods from namespace %s - reason is %s\n", ns.Name, err.Error())
       return
   }
   ```
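
   For illustration, with that early-return style the whole AfterEach from the hunk above could read roughly as follows. This is only a sketch: the deletion body is not visible in the hunk, so the DeletePod call, the "sleepjob" name filter, and the assumption that GetPodNamesFromNS returns a slice of pod names are mine, not the PR's actual code.
   ```
   ginkgo.AfterEach(func() {
       // Delete all sleep pods created by the specs above
       ginkgo.By("Delete all sleep pods")
       pods, err := kClient.GetPodNamesFromNS(ns.Name)
       if err != nil {
           fmt.Fprintf(ginkgo.GinkgoWriter, "Failed to get pods from namespace %s - reason is %s\n", ns.Name, err.Error())
           return
       }
       for _, podName := range pods {
           if !strings.HasPrefix(podName, "sleepjob") {
               continue
           }
           // assumed helper name; use whatever delete method KubeCtl actually exposes
           if err = kClient.DeletePod(podName, ns.Name); err != nil {
               fmt.Fprintf(ginkgo.GinkgoWriter, "Failed to delete pod %s - reason is %s\n", podName, err.Error())
           }
       }
   })
   ```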



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: reviews-unsubscribe@yunikorn.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [yunikorn-k8shim] FrankYang0529 commented on pull request #608: [YUNIKORN-1471] add preemption e2e test

Posted by "FrankYang0529 (via GitHub)" <gi...@apache.org>.
FrankYang0529 commented on PR #608:
URL: https://github.com/apache/yunikorn-k8shim/pull/608#issuecomment-1582603162

   > We should track it for now and see if the tests keep giving us issues.
   
   Hi @wilfred-s, I did some investigation and created [YUNIKORN-1796](https://issues.apache.org/jira/browse/YUNIKORN-1796) for it.


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: reviews-unsubscribe@yunikorn.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [yunikorn-k8shim] wilfred-s commented on pull request #608: [YUNIKORN-1471] add preemption e2e test

Posted by "wilfred-s (via GitHub)" <gi...@apache.org>.
wilfred-s commented on PR #608:
URL: https://github.com/apache/yunikorn-k8shim/pull/608#issuecomment-1581837813

   The failure is in 2 different tests and also on different K8s versions, so it looks like we have some flakiness in these tests.
   We should track it for now and see if the tests keep giving us issues.


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: reviews-unsubscribe@yunikorn.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [yunikorn-k8shim] pbacsko commented on a diff in pull request #608: [YUNIKORN-1471] add preemption e2e test

Posted by "pbacsko (via GitHub)" <gi...@apache.org>.
pbacsko commented on code in PR #608:
URL: https://github.com/apache/yunikorn-k8shim/pull/608#discussion_r1218198881


##########
test/e2e/preemption/preemption_test.go:
##########
@@ -0,0 +1,337 @@
+/*
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package preemption_test
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"
+	v1 "k8s.io/api/core/v1"
+	k8serrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"github.com/apache/yunikorn-k8shim/pkg/common/constants"
+	tests "github.com/apache/yunikorn-k8shim/test/e2e"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/configmanager"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/common"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/k8s"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/yunikorn"
+)
+
+var kClient k8s.KubeCtl
+var restClient yunikorn.RClient
+var ns *v1.Namespace
+var dev = "dev" + common.RandSeq(5)
+var oldConfigMap = new(v1.ConfigMap)
+var annotation = "ann-" + common.RandSeq(10)
+
+// Nodes
+var Worker = ""
+var WorkerMemRes int64
+var sleepPodMemLimit int64
+var taintKey = "e2e_test_preemption"
+var nodesToTaint []string
+
+// Queues
+var preemptionQueueYamlFormat = `
+partitions:
+  - name: default
+    queues:
+      - name: root
+        submitacl: '*'
+        queues:
+          - name: sandbox1
+            submitacl: '*'
+            properties:
+              preemption.delay: 1s
+            resources:
+              guaranteed:
+                memory: %dM
+          - name: sandbox2
+            submitacl: '*'
+            properties:
+              preemption.policy: %s
+              preemption.delay: 1s
+            resources:
+              guaranteed:
+                memory: %dM
+`
+
+var _ = ginkgo.BeforeSuite(func() {
+	// Initializing kubectl client
+	kClient = k8s.KubeCtl{}
+	Ω(kClient.SetClient()).To(gomega.BeNil())
+	// Initializing rest client
+	restClient = yunikorn.RClient{}
+	Ω(restClient).NotTo(gomega.BeNil())
+
+	annotation = "ann-" + common.RandSeq(10)
+	yunikorn.EnsureYuniKornConfigsPresent()
+	yunikorn.UpdateConfigMapWrapper(oldConfigMap, "", annotation)
+
+	ginkgo.By("Port-forward the scheduler pod")
+	var err = kClient.PortForwardYkSchedulerPod()
+	Ω(err).NotTo(gomega.HaveOccurred())
+
+	ginkgo.By("create development namespace")
+	ns, err = kClient.CreateNamespace(dev, nil)
+	gomega.Ω(err).NotTo(gomega.HaveOccurred())
+	gomega.Ω(ns.Status.Phase).To(gomega.Equal(v1.NamespaceActive))
+
+	var nodes *v1.NodeList
+	nodes, err = kClient.GetNodes()
+	Ω(err).NotTo(gomega.HaveOccurred())
+	Ω(len(nodes.Items)).NotTo(gomega.BeZero(), "Nodes cant be empty")
+
+	// Extract node allocatable resources
+	for _, node := range nodes.Items {
+		// skip master if it's marked as such
+		node := node
+		if k8s.IsMasterNode(&node) || !k8s.IsComputeNode(&node) {
+			continue
+		}
+		if Worker == "" {
+			Worker = node.Name
+		} else {
+			nodesToTaint = append(nodesToTaint, node.Name)
+		}
+	}
+	Ω(Worker).NotTo(gomega.BeEmpty(), "Worker node not found")
+
+	ginkgo.By("Tainting some nodes..")

Review Comment:
   If you/we decide to keep this, it makes sense to extract this code from the file, because the tainting/untainting logic looks like a copy-paste.
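
   A minimal sketch of what the shared helpers could look like (the receiver and package placement are only a suggestion; they just wrap the per-node TaintNode/UntaintNode calls that already exist):
   ```
   // TaintNodes applies the same taint to every node in the list.
   func (k *KubeCtl) TaintNodes(nodeNames []string, key string, value string, effect v1.TaintEffect) error {
       for _, nodeName := range nodeNames {
           if err := k.TaintNode(nodeName, key, value, effect); err != nil {
               return err
           }
       }
       return nil
   }

   // UntaintNodes removes the taint again, e.g. from AfterSuite.
   func (k *KubeCtl) UntaintNodes(nodeNames []string, key string) error {
       for _, nodeName := range nodeNames {
           if err := k.UntaintNode(nodeName, key); err != nil {
               return err
           }
       }
       return nil
   }
   ```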



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: reviews-unsubscribe@yunikorn.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [yunikorn-k8shim] pbacsko commented on a diff in pull request #608: [YUNIKORN-1471] add preemption e2e test

Posted by "pbacsko (via GitHub)" <gi...@apache.org>.
pbacsko commented on code in PR #608:
URL: https://github.com/apache/yunikorn-k8shim/pull/608#discussion_r1219837721


##########
test/e2e/preemption/preemption_test.go:
##########
@@ -0,0 +1,321 @@
+/*
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package preemption_test
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"
+	v1 "k8s.io/api/core/v1"
+
+	"github.com/apache/yunikorn-k8shim/pkg/common/constants"
+	tests "github.com/apache/yunikorn-k8shim/test/e2e"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/configmanager"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/common"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/k8s"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/yunikorn"
+)
+
+var kClient k8s.KubeCtl
+var restClient yunikorn.RClient
+var ns *v1.Namespace
+var dev = "dev" + common.RandSeq(5)
+var oldConfigMap = new(v1.ConfigMap)
+var annotation = "ann-" + common.RandSeq(10)
+
+// Nodes
+var Worker = ""
+var WorkerMemRes int64
+var sleepPodMemLimit int64
+var taintKey = "e2e_test_preemption"
+var nodesToTaint []string
+
+// Queues
+var preemptionQueueYamlFormat = `
+partitions:
+  - name: default
+    queues:
+      - name: root
+        submitacl: '*'
+        queues:
+          - name: sandbox1
+            submitacl: '*'
+            properties:
+              preemption.delay: 1s
+            resources:
+              guaranteed:
+                memory: %dM
+          - name: sandbox2
+            submitacl: '*'
+            properties:
+              preemption.policy: %s
+              preemption.delay: 1s
+            resources:
+              guaranteed:
+                memory: %dM
+`
+
+var _ = ginkgo.BeforeSuite(func() {
+	// Initializing kubectl client
+	kClient = k8s.KubeCtl{}
+	Ω(kClient.SetClient()).To(gomega.BeNil())
+	// Initializing rest client
+	restClient = yunikorn.RClient{}
+	Ω(restClient).NotTo(gomega.BeNil())
+
+	annotation = "ann-" + common.RandSeq(10)
+	yunikorn.EnsureYuniKornConfigsPresent()
+	yunikorn.UpdateConfigMapWrapper(oldConfigMap, "", annotation)
+
+	ginkgo.By("Port-forward the scheduler pod")
+	var err = kClient.PortForwardYkSchedulerPod()
+	Ω(err).NotTo(gomega.HaveOccurred())
+
+	ginkgo.By("create development namespace")
+	ns, err = kClient.CreateNamespace(dev, nil)
+	gomega.Ω(err).NotTo(gomega.HaveOccurred())
+	gomega.Ω(ns.Status.Phase).To(gomega.Equal(v1.NamespaceActive))
+
+	var nodes *v1.NodeList
+	nodes, err = kClient.GetNodes()
+	Ω(err).NotTo(gomega.HaveOccurred())
+	Ω(len(nodes.Items)).NotTo(gomega.BeZero(), "Nodes cant be empty")
+
+	// Extract node allocatable resources
+	for _, node := range nodes.Items {
+		// skip master if it's marked as such
+		node := node
+		if k8s.IsMasterNode(&node) || !k8s.IsComputeNode(&node) {
+			continue
+		}
+		if Worker == "" {
+			Worker = node.Name
+		} else {
+			nodesToTaint = append(nodesToTaint, node.Name)
+		}
+	}
+	Ω(Worker).NotTo(gomega.BeEmpty(), "Worker node not found")
+
+	ginkgo.By("Tainting some nodes..")
+	err = kClient.TaintNodes(nodesToTaint, taintKey, "value", v1.TaintEffectNoSchedule)
+	Ω(err).NotTo(gomega.HaveOccurred())
+
+	nodesDAOInfo, err := restClient.GetNodes(constants.DefaultPartition)
+	Ω(err).NotTo(gomega.HaveOccurred())
+	Ω(nodesDAOInfo).NotTo(gomega.BeNil())
+
+	for _, node := range *nodesDAOInfo {
+		if node.NodeID == Worker {
+			WorkerMemRes = node.Available["memory"]
+		}
+	}
+	WorkerMemRes /= (1000 * 1000) // change to M
+	fmt.Fprintf(ginkgo.GinkgoWriter, "Worker node %s available memory %dM\n", Worker, WorkerMemRes)
+
+	sleepPodMemLimit = int64(float64(WorkerMemRes) / 3)
+	Ω(sleepPodMemLimit).NotTo(gomega.BeZero(), "Sleep pod memory limit cannot be zero")
+	fmt.Fprintf(ginkgo.GinkgoWriter, "Sleep pod limit memory %dM\n", sleepPodMemLimit)
+})
+
+var _ = ginkgo.AfterSuite(func() {
+
+	ginkgo.By("Untainting some nodes")
+	err := kClient.UntaintNodes(nodesToTaint, taintKey)
+	Ω(err).NotTo(gomega.HaveOccurred(), "Could not remove taint from nodes "+strings.Join(nodesToTaint, ","))
+
+	ginkgo.By("Check Yunikorn's health")
+	checks, err := yunikorn.GetFailedHealthChecks()
+	Ω(err).NotTo(gomega.HaveOccurred())
+	Ω(checks).To(gomega.Equal(""), checks)
+
+	testDescription := ginkgo.CurrentSpecReport()
+	if testDescription.Failed() {
+		tests.LogTestClusterInfoWrapper(testDescription.FailureMessage(), []string{ns.Name})
+		tests.LogYunikornContainer(testDescription.FailureMessage())
+	}
+	ginkgo.By("Tearing down namespace: " + ns.Name)
+	err = kClient.TearDownNamespace(ns.Name)
+	Ω(err).NotTo(gomega.HaveOccurred())
+
+	yunikorn.RestoreConfigMapWrapper(oldConfigMap, annotation)
+})
+
+var _ = ginkgo.Describe("Preemption", func() {
+	ginkgo.It("Verify_basic_preemption", func() {
+		ginkgo.By("A queue uses resource more than the guaranteed value even after removing one of the pods. The cluster doesn't have enough resource to deploy a pod in another queue which uses resource less than the guaranteed value.")
+		// update config

Review Comment:
   Either update the config in BeforeSuite and reset it in AfterSuite, or do it in BeforeEach/AfterEach. If you want to do this in every test case, then add a matching reset step as well.
   
   Right now it's problematic because you don't perform any restoration (the pre-commit check came back green, so that's good, but still, let's restore the config). If the config is the same for every test, then I suggest BeforeSuite/AfterSuite. There are examples of config saving/restoration in other suites; check those out.
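
   For illustration only, a per-spec save/restore could look roughly like this, reusing only the helpers that already appear in the test; `savedQueueConfig` is a hypothetical variable introduced for the sketch:
   ```
   var savedQueueConfig string

   var _ = ginkgo.BeforeEach(func() {
       // remember the queue config as it was before the spec changes it
       cm, err := kClient.GetConfigMap(constants.ConfigMapName, configmanager.YuniKornTestConfig.YkNamespace)
       gomega.Ω(err).NotTo(gomega.HaveOccurred())
       savedQueueConfig = cm.Data[configmanager.DefaultPolicyGroup]
   })

   var _ = ginkgo.AfterEach(func() {
       // put the saved queue config back so the next spec starts from a known state
       cm, err := kClient.GetConfigMap(constants.ConfigMapName, configmanager.YuniKornTestConfig.YkNamespace)
       gomega.Ω(err).NotTo(gomega.HaveOccurred())
       cm.Data[configmanager.DefaultPolicyGroup] = savedQueueConfig
       _, err = kClient.UpdateConfigMap(cm, configmanager.YuniKornTestConfig.YkNamespace)
       gomega.Ω(err).NotTo(gomega.HaveOccurred())
   })
   ```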



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: reviews-unsubscribe@yunikorn.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [yunikorn-k8shim] pbacsko commented on a diff in pull request #608: [YUNIKORN-1471] add preemption e2e test

Posted by "pbacsko (via GitHub)" <gi...@apache.org>.
pbacsko commented on code in PR #608:
URL: https://github.com/apache/yunikorn-k8shim/pull/608#discussion_r1218209617


##########
test/e2e/preemption/preemption_test.go:
##########
@@ -0,0 +1,337 @@
+/*
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package preemption_test
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"
+	v1 "k8s.io/api/core/v1"
+	k8serrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"github.com/apache/yunikorn-k8shim/pkg/common/constants"
+	tests "github.com/apache/yunikorn-k8shim/test/e2e"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/configmanager"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/common"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/k8s"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/yunikorn"
+)
+
+var kClient k8s.KubeCtl
+var restClient yunikorn.RClient
+var ns *v1.Namespace
+var dev = "dev" + common.RandSeq(5)
+var oldConfigMap = new(v1.ConfigMap)
+var annotation = "ann-" + common.RandSeq(10)
+
+// Nodes
+var Worker = ""
+var WorkerMemRes int64
+var sleepPodMemLimit int64
+var taintKey = "e2e_test_preemption"
+var nodesToTaint []string
+
+// Queues
+var preemptionQueueYamlFormat = `
+partitions:
+  - name: default
+    queues:
+      - name: root
+        submitacl: '*'
+        queues:
+          - name: sandbox1
+            submitacl: '*'
+            properties:
+              preemption.delay: 1s
+            resources:
+              guaranteed:
+                memory: %dM
+          - name: sandbox2
+            submitacl: '*'
+            properties:
+              preemption.policy: %s
+              preemption.delay: 1s
+            resources:
+              guaranteed:
+                memory: %dM
+`
+
+var _ = ginkgo.BeforeSuite(func() {
+	// Initializing kubectl client
+	kClient = k8s.KubeCtl{}
+	Ω(kClient.SetClient()).To(gomega.BeNil())
+	// Initializing rest client
+	restClient = yunikorn.RClient{}
+	Ω(restClient).NotTo(gomega.BeNil())
+
+	annotation = "ann-" + common.RandSeq(10)
+	yunikorn.EnsureYuniKornConfigsPresent()
+	yunikorn.UpdateConfigMapWrapper(oldConfigMap, "", annotation)
+
+	ginkgo.By("Port-forward the scheduler pod")
+	var err = kClient.PortForwardYkSchedulerPod()
+	Ω(err).NotTo(gomega.HaveOccurred())
+
+	ginkgo.By("create development namespace")
+	ns, err = kClient.CreateNamespace(dev, nil)
+	gomega.Ω(err).NotTo(gomega.HaveOccurred())
+	gomega.Ω(ns.Status.Phase).To(gomega.Equal(v1.NamespaceActive))
+
+	var nodes *v1.NodeList
+	nodes, err = kClient.GetNodes()
+	Ω(err).NotTo(gomega.HaveOccurred())
+	Ω(len(nodes.Items)).NotTo(gomega.BeZero(), "Nodes cant be empty")
+
+	// Extract node allocatable resources
+	for _, node := range nodes.Items {
+		// skip master if it's marked as such
+		node := node
+		if k8s.IsMasterNode(&node) || !k8s.IsComputeNode(&node) {
+			continue
+		}
+		if Worker == "" {
+			Worker = node.Name
+		} else {
+			nodesToTaint = append(nodesToTaint, node.Name)
+		}
+	}
+	Ω(Worker).NotTo(gomega.BeEmpty(), "Worker node not found")
+
+	ginkgo.By("Tainting some nodes..")
+	for _, nodeName := range nodesToTaint {
+		ginkgo.By("Tainting node " + nodeName)
+		err = kClient.TaintNode(nodeName, taintKey, "value", v1.TaintEffectNoSchedule)
+		Ω(err).NotTo(gomega.HaveOccurred())
+	}
+
+	nodesDAOInfo, err := restClient.GetNodes(constants.DefaultPartition)
+	Ω(err).NotTo(gomega.HaveOccurred())
+	Ω(nodesDAOInfo).NotTo(gomega.BeNil())
+
+	for _, node := range *nodesDAOInfo {
+		if node.NodeID == Worker {
+			WorkerMemRes = node.Available["memory"]
+		}
+	}
+	sleepPodMemLimit = int64(float64(WorkerMemRes)/3) / (1000 * 1000)
+	Ω(sleepPodMemLimit).NotTo(gomega.BeZero(), "Sleep pod memory limit cannot be zero")
+})
+
+var _ = ginkgo.AfterSuite(func() {
+
+	ginkgo.By("Untainting some nodes")
+	for _, nodeName := range nodesToTaint {
+		ginkgo.By("Untainting node " + nodeName)
+		err := kClient.UntaintNode(nodeName, taintKey)
+		Ω(err).NotTo(gomega.HaveOccurred(), "Could not remove taint from node "+nodeName)
+	}
+
+	ginkgo.By("Check Yunikorn's health")
+	checks, err := yunikorn.GetFailedHealthChecks()
+	Ω(err).NotTo(gomega.HaveOccurred())
+	Ω(checks).To(gomega.Equal(""), checks)
+
+	testDescription := ginkgo.CurrentSpecReport()
+	if testDescription.Failed() {
+		tests.LogTestClusterInfoWrapper(testDescription.FailureMessage(), []string{ns.Name})
+		tests.LogYunikornContainer(testDescription.FailureMessage())
+	}
+	ginkgo.By("Tearing down namespace: " + ns.Name)
+	err = kClient.TearDownNamespace(ns.Name)
+	Ω(err).NotTo(gomega.HaveOccurred())
+
+	yunikorn.RestoreConfigMapWrapper(oldConfigMap, annotation)
+})
+
+var _ = ginkgo.Describe("Preemption", func() {
+	ginkgo.It("Verify_basic_preemption", func() {
+		ginkgo.By("A queue uses resource more than the guaranteed value even after removing one of the pods. The cluster doesn't have enough resource to deploy a pod in another queue which uses resource less than the guaranteed value.")
+		// update config
+		cm, err := kClient.GetConfigMap(constants.ConfigMapName, configmanager.YuniKornTestConfig.YkNamespace)
+		Ω(err).NotTo(HaveOccurred())
+		cm.Data[configmanager.DefaultPolicyGroup] = fmt.Sprintf(preemptionQueueYamlFormat, sleepPodMemLimit, "default", sleepPodMemLimit)
+		_, err = kClient.UpdateConfigMap(cm, configmanager.YuniKornTestConfig.YkNamespace)
+		Ω(err).NotTo(HaveOccurred())
+
+		// Define sleepPod
+		sleepPod1Configs := k8s.SleepPodConfig{Name: "sleepjob1", NS: dev, Mem: sleepPodMemLimit, Time: 600, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod2Configs := k8s.SleepPodConfig{Name: "sleepjob2", NS: dev, Mem: sleepPodMemLimit, Time: 600, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod3Configs := k8s.SleepPodConfig{Name: "sleepjob3", NS: dev, Mem: sleepPodMemLimit, Time: 600, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod4Configs := k8s.SleepPodConfig{Name: "sleepjob4", NS: dev, Mem: sleepPodMemLimit, Time: 600, Optedout: true, Labels: map[string]string{"queue": "root.sandbox2"}}
+
+		for _, config := range []k8s.SleepPodConfig{sleepPod1Configs, sleepPod2Configs, sleepPod3Configs, sleepPod4Configs} {
+			ginkgo.By("Deploy the sleep pod " + config.Name + " to the development namespace")
+			sleepObj, podErr := k8s.InitSleepPod(config)
+			Ω(podErr).NotTo(gomega.HaveOccurred())
+			sleepRespPod, podErr := kClient.CreatePod(sleepObj, dev)
+			gomega.Ω(podErr).NotTo(gomega.HaveOccurred())
+
+			// Wait for pod to move to running state
+			podErr = kClient.WaitForPodBySelectorRunning(dev,
+				fmt.Sprintf("app=%s", sleepRespPod.ObjectMeta.Labels["app"]),
+				60)
+			gomega.Ω(podErr).NotTo(gomega.HaveOccurred())
+		}
+
+		// assert one of the pods in root.sandbox1 is preempted
+		ginkgo.By("One of the pods in root.sandbox1 is preempted")
+		sandbox1RunningPodsCnt := 0
+		pods, err := kClient.ListPodsByLabelSelector(dev, "queue=root.sandbox1")
+		gomega.Ω(err).NotTo(gomega.HaveOccurred())
+		for _, pod := range pods.Items {
+			if pod.DeletionTimestamp != nil {
+				continue
+			}
+			if pod.Status.Phase == v1.PodRunning {
+				sandbox1RunningPodsCnt++
+			}
+		}
+		Ω(sandbox1RunningPodsCnt).To(gomega.Equal(2), "One of the pods in root.sandbox1 should be preempted")
+	})
+
+	ginkgo.It("Verify_no_preemption_on_resources_less_than_guaranteed_value", func() {
+		ginkgo.By("A queue uses resource less than the guaranteed value can't be preempted.")
+		// update config
+		cm, err := kClient.GetConfigMap(constants.ConfigMapName, configmanager.YuniKornTestConfig.YkNamespace)
+		Ω(err).NotTo(HaveOccurred())
+		cm.Data[configmanager.DefaultPolicyGroup] = fmt.Sprintf(preemptionQueueYamlFormat, WorkerMemRes, "default", WorkerMemRes)
+		_, err = kClient.UpdateConfigMap(cm, configmanager.YuniKornTestConfig.YkNamespace)
+		Ω(err).NotTo(HaveOccurred())
+
+		// Define sleepPod
+		sleepPod1Configs := k8s.SleepPodConfig{Name: "sleepjob1", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod2Configs := k8s.SleepPodConfig{Name: "sleepjob2", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod3Configs := k8s.SleepPodConfig{Name: "sleepjob3", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod4Configs := k8s.SleepPodConfig{Name: "sleepjob4", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox2"}}
+
+		// Deploy pods in root.sandbox1
+		for _, config := range []k8s.SleepPodConfig{sleepPod1Configs, sleepPod2Configs, sleepPod3Configs} {
+			ginkgo.By("Deploy the sleep pod " + config.Name + " to the development namespace")
+			sleepObj, podErr := k8s.InitSleepPod(config)
+			Ω(podErr).NotTo(gomega.HaveOccurred())
+			sleepRespPod, podErr := kClient.CreatePod(sleepObj, dev)
+			gomega.Ω(podErr).NotTo(gomega.HaveOccurred())
+
+			// Wait for pod to move to running state
+			podErr = kClient.WaitForPodBySelectorRunning(dev,
+				fmt.Sprintf("app=%s", sleepRespPod.ObjectMeta.Labels["app"]),
+				30)
+			gomega.Ω(podErr).NotTo(gomega.HaveOccurred())
+		}
+
+		// Deploy sleepjob4 pod in root.sandbox2
+		ginkgo.By("Deploy the sleep pod " + sleepPod4Configs.Name + " to the development namespace")
+		sleepObj, podErr := k8s.InitSleepPod(sleepPod4Configs)
+		Ω(podErr).NotTo(gomega.HaveOccurred())
+		sleepRespPod4, err := kClient.CreatePod(sleepObj, dev)
+		gomega.Ω(err).NotTo(gomega.HaveOccurred())
+
+		// sleepjob4 pod can't be scheduled before pods in root.sandbox1 are succeeded
+		ginkgo.By("The sleep pod " + sleepPod4Configs.Name + " can't be scheduled")
+		err = kClient.WaitForPodUnschedulable(sleepRespPod4, time.Duration(60)*time.Second)
+		gomega.Ω(err).NotTo(gomega.HaveOccurred())
+
+		// pods in root.sandbox1 can be succeeded
+		ginkgo.By("The pods in root.sandbox1 can be succeeded")
+		for _, config := range []k8s.SleepPodConfig{sleepPod1Configs, sleepPod2Configs, sleepPod3Configs} {
+			err = kClient.WaitForPodScheduled(dev, config.Name, time.Duration(30)*time.Second)
+			gomega.Ω(err).NotTo(gomega.HaveOccurred())
+		}
+	})
+
+	ginkgo.It("Verify_no_preemption_outside_fence", func() {
+		ginkgo.By("The preemption can't go outside the fence.")
+		// update config
+		cm, err := kClient.GetConfigMap(constants.ConfigMapName, configmanager.YuniKornTestConfig.YkNamespace)
+		Ω(err).NotTo(HaveOccurred())
+		cm.Data[configmanager.DefaultPolicyGroup] = fmt.Sprintf(preemptionQueueYamlFormat, sleepPodMemLimit, "fence", sleepPodMemLimit)
+		_, err = kClient.UpdateConfigMap(cm, configmanager.YuniKornTestConfig.YkNamespace)
+		Ω(err).NotTo(HaveOccurred())
+
+		// Define sleepPod
+		sleepPod1Configs := k8s.SleepPodConfig{Name: "sleepjob1", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod2Configs := k8s.SleepPodConfig{Name: "sleepjob2", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod3Configs := k8s.SleepPodConfig{Name: "sleepjob3", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod4Configs := k8s.SleepPodConfig{Name: "sleepjob4", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox2"}}
+
+		// Deploy pods in root.sandbox1
+		for _, config := range []k8s.SleepPodConfig{sleepPod1Configs, sleepPod2Configs, sleepPod3Configs} {
+			ginkgo.By("Deploy the sleep pod " + config.Name + " to the development namespace")
+			sleepObj, podErr := k8s.InitSleepPod(config)
+			Ω(podErr).NotTo(gomega.HaveOccurred())
+			sleepRespPod, podErr := kClient.CreatePod(sleepObj, dev)
+			gomega.Ω(podErr).NotTo(gomega.HaveOccurred())
+
+			// Wait for pod to move to running state
+			podErr = kClient.WaitForPodBySelectorRunning(dev,
+				fmt.Sprintf("app=%s", sleepRespPod.ObjectMeta.Labels["app"]),
+				30)
+			gomega.Ω(podErr).NotTo(gomega.HaveOccurred())
+		}
+
+		// Deploy sleepjob4 pod in root.sandbox2
+		ginkgo.By("Deploy the sleep pod " + sleepPod4Configs.Name + " to the development namespace")
+		sleepObj, podErr := k8s.InitSleepPod(sleepPod4Configs)
+		Ω(podErr).NotTo(gomega.HaveOccurred())
+		sleepRespPod4, err := kClient.CreatePod(sleepObj, dev)
+		gomega.Ω(err).NotTo(gomega.HaveOccurred())
+
+		// sleepjob4 pod can't be scheduled before pods in root.sandbox1 are succeeded
+		ginkgo.By("The sleep pod " + sleepPod4Configs.Name + " can't be scheduled")
+		err = kClient.WaitForPodUnschedulable(sleepRespPod4, time.Duration(60)*time.Second)
+		gomega.Ω(err).NotTo(gomega.HaveOccurred())
+
+		// pods in root.sandbox1 can be succeeded
+		ginkgo.By("The pods in root.sandbox1 can be succeeded")
+		for _, config := range []k8s.SleepPodConfig{sleepPod1Configs, sleepPod2Configs, sleepPod3Configs} {
+			err = kClient.WaitForPodScheduled(dev, config.Name, time.Duration(30)*time.Second)
+			gomega.Ω(err).NotTo(gomega.HaveOccurred())
+		}
+	})
+
+	ginkgo.AfterEach(func() {
+
+		// Delete all sleep pods
+		ginkgo.By("Delete all sleep pods")
+		pods, err := kClient.GetPodNamesFromNS(ns.Name)
+		if err == nil {

Review Comment:
   In situations like this, I very much prefer the following:
   ```
   if err != nil {
       fmt.Fprintf(ginkgo.GinkgoWriter, "Failed to get pods from namespace %s - reason is %s\n", ns.Name, err.Error())
       return
   }
   ```



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: reviews-unsubscribe@yunikorn.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [yunikorn-k8shim] pbacsko commented on a diff in pull request #608: [YUNIKORN-1471] add preemption e2e test

Posted by "pbacsko (via GitHub)" <gi...@apache.org>.
pbacsko commented on code in PR #608:
URL: https://github.com/apache/yunikorn-k8shim/pull/608#discussion_r1218202600


##########
test/e2e/preemption/preemption_test.go:
##########
@@ -0,0 +1,337 @@
+/*
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package preemption_test
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"
+	v1 "k8s.io/api/core/v1"
+	k8serrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"github.com/apache/yunikorn-k8shim/pkg/common/constants"
+	tests "github.com/apache/yunikorn-k8shim/test/e2e"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/configmanager"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/common"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/k8s"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/yunikorn"
+)
+
+var kClient k8s.KubeCtl
+var restClient yunikorn.RClient
+var ns *v1.Namespace
+var dev = "dev" + common.RandSeq(5)
+var oldConfigMap = new(v1.ConfigMap)
+var annotation = "ann-" + common.RandSeq(10)
+
+// Nodes
+var Worker = ""
+var WorkerMemRes int64
+var sleepPodMemLimit int64
+var taintKey = "e2e_test_preemption"
+var nodesToTaint []string
+
+// Queues
+var preemptionQueueYamlFormat = `
+partitions:
+  - name: default
+    queues:
+      - name: root
+        submitacl: '*'
+        queues:
+          - name: sandbox1
+            submitacl: '*'
+            properties:
+              preemption.delay: 1s
+            resources:
+              guaranteed:
+                memory: %dM
+          - name: sandbox2
+            submitacl: '*'
+            properties:
+              preemption.policy: %s
+              preemption.delay: 1s
+            resources:
+              guaranteed:
+                memory: %dM
+`
+
+var _ = ginkgo.BeforeSuite(func() {
+	// Initializing kubectl client
+	kClient = k8s.KubeCtl{}
+	Ω(kClient.SetClient()).To(gomega.BeNil())
+	// Initializing rest client
+	restClient = yunikorn.RClient{}
+	Ω(restClient).NotTo(gomega.BeNil())
+
+	annotation = "ann-" + common.RandSeq(10)
+	yunikorn.EnsureYuniKornConfigsPresent()
+	yunikorn.UpdateConfigMapWrapper(oldConfigMap, "", annotation)
+
+	ginkgo.By("Port-forward the scheduler pod")
+	var err = kClient.PortForwardYkSchedulerPod()
+	Ω(err).NotTo(gomega.HaveOccurred())
+
+	ginkgo.By("create development namespace")
+	ns, err = kClient.CreateNamespace(dev, nil)
+	gomega.Ω(err).NotTo(gomega.HaveOccurred())
+	gomega.Ω(ns.Status.Phase).To(gomega.Equal(v1.NamespaceActive))
+
+	var nodes *v1.NodeList
+	nodes, err = kClient.GetNodes()
+	Ω(err).NotTo(gomega.HaveOccurred())
+	Ω(len(nodes.Items)).NotTo(gomega.BeZero(), "Nodes cant be empty")
+
+	// Extract node allocatable resources
+	for _, node := range nodes.Items {
+		// skip master if it's marked as such
+		node := node
+		if k8s.IsMasterNode(&node) || !k8s.IsComputeNode(&node) {
+			continue
+		}
+		if Worker == "" {
+			Worker = node.Name
+		} else {
+			nodesToTaint = append(nodesToTaint, node.Name)
+		}
+	}
+	Ω(Worker).NotTo(gomega.BeEmpty(), "Worker node not found")
+
+	ginkgo.By("Tainting some nodes..")
+	for _, nodeName := range nodesToTaint {
+		ginkgo.By("Tainting node " + nodeName)
+		err = kClient.TaintNode(nodeName, taintKey, "value", v1.TaintEffectNoSchedule)
+		Ω(err).NotTo(gomega.HaveOccurred())
+	}
+
+	nodesDAOInfo, err := restClient.GetNodes(constants.DefaultPartition)
+	Ω(err).NotTo(gomega.HaveOccurred())
+	Ω(nodesDAOInfo).NotTo(gomega.BeNil())
+
+	for _, node := range *nodesDAOInfo {
+		if node.NodeID == Worker {
+			WorkerMemRes = node.Available["memory"]
+		}
+	}
+	sleepPodMemLimit = int64(float64(WorkerMemRes)/3) / (1000 * 1000)
+	Ω(sleepPodMemLimit).NotTo(gomega.BeZero(), "Sleep pod memory limit cannot be zero")
+})
+
+var _ = ginkgo.AfterSuite(func() {
+
+	ginkgo.By("Untainting some nodes")
+	for _, nodeName := range nodesToTaint {
+		ginkgo.By("Untainting node " + nodeName)
+		err := kClient.UntaintNode(nodeName, taintKey)
+		Ω(err).NotTo(gomega.HaveOccurred(), "Could not remove taint from node "+nodeName)
+	}
+
+	ginkgo.By("Check Yunikorn's health")
+	checks, err := yunikorn.GetFailedHealthChecks()
+	Ω(err).NotTo(gomega.HaveOccurred())
+	Ω(checks).To(gomega.Equal(""), checks)
+
+	testDescription := ginkgo.CurrentSpecReport()
+	if testDescription.Failed() {
+		tests.LogTestClusterInfoWrapper(testDescription.FailureMessage(), []string{ns.Name})
+		tests.LogYunikornContainer(testDescription.FailureMessage())
+	}
+	ginkgo.By("Tearing down namespace: " + ns.Name)
+	err = kClient.TearDownNamespace(ns.Name)
+	Ω(err).NotTo(gomega.HaveOccurred())
+
+	yunikorn.RestoreConfigMapWrapper(oldConfigMap, annotation)
+})
+
+var _ = ginkgo.Describe("Preemption", func() {
+	ginkgo.It("Verify_basic_preemption", func() {
+		ginkgo.By("A queue uses resource more than the guaranteed value even after removing one of the pods. The cluster doesn't have enough resource to deploy a pod in another queue which uses resource less than the guaranteed value.")
+		// update config
+		cm, err := kClient.GetConfigMap(constants.ConfigMapName, configmanager.YuniKornTestConfig.YkNamespace)
+		Ω(err).NotTo(HaveOccurred())
+		cm.Data[configmanager.DefaultPolicyGroup] = fmt.Sprintf(preemptionQueueYamlFormat, sleepPodMemLimit, "default", sleepPodMemLimit)
+		_, err = kClient.UpdateConfigMap(cm, configmanager.YuniKornTestConfig.YkNamespace)
+		Ω(err).NotTo(HaveOccurred())
+
+		// Define sleepPod
+		sleepPod1Configs := k8s.SleepPodConfig{Name: "sleepjob1", NS: dev, Mem: sleepPodMemLimit, Time: 600, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod2Configs := k8s.SleepPodConfig{Name: "sleepjob2", NS: dev, Mem: sleepPodMemLimit, Time: 600, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod3Configs := k8s.SleepPodConfig{Name: "sleepjob3", NS: dev, Mem: sleepPodMemLimit, Time: 600, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod4Configs := k8s.SleepPodConfig{Name: "sleepjob4", NS: dev, Mem: sleepPodMemLimit, Time: 600, Optedout: true, Labels: map[string]string{"queue": "root.sandbox2"}}
+
+		for _, config := range []k8s.SleepPodConfig{sleepPod1Configs, sleepPod2Configs, sleepPod3Configs, sleepPod4Configs} {
+			ginkgo.By("Deploy the sleep pod " + config.Name + " to the development namespace")
+			sleepObj, podErr := k8s.InitSleepPod(config)
+			Ω(podErr).NotTo(gomega.HaveOccurred())
+			sleepRespPod, podErr := kClient.CreatePod(sleepObj, dev)
+			gomega.Ω(podErr).NotTo(gomega.HaveOccurred())
+
+			// Wait for pod to move to running state
+			podErr = kClient.WaitForPodBySelectorRunning(dev,
+				fmt.Sprintf("app=%s", sleepRespPod.ObjectMeta.Labels["app"]),
+				60)
+			gomega.Ω(podErr).NotTo(gomega.HaveOccurred())
+		}
+
+		// assert one of the pods in root.sandbox1 is preempted
+		ginkgo.By("One of the pods in root.sandbox1 is preempted")
+		sandbox1RunningPodsCnt := 0
+		pods, err := kClient.ListPodsByLabelSelector(dev, "queue=root.sandbox1")
+		gomega.Ω(err).NotTo(gomega.HaveOccurred())
+		for _, pod := range pods.Items {
+			if pod.DeletionTimestamp != nil {
+				continue
+			}
+			if pod.Status.Phase == v1.PodRunning {
+				sandbox1RunningPodsCnt++
+			}
+		}
+		Ω(sandbox1RunningPodsCnt).To(gomega.Equal(2), "One of the pods in root.sandbox1 should be preempted")
+	})
+
+	ginkgo.It("Verify_no_preemption_on_resources_less_than_guaranteed_value", func() {
+		ginkgo.By("A queue uses resource less than the guaranteed value can't be preempted.")
+		// update config
+		cm, err := kClient.GetConfigMap(constants.ConfigMapName, configmanager.YuniKornTestConfig.YkNamespace)
+		Ω(err).NotTo(HaveOccurred())
+		cm.Data[configmanager.DefaultPolicyGroup] = fmt.Sprintf(preemptionQueueYamlFormat, WorkerMemRes, "default", WorkerMemRes)
+		_, err = kClient.UpdateConfigMap(cm, configmanager.YuniKornTestConfig.YkNamespace)
+		Ω(err).NotTo(HaveOccurred())
+
+		// Define sleepPod
+		sleepPod1Configs := k8s.SleepPodConfig{Name: "sleepjob1", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod2Configs := k8s.SleepPodConfig{Name: "sleepjob2", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod3Configs := k8s.SleepPodConfig{Name: "sleepjob3", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod4Configs := k8s.SleepPodConfig{Name: "sleepjob4", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox2"}}
+
+		// Deploy pods in root.sandbox1
+		for _, config := range []k8s.SleepPodConfig{sleepPod1Configs, sleepPod2Configs, sleepPod3Configs} {
+			ginkgo.By("Deploy the sleep pod " + config.Name + " to the development namespace")
+			sleepObj, podErr := k8s.InitSleepPod(config)
+			Ω(podErr).NotTo(gomega.HaveOccurred())
+			sleepRespPod, podErr := kClient.CreatePod(sleepObj, dev)
+			gomega.Ω(podErr).NotTo(gomega.HaveOccurred())
+
+			// Wait for pod to move to running state
+			podErr = kClient.WaitForPodBySelectorRunning(dev,
+				fmt.Sprintf("app=%s", sleepRespPod.ObjectMeta.Labels["app"]),
+				30)
+			gomega.Ω(podErr).NotTo(gomega.HaveOccurred())
+		}
+
+		// Deploy sleepjob4 pod in root.sandbox2
+		ginkgo.By("Deploy the sleep pod " + sleepPod4Configs.Name + " to the development namespace")
+		sleepObj, podErr := k8s.InitSleepPod(sleepPod4Configs)
+		Ω(podErr).NotTo(gomega.HaveOccurred())
+		sleepRespPod4, err := kClient.CreatePod(sleepObj, dev)
+		gomega.Ω(err).NotTo(gomega.HaveOccurred())
+
+		// sleepjob4 pod can't be scheduled before pods in root.sandbox1 are succeeded
+		ginkgo.By("The sleep pod " + sleepPod4Configs.Name + " can't be scheduled")
+		err = kClient.WaitForPodUnschedulable(sleepRespPod4, time.Duration(60)*time.Second)
+		gomega.Ω(err).NotTo(gomega.HaveOccurred())
+
+		// pods in root.sandbox1 can be succeeded
+		ginkgo.By("The pods in root.sandbox1 can be succeeded")
+		for _, config := range []k8s.SleepPodConfig{sleepPod1Configs, sleepPod2Configs, sleepPod3Configs} {
+			err = kClient.WaitForPodScheduled(dev, config.Name, time.Duration(30)*time.Second)
+			gomega.Ω(err).NotTo(gomega.HaveOccurred())
+		}
+	})
+
+	ginkgo.It("Verify_no_preemption_outside_fence", func() {
+		ginkgo.By("The preemption can't go outside the fence.")
+		// update config
+		cm, err := kClient.GetConfigMap(constants.ConfigMapName, configmanager.YuniKornTestConfig.YkNamespace)
+		Ω(err).NotTo(HaveOccurred())
+		cm.Data[configmanager.DefaultPolicyGroup] = fmt.Sprintf(preemptionQueueYamlFormat, sleepPodMemLimit, "fence", sleepPodMemLimit)
+		_, err = kClient.UpdateConfigMap(cm, configmanager.YuniKornTestConfig.YkNamespace)
+		Ω(err).NotTo(HaveOccurred())
+
+		// Define sleepPod
+		sleepPod1Configs := k8s.SleepPodConfig{Name: "sleepjob1", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod2Configs := k8s.SleepPodConfig{Name: "sleepjob2", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod3Configs := k8s.SleepPodConfig{Name: "sleepjob3", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod4Configs := k8s.SleepPodConfig{Name: "sleepjob4", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox2"}}
+
+		// Deploy pods in root.sandbox1
+		for _, config := range []k8s.SleepPodConfig{sleepPod1Configs, sleepPod2Configs, sleepPod3Configs} {
+			ginkgo.By("Deploy the sleep pod " + config.Name + " to the development namespace")
+			sleepObj, podErr := k8s.InitSleepPod(config)
+			Ω(podErr).NotTo(gomega.HaveOccurred())
+			sleepRespPod, podErr := kClient.CreatePod(sleepObj, dev)
+			gomega.Ω(podErr).NotTo(gomega.HaveOccurred())
+
+			// Wait for pod to move to running state
+			podErr = kClient.WaitForPodBySelectorRunning(dev,
+				fmt.Sprintf("app=%s", sleepRespPod.ObjectMeta.Labels["app"]),
+				30)
+			gomega.Ω(podErr).NotTo(gomega.HaveOccurred())
+		}
+
+		// Deploy sleepjob4 pod in root.sandbox2
+		ginkgo.By("Deploy the sleep pod " + sleepPod4Configs.Name + " to the development namespace")
+		sleepObj, podErr := k8s.InitSleepPod(sleepPod4Configs)
+		Ω(podErr).NotTo(gomega.HaveOccurred())
+		sleepRespPod4, err := kClient.CreatePod(sleepObj, dev)
+		gomega.Ω(err).NotTo(gomega.HaveOccurred())
+
+		// sleepjob4 pod can't be scheduled before pods in root.sandbox1 are succeeded
+		ginkgo.By("The sleep pod " + sleepPod4Configs.Name + " can't be scheduled")
+		err = kClient.WaitForPodUnschedulable(sleepRespPod4, time.Duration(60)*time.Second)

Review Comment:
   Nit: `60 * time.Second` should work too



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: reviews-unsubscribe@yunikorn.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [yunikorn-k8shim] pbacsko commented on pull request #608: [YUNIKORN-1471] add preemption e2e test

Posted by "pbacsko (via GitHub)" <gi...@apache.org>.
pbacsko commented on PR #608:
URL: https://github.com/apache/yunikorn-k8shim/pull/608#issuecomment-1580945207

   +1 LGTM this looks OK (pending test results)


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: reviews-unsubscribe@yunikorn.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [yunikorn-k8shim] FrankYang0529 commented on pull request #608: [YUNIKORN-1471] add preemption e2e test

Posted by "FrankYang0529 (via GitHub)" <gi...@apache.org>.
FrankYang0529 commented on PR #608:
URL: https://github.com/apache/yunikorn-k8shim/pull/608#issuecomment-1581093362

   Both v1.25.3 and v1.23.13 plugin e2e test cases failed in the predicates tests, not preemption. Do we want to create another issue to follow up?


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: reviews-unsubscribe@yunikorn.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [yunikorn-k8shim] FrankYang0529 commented on a diff in pull request #608: [YUNIKORN-1471] add preemption e2e test

Posted by "FrankYang0529 (via GitHub)" <gi...@apache.org>.
FrankYang0529 commented on code in PR #608:
URL: https://github.com/apache/yunikorn-k8shim/pull/608#discussion_r1219568397


##########
test/e2e/preemption/preemption_test.go:
##########
@@ -0,0 +1,337 @@
+/*
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package preemption_test
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"
+	v1 "k8s.io/api/core/v1"
+	k8serrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"github.com/apache/yunikorn-k8shim/pkg/common/constants"
+	tests "github.com/apache/yunikorn-k8shim/test/e2e"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/configmanager"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/common"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/k8s"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/yunikorn"
+)
+
+var kClient k8s.KubeCtl
+var restClient yunikorn.RClient
+var ns *v1.Namespace
+var dev = "dev" + common.RandSeq(5)
+var oldConfigMap = new(v1.ConfigMap)
+var annotation = "ann-" + common.RandSeq(10)
+
+// Nodes
+var Worker = ""
+var WorkerMemRes int64
+var sleepPodMemLimit int64
+var taintKey = "e2e_test_preemption"
+var nodesToTaint []string
+
+// Queues
+var preemptionQueueYamlFormat = `
+partitions:
+  - name: default
+    queues:
+      - name: root
+        submitacl: '*'
+        queues:
+          - name: sandbox1
+            submitacl: '*'
+            properties:
+              preemption.delay: 1s
+            resources:
+              guaranteed:
+                memory: %dM
+          - name: sandbox2
+            submitacl: '*'
+            properties:
+              preemption.policy: %s
+              preemption.delay: 1s
+            resources:
+              guaranteed:
+                memory: %dM
+`
+
+var _ = ginkgo.BeforeSuite(func() {
+	// Initializing kubectl client
+	kClient = k8s.KubeCtl{}
+	Ω(kClient.SetClient()).To(gomega.BeNil())
+	// Initializing rest client
+	restClient = yunikorn.RClient{}
+	Ω(restClient).NotTo(gomega.BeNil())
+
+	annotation = "ann-" + common.RandSeq(10)
+	yunikorn.EnsureYuniKornConfigsPresent()
+	yunikorn.UpdateConfigMapWrapper(oldConfigMap, "", annotation)
+
+	ginkgo.By("Port-forward the scheduler pod")
+	var err = kClient.PortForwardYkSchedulerPod()
+	Ω(err).NotTo(gomega.HaveOccurred())
+
+	ginkgo.By("create development namespace")
+	ns, err = kClient.CreateNamespace(dev, nil)
+	gomega.Ω(err).NotTo(gomega.HaveOccurred())
+	gomega.Ω(ns.Status.Phase).To(gomega.Equal(v1.NamespaceActive))
+
+	var nodes *v1.NodeList
+	nodes, err = kClient.GetNodes()
+	Ω(err).NotTo(gomega.HaveOccurred())
+	Ω(len(nodes.Items)).NotTo(gomega.BeZero(), "Nodes cant be empty")
+
+	// Extract node allocatable resources
+	for _, node := range nodes.Items {
+		// skip master if it's marked as such
+		node := node
+		if k8s.IsMasterNode(&node) || !k8s.IsComputeNode(&node) {
+			continue
+		}
+		if Worker == "" {
+			Worker = node.Name
+		} else {
+			nodesToTaint = append(nodesToTaint, node.Name)
+		}
+	}
+	Ω(Worker).NotTo(gomega.BeEmpty(), "Worker node not found")
+
+	ginkgo.By("Tainting some nodes..")
+	for _, nodeName := range nodesToTaint {
+		ginkgo.By("Tainting node " + nodeName)
+		err = kClient.TaintNode(nodeName, taintKey, "value", v1.TaintEffectNoSchedule)
+		Ω(err).NotTo(gomega.HaveOccurred())
+	}
+
+	nodesDAOInfo, err := restClient.GetNodes(constants.DefaultPartition)
+	Ω(err).NotTo(gomega.HaveOccurred())
+	Ω(nodesDAOInfo).NotTo(gomega.BeNil())
+
+	for _, node := range *nodesDAOInfo {
+		if node.NodeID == Worker {
+			WorkerMemRes = node.Available["memory"]
+		}
+	}
+	sleepPodMemLimit = int64(float64(WorkerMemRes)/3) / (1000 * 1000)
+	Ω(sleepPodMemLimit).NotTo(gomega.BeZero(), "Sleep pod memory limit cannot be zero")
+})
+
+var _ = ginkgo.AfterSuite(func() {
+
+	ginkgo.By("Untainting some nodes")
+	for _, nodeName := range nodesToTaint {
+		ginkgo.By("Untainting node " + nodeName)
+		err := kClient.UntaintNode(nodeName, taintKey)
+		Ω(err).NotTo(gomega.HaveOccurred(), "Could not remove taint from node "+nodeName)
+	}
+
+	ginkgo.By("Check Yunikorn's health")
+	checks, err := yunikorn.GetFailedHealthChecks()
+	Ω(err).NotTo(gomega.HaveOccurred())
+	Ω(checks).To(gomega.Equal(""), checks)
+
+	testDescription := ginkgo.CurrentSpecReport()
+	if testDescription.Failed() {
+		tests.LogTestClusterInfoWrapper(testDescription.FailureMessage(), []string{ns.Name})
+		tests.LogYunikornContainer(testDescription.FailureMessage())
+	}
+	ginkgo.By("Tearing down namespace: " + ns.Name)
+	err = kClient.TearDownNamespace(ns.Name)
+	Ω(err).NotTo(gomega.HaveOccurred())
+
+	yunikorn.RestoreConfigMapWrapper(oldConfigMap, annotation)
+})
+
+var _ = ginkgo.Describe("Preemption", func() {
+	ginkgo.It("Verify_basic_preemption", func() {
+		ginkgo.By("A queue uses resource more than the guaranteed value even after removing one of the pods. The cluster doesn't have enough resource to deploy a pod in another queue which uses resource less than the guaranteed value.")
+		// update config
+		cm, err := kClient.GetConfigMap(constants.ConfigMapName, configmanager.YuniKornTestConfig.YkNamespace)
+		Ω(err).NotTo(HaveOccurred())
+		cm.Data[configmanager.DefaultPolicyGroup] = fmt.Sprintf(preemptionQueueYamlFormat, sleepPodMemLimit, "default", sleepPodMemLimit)
+		_, err = kClient.UpdateConfigMap(cm, configmanager.YuniKornTestConfig.YkNamespace)
+		Ω(err).NotTo(HaveOccurred())
+
+		// Define sleepPod
+		sleepPod1Configs := k8s.SleepPodConfig{Name: "sleepjob1", NS: dev, Mem: sleepPodMemLimit, Time: 600, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod2Configs := k8s.SleepPodConfig{Name: "sleepjob2", NS: dev, Mem: sleepPodMemLimit, Time: 600, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod3Configs := k8s.SleepPodConfig{Name: "sleepjob3", NS: dev, Mem: sleepPodMemLimit, Time: 600, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod4Configs := k8s.SleepPodConfig{Name: "sleepjob4", NS: dev, Mem: sleepPodMemLimit, Time: 600, Optedout: true, Labels: map[string]string{"queue": "root.sandbox2"}}
+
+		for _, config := range []k8s.SleepPodConfig{sleepPod1Configs, sleepPod2Configs, sleepPod3Configs, sleepPod4Configs} {
+			ginkgo.By("Deploy the sleep pod " + config.Name + " to the development namespace")
+			sleepObj, podErr := k8s.InitSleepPod(config)
+			Ω(podErr).NotTo(gomega.HaveOccurred())
+			sleepRespPod, podErr := kClient.CreatePod(sleepObj, dev)
+			gomega.Ω(podErr).NotTo(gomega.HaveOccurred())
+
+			// Wait for pod to move to running state
+			podErr = kClient.WaitForPodBySelectorRunning(dev,
+				fmt.Sprintf("app=%s", sleepRespPod.ObjectMeta.Labels["app"]),
+				60)
+			gomega.Ω(podErr).NotTo(gomega.HaveOccurred())
+		}
+
+		// assert one of the pods in root.sandbox1 is preempted
+		ginkgo.By("One of the pods in root.sandbox1 is preempted")
+		sandbox1RunningPodsCnt := 0
+		pods, err := kClient.ListPodsByLabelSelector(dev, "queue=root.sandbox1")
+		gomega.Ω(err).NotTo(gomega.HaveOccurred())
+		for _, pod := range pods.Items {
+			if pod.DeletionTimestamp != nil {
+				continue
+			}
+			if pod.Status.Phase == v1.PodRunning {
+				sandbox1RunningPodsCnt++
+			}
+		}
+		Ω(sandbox1RunningPodsCnt).To(gomega.Equal(2), "One of the pods in root.sandbox1 should be preempted")
+	})
+
+	ginkgo.It("Verify_no_preemption_on_resources_less_than_guaranteed_value", func() {
+		ginkgo.By("A queue uses resource less than the guaranteed value can't be preempted.")
+		// update config
+		cm, err := kClient.GetConfigMap(constants.ConfigMapName, configmanager.YuniKornTestConfig.YkNamespace)
+		Ω(err).NotTo(HaveOccurred())
+		cm.Data[configmanager.DefaultPolicyGroup] = fmt.Sprintf(preemptionQueueYamlFormat, WorkerMemRes, "default", WorkerMemRes)
+		_, err = kClient.UpdateConfigMap(cm, configmanager.YuniKornTestConfig.YkNamespace)
+		Ω(err).NotTo(HaveOccurred())
+
+		// Define sleepPod
+		sleepPod1Configs := k8s.SleepPodConfig{Name: "sleepjob1", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod2Configs := k8s.SleepPodConfig{Name: "sleepjob2", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod3Configs := k8s.SleepPodConfig{Name: "sleepjob3", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod4Configs := k8s.SleepPodConfig{Name: "sleepjob4", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox2"}}
+
+		// Deploy pods in root.sandbox1
+		for _, config := range []k8s.SleepPodConfig{sleepPod1Configs, sleepPod2Configs, sleepPod3Configs} {
+			ginkgo.By("Deploy the sleep pod " + config.Name + " to the development namespace")
+			sleepObj, podErr := k8s.InitSleepPod(config)
+			Ω(podErr).NotTo(gomega.HaveOccurred())
+			sleepRespPod, podErr := kClient.CreatePod(sleepObj, dev)
+			gomega.Ω(podErr).NotTo(gomega.HaveOccurred())
+
+			// Wait for pod to move to running state
+			podErr = kClient.WaitForPodBySelectorRunning(dev,
+				fmt.Sprintf("app=%s", sleepRespPod.ObjectMeta.Labels["app"]),
+				30)
+			gomega.Ω(podErr).NotTo(gomega.HaveOccurred())
+		}
+
+		// Deploy sleepjob4 pod in root.sandbox2
+		ginkgo.By("Deploy the sleep pod " + sleepPod4Configs.Name + " to the development namespace")
+		sleepObj, podErr := k8s.InitSleepPod(sleepPod4Configs)
+		Ω(podErr).NotTo(gomega.HaveOccurred())
+		sleepRespPod4, err := kClient.CreatePod(sleepObj, dev)
+		gomega.Ω(err).NotTo(gomega.HaveOccurred())
+
+		// sleepjob4 pod can't be scheduled until the pods in root.sandbox1 have succeeded
+		ginkgo.By("The sleep pod " + sleepPod4Configs.Name + " can't be scheduled")
+		err = kClient.WaitForPodUnschedulable(sleepRespPod4, time.Duration(60)*time.Second)
+		gomega.Ω(err).NotTo(gomega.HaveOccurred())
+
+		// pods in root.sandbox1 can succeed
+		ginkgo.By("The pods in root.sandbox1 can succeed")
+		for _, config := range []k8s.SleepPodConfig{sleepPod1Configs, sleepPod2Configs, sleepPod3Configs} {
+			err = kClient.WaitForPodScheduled(dev, config.Name, time.Duration(30)*time.Second)
+			gomega.Ω(err).NotTo(gomega.HaveOccurred())
+		}
+	})
+
+	ginkgo.It("Verify_no_preemption_outside_fence", func() {
+		ginkgo.By("The preemption can't go outside the fence.")
+		// update config
+		cm, err := kClient.GetConfigMap(constants.ConfigMapName, configmanager.YuniKornTestConfig.YkNamespace)
+		Ω(err).NotTo(HaveOccurred())
+		cm.Data[configmanager.DefaultPolicyGroup] = fmt.Sprintf(preemptionQueueYamlFormat, sleepPodMemLimit, "fence", sleepPodMemLimit)
+		_, err = kClient.UpdateConfigMap(cm, configmanager.YuniKornTestConfig.YkNamespace)
+		Ω(err).NotTo(HaveOccurred())
+
+		// Define sleepPod
+		sleepPod1Configs := k8s.SleepPodConfig{Name: "sleepjob1", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod2Configs := k8s.SleepPodConfig{Name: "sleepjob2", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod3Configs := k8s.SleepPodConfig{Name: "sleepjob3", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod4Configs := k8s.SleepPodConfig{Name: "sleepjob4", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox2"}}
+
+		// Deploy pods in root.sandbox1
+		for _, config := range []k8s.SleepPodConfig{sleepPod1Configs, sleepPod2Configs, sleepPod3Configs} {
+			ginkgo.By("Deploy the sleep pod " + config.Name + " to the development namespace")
+			sleepObj, podErr := k8s.InitSleepPod(config)
+			Ω(podErr).NotTo(gomega.HaveOccurred())
+			sleepRespPod, podErr := kClient.CreatePod(sleepObj, dev)
+			gomega.Ω(podErr).NotTo(gomega.HaveOccurred())
+
+			// Wait for pod to move to running state
+			podErr = kClient.WaitForPodBySelectorRunning(dev,
+				fmt.Sprintf("app=%s", sleepRespPod.ObjectMeta.Labels["app"]),
+				30)
+			gomega.Ω(podErr).NotTo(gomega.HaveOccurred())
+		}
+
+		// Deploy sleepjob4 pod in root.sandbox2
+		ginkgo.By("Deploy the sleep pod " + sleepPod4Configs.Name + " to the development namespace")
+		sleepObj, podErr := k8s.InitSleepPod(sleepPod4Configs)
+		Ω(podErr).NotTo(gomega.HaveOccurred())
+		sleepRespPod4, err := kClient.CreatePod(sleepObj, dev)
+		gomega.Ω(err).NotTo(gomega.HaveOccurred())
+
+		// sleepjob4 pod can't be scheduled until the pods in root.sandbox1 have succeeded
+		ginkgo.By("The sleep pod " + sleepPod4Configs.Name + " can't be scheduled")
+		err = kClient.WaitForPodUnschedulable(sleepRespPod4, time.Duration(60)*time.Second)
+		gomega.Ω(err).NotTo(gomega.HaveOccurred())
+
+		// pods in root.sandbox1 can succeed
+		ginkgo.By("The pods in root.sandbox1 can succeed")
+		for _, config := range []k8s.SleepPodConfig{sleepPod1Configs, sleepPod2Configs, sleepPod3Configs} {
+			err = kClient.WaitForPodScheduled(dev, config.Name, time.Duration(30)*time.Second)
+			gomega.Ω(err).NotTo(gomega.HaveOccurred())
+		}
+	})
+
+	ginkgo.AfterEach(func() {
+
+		// Delete all sleep pods
+		ginkgo.By("Delete all sleep pods")
+		pods, err := kClient.GetPodNamesFromNS(ns.Name)
+		if err == nil {
+			for _, each := range pods {
+				if strings.Contains(each, "sleep") {
+					ginkgo.By("Deleting sleep pod: " + each)
+					err = kClient.DeletePod(each, ns.Name)
+					if err != nil {
+						if statusErr, ok := err.(*k8serrors.StatusError); ok {
+							if statusErr.ErrStatus.Reason == metav1.StatusReasonNotFound {
+								fmt.Fprintf(ginkgo.GinkgoWriter, "Failed to delete pod %s - reason is %s, it "+
+									"has been deleted in the meantime\n", each, statusErr.ErrStatus.Reason)
+								continue
+							}
+						}
+					}
+				}
+			}
+		} else {
+			fmt.Fprintf(ginkgo.GinkgoWriter, "Failed to get pods from namespace %s - reason is %s\n", ns.Name, err.Error())
+		}

Review Comment:
   Thanks! It's much easier to use `kClient.DeletePods()`. Updated it.
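
   A minimal sketch of the simplified cleanup, assuming `DeletePods` takes the namespace and removes every pod in it (the exact signature isn't shown in this thread):

   ```go
   ginkgo.AfterEach(func() {
   	// Remove every pod created by the spec in one call instead of listing
   	// pod names and deleting them one by one.
   	ginkgo.By("Delete all sleep pods")
   	err := kClient.DeletePods(ns.Name)
   	if err != nil {
   		fmt.Fprintf(ginkgo.GinkgoWriter, "Failed to delete pods in namespace %s - reason is %s\n", ns.Name, err.Error())
   	}
   })
   ```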





[GitHub] [yunikorn-k8shim] FrankYang0529 commented on a diff in pull request #608: [YUNIKORN-1471] add preemption e2e test

Posted by "FrankYang0529 (via GitHub)" <gi...@apache.org>.
FrankYang0529 commented on code in PR #608:
URL: https://github.com/apache/yunikorn-k8shim/pull/608#discussion_r1219562736


##########
test/e2e/preemption/preemption_test.go:
##########
@@ -0,0 +1,337 @@
+/*
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package preemption_test
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"
+	v1 "k8s.io/api/core/v1"
+	k8serrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"github.com/apache/yunikorn-k8shim/pkg/common/constants"
+	tests "github.com/apache/yunikorn-k8shim/test/e2e"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/configmanager"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/common"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/k8s"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/yunikorn"
+)
+
+var kClient k8s.KubeCtl
+var restClient yunikorn.RClient
+var ns *v1.Namespace
+var dev = "dev" + common.RandSeq(5)
+var oldConfigMap = new(v1.ConfigMap)
+var annotation = "ann-" + common.RandSeq(10)
+
+// Nodes
+var Worker = ""
+var WorkerMemRes int64
+var sleepPodMemLimit int64
+var taintKey = "e2e_test_preemption"
+var nodesToTaint []string
+
+// Queues
+var preemptionQueueYamlFormat = `
+partitions:
+  - name: default
+    queues:
+      - name: root
+        submitacl: '*'
+        queues:
+          - name: sandbox1
+            submitacl: '*'
+            properties:
+              preemption.delay: 1s
+            resources:
+              guaranteed:
+                memory: %dM
+          - name: sandbox2
+            submitacl: '*'
+            properties:
+              preemption.policy: %s
+              preemption.delay: 1s
+            resources:
+              guaranteed:
+                memory: %dM
+`
+
+var _ = ginkgo.BeforeSuite(func() {
+	// Initializing kubectl client
+	kClient = k8s.KubeCtl{}
+	Ω(kClient.SetClient()).To(gomega.BeNil())
+	// Initializing rest client
+	restClient = yunikorn.RClient{}
+	Ω(restClient).NotTo(gomega.BeNil())
+
+	annotation = "ann-" + common.RandSeq(10)
+	yunikorn.EnsureYuniKornConfigsPresent()
+	yunikorn.UpdateConfigMapWrapper(oldConfigMap, "", annotation)
+
+	ginkgo.By("Port-forward the scheduler pod")
+	var err = kClient.PortForwardYkSchedulerPod()
+	Ω(err).NotTo(gomega.HaveOccurred())
+
+	ginkgo.By("create development namespace")
+	ns, err = kClient.CreateNamespace(dev, nil)
+	gomega.Ω(err).NotTo(gomega.HaveOccurred())
+	gomega.Ω(ns.Status.Phase).To(gomega.Equal(v1.NamespaceActive))
+
+	var nodes *v1.NodeList
+	nodes, err = kClient.GetNodes()
+	Ω(err).NotTo(gomega.HaveOccurred())
+	Ω(len(nodes.Items)).NotTo(gomega.BeZero(), "Nodes can't be empty")
+
+	// Extract node allocatable resources
+	for _, node := range nodes.Items {
+		// skip master if it's marked as such
+		node := node
+		if k8s.IsMasterNode(&node) || !k8s.IsComputeNode(&node) {
+			continue
+		}
+		if Worker == "" {
+			Worker = node.Name
+		} else {
+			nodesToTaint = append(nodesToTaint, node.Name)
+		}
+	}
+	Ω(Worker).NotTo(gomega.BeEmpty(), "Worker node not found")
+
+	ginkgo.By("Tainting some nodes..")

Review Comment:
   I prefer to taint nodes because [cross-node preemption](https://yunikorn.apache.org/docs/next/design/preemption#non-goals) is listed as a non-goal. I added `TaintNodes` and `UntaintNodes` to reduce some duplicated code.
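
   A rough sketch of what such helpers could look like, assuming they simply wrap the per-node `TaintNode`/`UntaintNode` calls already used here (the signatures below are illustrative, not necessarily the ones in the PR):

   ```go
   // TaintNodes applies the same NoSchedule taint to every node in the list.
   func TaintNodes(kClient k8s.KubeCtl, nodeNames []string, key string, value string) error {
   	for _, nodeName := range nodeNames {
   		if err := kClient.TaintNode(nodeName, key, value, v1.TaintEffectNoSchedule); err != nil {
   			return err
   		}
   	}
   	return nil
   }

   // UntaintNodes removes the taint with the given key from every node in the list.
   func UntaintNodes(kClient k8s.KubeCtl, nodeNames []string, key string) error {
   	for _, nodeName := range nodeNames {
   		if err := kClient.UntaintNode(nodeName, key); err != nil {
   			return err
   		}
   	}
   	return nil
   }
   ```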





[GitHub] [yunikorn-k8shim] FrankYang0529 commented on a diff in pull request #608: [YUNIKORN-1471] add preemption e2e test

Posted by "FrankYang0529 (via GitHub)" <gi...@apache.org>.
FrankYang0529 commented on code in PR #608:
URL: https://github.com/apache/yunikorn-k8shim/pull/608#discussion_r1219571443


##########
test/e2e/preemption/preemption_test.go:
##########
@@ -0,0 +1,337 @@
+/*
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package preemption_test
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"
+	v1 "k8s.io/api/core/v1"
+	k8serrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"github.com/apache/yunikorn-k8shim/pkg/common/constants"
+	tests "github.com/apache/yunikorn-k8shim/test/e2e"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/configmanager"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/common"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/k8s"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/yunikorn"
+)
+
+var kClient k8s.KubeCtl
+var restClient yunikorn.RClient
+var ns *v1.Namespace
+var dev = "dev" + common.RandSeq(5)
+var oldConfigMap = new(v1.ConfigMap)
+var annotation = "ann-" + common.RandSeq(10)
+
+// Nodes
+var Worker = ""
+var WorkerMemRes int64
+var sleepPodMemLimit int64
+var taintKey = "e2e_test_preemption"
+var nodesToTaint []string
+
+// Queues
+var preemptionQueueYamlFormat = `
+partitions:
+  - name: default
+    queues:
+      - name: root
+        submitacl: '*'
+        queues:
+          - name: sandbox1
+            submitacl: '*'
+            properties:
+              preemption.delay: 1s
+            resources:
+              guaranteed:
+                memory: %dM
+          - name: sandbox2
+            submitacl: '*'
+            properties:
+              preemption.policy: %s
+              preemption.delay: 1s
+            resources:
+              guaranteed:
+                memory: %dM
+`
+
+var _ = ginkgo.BeforeSuite(func() {
+	// Initializing kubectl client
+	kClient = k8s.KubeCtl{}
+	Ω(kClient.SetClient()).To(gomega.BeNil())
+	// Initializing rest client
+	restClient = yunikorn.RClient{}
+	Ω(restClient).NotTo(gomega.BeNil())
+
+	annotation = "ann-" + common.RandSeq(10)
+	yunikorn.EnsureYuniKornConfigsPresent()
+	yunikorn.UpdateConfigMapWrapper(oldConfigMap, "", annotation)
+
+	ginkgo.By("Port-forward the scheduler pod")
+	var err = kClient.PortForwardYkSchedulerPod()
+	Ω(err).NotTo(gomega.HaveOccurred())
+
+	ginkgo.By("create development namespace")
+	ns, err = kClient.CreateNamespace(dev, nil)
+	gomega.Ω(err).NotTo(gomega.HaveOccurred())
+	gomega.Ω(ns.Status.Phase).To(gomega.Equal(v1.NamespaceActive))
+
+	var nodes *v1.NodeList
+	nodes, err = kClient.GetNodes()
+	Ω(err).NotTo(gomega.HaveOccurred())
+	Ω(len(nodes.Items)).NotTo(gomega.BeZero(), "Nodes can't be empty")
+
+	// Extract node allocatable resources
+	for _, node := range nodes.Items {
+		// skip master if it's marked as such
+		node := node
+		if k8s.IsMasterNode(&node) || !k8s.IsComputeNode(&node) {
+			continue
+		}
+		if Worker == "" {
+			Worker = node.Name
+		} else {
+			nodesToTaint = append(nodesToTaint, node.Name)
+		}
+	}
+	Ω(Worker).NotTo(gomega.BeEmpty(), "Worker node not found")
+
+	ginkgo.By("Tainting some nodes..")
+	for _, nodeName := range nodesToTaint {
+		ginkgo.By("Tainting node " + nodeName)
+		err = kClient.TaintNode(nodeName, taintKey, "value", v1.TaintEffectNoSchedule)
+		Ω(err).NotTo(gomega.HaveOccurred())
+	}
+
+	nodesDAOInfo, err := restClient.GetNodes(constants.DefaultPartition)
+	Ω(err).NotTo(gomega.HaveOccurred())
+	Ω(nodesDAOInfo).NotTo(gomega.BeNil())
+
+	for _, node := range *nodesDAOInfo {
+		if node.NodeID == Worker {
+			WorkerMemRes = node.Available["memory"]
+		}
+	}
+	sleepPodMemLimit = int64(float64(WorkerMemRes)/3) / (1000 * 1000)
+	Ω(sleepPodMemLimit).NotTo(gomega.BeZero(), "Sleep pod memory limit cannot be zero")
+})
+
+var _ = ginkgo.AfterSuite(func() {
+
+	ginkgo.By("Untainting some nodes")
+	for _, nodeName := range nodesToTaint {
+		ginkgo.By("Untainting node " + nodeName)
+		err := kClient.UntaintNode(nodeName, taintKey)
+		Ω(err).NotTo(gomega.HaveOccurred(), "Could not remove taint from node "+nodeName)
+	}
+
+	ginkgo.By("Check Yunikorn's health")
+	checks, err := yunikorn.GetFailedHealthChecks()
+	Ω(err).NotTo(gomega.HaveOccurred())
+	Ω(checks).To(gomega.Equal(""), checks)
+
+	testDescription := ginkgo.CurrentSpecReport()
+	if testDescription.Failed() {
+		tests.LogTestClusterInfoWrapper(testDescription.FailureMessage(), []string{ns.Name})
+		tests.LogYunikornContainer(testDescription.FailureMessage())
+	}
+	ginkgo.By("Tearing down namespace: " + ns.Name)
+	err = kClient.TearDownNamespace(ns.Name)
+	Ω(err).NotTo(gomega.HaveOccurred())
+
+	yunikorn.RestoreConfigMapWrapper(oldConfigMap, annotation)
+})
+
+var _ = ginkgo.Describe("Preemption", func() {
+	ginkgo.It("Verify_basic_preemption", func() {
+		ginkgo.By("A queue uses resource more than the guaranteed value even after removing one of the pods. The cluster doesn't have enough resource to deploy a pod in another queue which uses resource less than the guaranteed value.")
+		// update config
+		cm, err := kClient.GetConfigMap(constants.ConfigMapName, configmanager.YuniKornTestConfig.YkNamespace)
+		Ω(err).NotTo(HaveOccurred())
+		cm.Data[configmanager.DefaultPolicyGroup] = fmt.Sprintf(preemptionQueueYamlFormat, sleepPodMemLimit, "default", sleepPodMemLimit)
+		_, err = kClient.UpdateConfigMap(cm, configmanager.YuniKornTestConfig.YkNamespace)
+		Ω(err).NotTo(HaveOccurred())
+
+		// Define sleepPod
+		sleepPod1Configs := k8s.SleepPodConfig{Name: "sleepjob1", NS: dev, Mem: sleepPodMemLimit, Time: 600, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod2Configs := k8s.SleepPodConfig{Name: "sleepjob2", NS: dev, Mem: sleepPodMemLimit, Time: 600, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod3Configs := k8s.SleepPodConfig{Name: "sleepjob3", NS: dev, Mem: sleepPodMemLimit, Time: 600, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod4Configs := k8s.SleepPodConfig{Name: "sleepjob4", NS: dev, Mem: sleepPodMemLimit, Time: 600, Optedout: true, Labels: map[string]string{"queue": "root.sandbox2"}}
+
+		for _, config := range []k8s.SleepPodConfig{sleepPod1Configs, sleepPod2Configs, sleepPod3Configs, sleepPod4Configs} {
+			ginkgo.By("Deploy the sleep pod " + config.Name + " to the development namespace")
+			sleepObj, podErr := k8s.InitSleepPod(config)
+			Ω(podErr).NotTo(gomega.HaveOccurred())
+			sleepRespPod, podErr := kClient.CreatePod(sleepObj, dev)
+			gomega.Ω(podErr).NotTo(gomega.HaveOccurred())
+
+			// Wait for pod to move to running state
+			podErr = kClient.WaitForPodBySelectorRunning(dev,
+				fmt.Sprintf("app=%s", sleepRespPod.ObjectMeta.Labels["app"]),
+				60)
+			gomega.Ω(podErr).NotTo(gomega.HaveOccurred())
+		}
+
+		// assert one of the pods in root.sandbox1 is preempted
+		ginkgo.By("One of the pods in root.sanbox1 is preempted")
+		sandbox1RunningPodsCnt := 0
+		pods, err := kClient.ListPodsByLabelSelector(dev, "queue=root.sandbox1")
+		gomega.Ω(err).NotTo(gomega.HaveOccurred())
+		for _, pod := range pods.Items {
+			if pod.DeletionTimestamp != nil {
+				continue
+			}
+			if pod.Status.Phase == v1.PodRunning {
+				sandbox1RunningPodsCnt++
+			}
+		}
+		Ω(sandbox1RunningPodsCnt).To(gomega.Equal(2), "One of the pods in root.sandbox1 should be preempted")
+	})
+
+	ginkgo.It("Verify_no_preemption_on_resources_less_than_guaranteed_value", func() {
+		ginkgo.By("A queue uses resource less than the guaranteed value can't be preempted.")
+		// update config
+		cm, err := kClient.GetConfigMap(constants.ConfigMapName, configmanager.YuniKornTestConfig.YkNamespace)
+		Ω(err).NotTo(HaveOccurred())
+		cm.Data[configmanager.DefaultPolicyGroup] = fmt.Sprintf(preemptionQueueYamlFormat, WorkerMemRes, "default", WorkerMemRes)
+		_, err = kClient.UpdateConfigMap(cm, configmanager.YuniKornTestConfig.YkNamespace)
+		Ω(err).NotTo(HaveOccurred())
+
+		// Define sleepPod
+		sleepPod1Configs := k8s.SleepPodConfig{Name: "sleepjob1", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod2Configs := k8s.SleepPodConfig{Name: "sleepjob2", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod3Configs := k8s.SleepPodConfig{Name: "sleepjob3", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod4Configs := k8s.SleepPodConfig{Name: "sleepjob4", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox2"}}
+
+		// Deploy pods in root.sandbox1
+		for _, config := range []k8s.SleepPodConfig{sleepPod1Configs, sleepPod2Configs, sleepPod3Configs} {
+			ginkgo.By("Deploy the sleep pod " + config.Name + " to the development namespace")
+			sleepObj, podErr := k8s.InitSleepPod(config)
+			Ω(podErr).NotTo(gomega.HaveOccurred())
+			sleepRespPod, podErr := kClient.CreatePod(sleepObj, dev)
+			gomega.Ω(podErr).NotTo(gomega.HaveOccurred())
+
+			// Wait for pod to move to running state
+			podErr = kClient.WaitForPodBySelectorRunning(dev,
+				fmt.Sprintf("app=%s", sleepRespPod.ObjectMeta.Labels["app"]),
+				30)
+			gomega.Ω(podErr).NotTo(gomega.HaveOccurred())
+		}
+
+		// Deploy sleepjob4 pod in root.sandbox2
+		ginkgo.By("Deploy the sleep pod " + sleepPod4Configs.Name + " to the development namespace")
+		sleepObj, podErr := k8s.InitSleepPod(sleepPod4Configs)
+		Ω(podErr).NotTo(gomega.HaveOccurred())
+		sleepRespPod4, err := kClient.CreatePod(sleepObj, dev)
+		gomega.Ω(err).NotTo(gomega.HaveOccurred())
+
+		// sleepjob4 pod can't be scheduled before pods in root.sandbox1 are succeeded
+		ginkgo.By("The sleep pod " + sleepPod4Configs.Name + " can't be scheduled")
+		err = kClient.WaitForPodUnschedulable(sleepRespPod4, time.Duration(60)*time.Second)
+		gomega.Ω(err).NotTo(gomega.HaveOccurred())
+
+		// pods in root.sandbox1 can be succeeded
+		ginkgo.By("The pods in root.sandbox1 can be succeeded")
+		for _, config := range []k8s.SleepPodConfig{sleepPod1Configs, sleepPod2Configs, sleepPod3Configs} {
+			err = kClient.WaitForPodScheduled(dev, config.Name, time.Duration(30)*time.Second)
+			gomega.Ω(err).NotTo(gomega.HaveOccurred())
+		}
+	})
+
+	ginkgo.It("Verify_no_preemption_outside_fence", func() {
+		ginkgo.By("The preemption can't go outside the fence.")
+		// update config
+		cm, err := kClient.GetConfigMap(constants.ConfigMapName, configmanager.YuniKornTestConfig.YkNamespace)
+		Ω(err).NotTo(HaveOccurred())
+		cm.Data[configmanager.DefaultPolicyGroup] = fmt.Sprintf(preemptionQueueYamlFormat, sleepPodMemLimit, "fence", sleepPodMemLimit)
+		_, err = kClient.UpdateConfigMap(cm, configmanager.YuniKornTestConfig.YkNamespace)
+		Ω(err).NotTo(HaveOccurred())
+
+		// Define sleepPod
+		sleepPod1Configs := k8s.SleepPodConfig{Name: "sleepjob1", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod2Configs := k8s.SleepPodConfig{Name: "sleepjob2", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod3Configs := k8s.SleepPodConfig{Name: "sleepjob3", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod4Configs := k8s.SleepPodConfig{Name: "sleepjob4", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox2"}}
+
+		// Deploy pods in root.sandbox1
+		for _, config := range []k8s.SleepPodConfig{sleepPod1Configs, sleepPod2Configs, sleepPod3Configs} {
+			ginkgo.By("Deploy the sleep pod " + config.Name + " to the development namespace")
+			sleepObj, podErr := k8s.InitSleepPod(config)
+			Ω(podErr).NotTo(gomega.HaveOccurred())
+			sleepRespPod, podErr := kClient.CreatePod(sleepObj, dev)
+			gomega.Ω(podErr).NotTo(gomega.HaveOccurred())
+
+			// Wait for pod to move to running state
+			podErr = kClient.WaitForPodBySelectorRunning(dev,
+				fmt.Sprintf("app=%s", sleepRespPod.ObjectMeta.Labels["app"]),
+				30)
+			gomega.Ω(podErr).NotTo(gomega.HaveOccurred())
+		}
+
+		// Deploy sleepjob4 pod in root.sandbox2
+		ginkgo.By("Deploy the sleep pod " + sleepPod4Configs.Name + " to the development namespace")
+		sleepObj, podErr := k8s.InitSleepPod(sleepPod4Configs)
+		Ω(podErr).NotTo(gomega.HaveOccurred())
+		sleepRespPod4, err := kClient.CreatePod(sleepObj, dev)
+		gomega.Ω(err).NotTo(gomega.HaveOccurred())
+
+		// sleepjob4 pod can't be scheduled before pods in root.sandbox1 are succeeded
+		ginkgo.By("The sleep pod " + sleepPod4Configs.Name + " can't be scheduled")
+		err = kClient.WaitForPodUnschedulable(sleepRespPod4, time.Duration(60)*time.Second)
+		gomega.Ω(err).NotTo(gomega.HaveOccurred())
+
+		// pods in root.sandbox1 can be succeeded
+		ginkgo.By("The pods in root.sandbox1 can be succeeded")
+		for _, config := range []k8s.SleepPodConfig{sleepPod1Configs, sleepPod2Configs, sleepPod3Configs} {
+			err = kClient.WaitForPodScheduled(dev, config.Name, time.Duration(30)*time.Second)
+			gomega.Ω(err).NotTo(gomega.HaveOccurred())
+		}
+	})
+
+	ginkgo.AfterEach(func() {
+
+		// Delete all sleep pods
+		ginkgo.By("Delete all sleep pods")
+		pods, err := kClient.GetPodNamesFromNS(ns.Name)
+		if err == nil {

Review Comment:
   Yeah, I prefer the way you posted too. Updated it!





[GitHub] [yunikorn-k8shim] pbacsko commented on a diff in pull request #608: [YUNIKORN-1471] add preemption e2e test

Posted by "pbacsko (via GitHub)" <gi...@apache.org>.
pbacsko commented on code in PR #608:
URL: https://github.com/apache/yunikorn-k8shim/pull/608#discussion_r1218197426


##########
test/e2e/preemption/preemption_test.go:
##########
@@ -0,0 +1,337 @@
+/*
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package preemption_test
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"
+	v1 "k8s.io/api/core/v1"
+	k8serrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"github.com/apache/yunikorn-k8shim/pkg/common/constants"
+	tests "github.com/apache/yunikorn-k8shim/test/e2e"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/configmanager"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/common"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/k8s"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/yunikorn"
+)
+
+var kClient k8s.KubeCtl
+var restClient yunikorn.RClient
+var ns *v1.Namespace
+var dev = "dev" + common.RandSeq(5)
+var oldConfigMap = new(v1.ConfigMap)
+var annotation = "ann-" + common.RandSeq(10)
+
+// Nodes
+var Worker = ""
+var WorkerMemRes int64
+var sleepPodMemLimit int64
+var taintKey = "e2e_test_preemption"
+var nodesToTaint []string
+
+// Queues
+var preemptionQueueYamlFormat = `
+partitions:
+  - name: default
+    queues:
+      - name: root
+        submitacl: '*'
+        queues:
+          - name: sandbox1
+            submitacl: '*'
+            properties:
+              preemption.delay: 1s
+            resources:
+              guaranteed:
+                memory: %dM
+          - name: sandbox2
+            submitacl: '*'
+            properties:
+              preemption.policy: %s
+              preemption.delay: 1s
+            resources:
+              guaranteed:
+                memory: %dM
+`
+
+var _ = ginkgo.BeforeSuite(func() {
+	// Initializing kubectl client
+	kClient = k8s.KubeCtl{}
+	Ω(kClient.SetClient()).To(gomega.BeNil())
+	// Initializing rest client
+	restClient = yunikorn.RClient{}
+	Ω(restClient).NotTo(gomega.BeNil())
+
+	annotation = "ann-" + common.RandSeq(10)
+	yunikorn.EnsureYuniKornConfigsPresent()
+	yunikorn.UpdateConfigMapWrapper(oldConfigMap, "", annotation)
+
+	ginkgo.By("Port-forward the scheduler pod")
+	var err = kClient.PortForwardYkSchedulerPod()
+	Ω(err).NotTo(gomega.HaveOccurred())
+
+	ginkgo.By("create development namespace")
+	ns, err = kClient.CreateNamespace(dev, nil)
+	gomega.Ω(err).NotTo(gomega.HaveOccurred())
+	gomega.Ω(ns.Status.Phase).To(gomega.Equal(v1.NamespaceActive))
+
+	var nodes *v1.NodeList
+	nodes, err = kClient.GetNodes()
+	Ω(err).NotTo(gomega.HaveOccurred())
+	Ω(len(nodes.Items)).NotTo(gomega.BeZero(), "Nodes can't be empty")
+
+	// Extract node allocatable resources
+	for _, node := range nodes.Items {
+		// skip master if it's marked as such
+		node := node
+		if k8s.IsMasterNode(&node) || !k8s.IsComputeNode(&node) {
+			continue
+		}
+		if Worker == "" {
+			Worker = node.Name
+		} else {
+			nodesToTaint = append(nodesToTaint, node.Name)
+		}
+	}
+	Ω(Worker).NotTo(gomega.BeEmpty(), "Worker node not found")
+
+	ginkgo.By("Tainting some nodes..")
+	for _, nodeName := range nodesToTaint {
+		ginkgo.By("Tainting node " + nodeName)
+		err = kClient.TaintNode(nodeName, taintKey, "value", v1.TaintEffectNoSchedule)
+		Ω(err).NotTo(gomega.HaveOccurred())
+	}
+
+	nodesDAOInfo, err := restClient.GetNodes(constants.DefaultPartition)
+	Ω(err).NotTo(gomega.HaveOccurred())
+	Ω(nodesDAOInfo).NotTo(gomega.BeNil())
+
+	for _, node := range *nodesDAOInfo {
+		if node.NodeID == Worker {
+			WorkerMemRes = node.Available["memory"]
+		}
+	}
+	sleepPodMemLimit = int64(float64(WorkerMemRes)/3) / (1000 * 1000)

Review Comment:
   Print this value to the console; it might be helpful for debugging.





[GitHub] [yunikorn-k8shim] pbacsko commented on a diff in pull request #608: [YUNIKORN-1471] add preemption e2e test

Posted by "pbacsko (via GitHub)" <gi...@apache.org>.
pbacsko commented on code in PR #608:
URL: https://github.com/apache/yunikorn-k8shim/pull/608#discussion_r1218195576


##########
test/e2e/preemption/preemption_test.go:
##########
@@ -0,0 +1,337 @@
+/*
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package preemption_test
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"
+	v1 "k8s.io/api/core/v1"
+	k8serrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"github.com/apache/yunikorn-k8shim/pkg/common/constants"
+	tests "github.com/apache/yunikorn-k8shim/test/e2e"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/configmanager"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/common"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/k8s"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/yunikorn"
+)
+
+var kClient k8s.KubeCtl
+var restClient yunikorn.RClient
+var ns *v1.Namespace
+var dev = "dev" + common.RandSeq(5)
+var oldConfigMap = new(v1.ConfigMap)
+var annotation = "ann-" + common.RandSeq(10)
+
+// Nodes
+var Worker = ""
+var WorkerMemRes int64
+var sleepPodMemLimit int64
+var taintKey = "e2e_test_preemption"
+var nodesToTaint []string
+
+// Queues
+var preemptionQueueYamlFormat = `
+partitions:
+  - name: default
+    queues:
+      - name: root
+        submitacl: '*'
+        queues:
+          - name: sandbox1
+            submitacl: '*'
+            properties:
+              preemption.delay: 1s
+            resources:
+              guaranteed:
+                memory: %dM
+          - name: sandbox2
+            submitacl: '*'
+            properties:
+              preemption.policy: %s
+              preemption.delay: 1s
+            resources:
+              guaranteed:
+                memory: %dM
+`
+
+var _ = ginkgo.BeforeSuite(func() {
+	// Initializing kubectl client
+	kClient = k8s.KubeCtl{}
+	Ω(kClient.SetClient()).To(gomega.BeNil())
+	// Initializing rest client
+	restClient = yunikorn.RClient{}
+	Ω(restClient).NotTo(gomega.BeNil())
+
+	annotation = "ann-" + common.RandSeq(10)
+	yunikorn.EnsureYuniKornConfigsPresent()
+	yunikorn.UpdateConfigMapWrapper(oldConfigMap, "", annotation)
+
+	ginkgo.By("Port-forward the scheduler pod")
+	var err = kClient.PortForwardYkSchedulerPod()
+	Ω(err).NotTo(gomega.HaveOccurred())
+
+	ginkgo.By("create development namespace")
+	ns, err = kClient.CreateNamespace(dev, nil)
+	gomega.Ω(err).NotTo(gomega.HaveOccurred())
+	gomega.Ω(ns.Status.Phase).To(gomega.Equal(v1.NamespaceActive))
+
+	var nodes *v1.NodeList
+	nodes, err = kClient.GetNodes()
+	Ω(err).NotTo(gomega.HaveOccurred())
+	Ω(len(nodes.Items)).NotTo(gomega.BeZero(), "Nodes can't be empty")
+
+	// Extract node allocatable resources
+	for _, node := range nodes.Items {
+		// skip master if it's marked as such
+		node := node
+		if k8s.IsMasterNode(&node) || !k8s.IsComputeNode(&node) {
+			continue
+		}
+		if Worker == "" {
+			Worker = node.Name
+		} else {
+			nodesToTaint = append(nodesToTaint, node.Name)
+		}
+	}
+	Ω(Worker).NotTo(gomega.BeEmpty(), "Worker node not found")
+
+	ginkgo.By("Tainting some nodes..")

Review Comment:
   Do we need to taint nodes? If we have proper queue config (guaranteed, max), what happens if we don't do this?





[GitHub] [yunikorn-k8shim] pbacsko commented on pull request #608: [YUNIKORN-1471] add preemption e2e test

Posted by "pbacsko (via GitHub)" <gi...@apache.org>.
pbacsko commented on PR #608:
URL: https://github.com/apache/yunikorn-k8shim/pull/608#issuecomment-1584341639

   +1




[GitHub] [yunikorn-k8shim] codecov[bot] commented on pull request #608: [YUNIKORN-1471] add preemption e2e test

Posted by "codecov[bot] (via GitHub)" <gi...@apache.org>.
codecov[bot] commented on PR #608:
URL: https://github.com/apache/yunikorn-k8shim/pull/608#issuecomment-1578751187

   ## [Codecov](https://app.codecov.io/gh/apache/yunikorn-k8shim/pull/608?src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=apache) Report
   > Merging [#608](https://app.codecov.io/gh/apache/yunikorn-k8shim/pull/608?src=pr&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=apache) (115934a) into [master](https://app.codecov.io/gh/apache/yunikorn-k8shim/commit/9f41509e887740379d7b30fbd9f42ff7d1a1a109?el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=apache) (9f41509) will **not change** coverage.
   > The diff coverage is `n/a`.
   
   ```diff
   @@           Coverage Diff           @@
   ##           master     #608   +/-   ##
   =======================================
     Coverage   70.87%   70.87%           
   =======================================
     Files          47       47           
     Lines        7972     7972           
   =======================================
     Hits         5650     5650           
     Misses       2117     2117           
     Partials      205      205           
   ```
   
   
   
   :mega: We’re building smart automated test selection to slash your CI/CD build times. [Learn more](https://about.codecov.io/iterative-testing/?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=apache)
   




[GitHub] [yunikorn-k8shim] FrankYang0529 commented on a diff in pull request #608: [YUNIKORN-1471] add preemption e2e test

Posted by "FrankYang0529 (via GitHub)" <gi...@apache.org>.
FrankYang0529 commented on code in PR #608:
URL: https://github.com/apache/yunikorn-k8shim/pull/608#discussion_r1219567482


##########
test/e2e/preemption/preemption_test.go:
##########
@@ -0,0 +1,337 @@
+/*
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package preemption_test
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"
+	v1 "k8s.io/api/core/v1"
+	k8serrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"github.com/apache/yunikorn-k8shim/pkg/common/constants"
+	tests "github.com/apache/yunikorn-k8shim/test/e2e"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/configmanager"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/common"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/k8s"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/yunikorn"
+)
+
+var kClient k8s.KubeCtl
+var restClient yunikorn.RClient
+var ns *v1.Namespace
+var dev = "dev" + common.RandSeq(5)
+var oldConfigMap = new(v1.ConfigMap)
+var annotation = "ann-" + common.RandSeq(10)
+
+// Nodes
+var Worker = ""
+var WorkerMemRes int64
+var sleepPodMemLimit int64
+var taintKey = "e2e_test_preemption"
+var nodesToTaint []string
+
+// Queues
+var preemptionQueueYamlFormat = `
+partitions:
+  - name: default
+    queues:
+      - name: root
+        submitacl: '*'
+        queues:
+          - name: sandbox1
+            submitacl: '*'
+            properties:
+              preemption.delay: 1s
+            resources:
+              guaranteed:
+                memory: %dM
+          - name: sandbox2
+            submitacl: '*'
+            properties:
+              preemption.policy: %s
+              preemption.delay: 1s
+            resources:
+              guaranteed:
+                memory: %dM
+`
+
+var _ = ginkgo.BeforeSuite(func() {
+	// Initializing kubectl client
+	kClient = k8s.KubeCtl{}
+	Ω(kClient.SetClient()).To(gomega.BeNil())
+	// Initializing rest client
+	restClient = yunikorn.RClient{}
+	Ω(restClient).NotTo(gomega.BeNil())
+
+	annotation = "ann-" + common.RandSeq(10)
+	yunikorn.EnsureYuniKornConfigsPresent()
+	yunikorn.UpdateConfigMapWrapper(oldConfigMap, "", annotation)
+
+	ginkgo.By("Port-forward the scheduler pod")
+	var err = kClient.PortForwardYkSchedulerPod()
+	Ω(err).NotTo(gomega.HaveOccurred())
+
+	ginkgo.By("create development namespace")
+	ns, err = kClient.CreateNamespace(dev, nil)
+	gomega.Ω(err).NotTo(gomega.HaveOccurred())
+	gomega.Ω(ns.Status.Phase).To(gomega.Equal(v1.NamespaceActive))
+
+	var nodes *v1.NodeList
+	nodes, err = kClient.GetNodes()
+	Ω(err).NotTo(gomega.HaveOccurred())
+	Ω(len(nodes.Items)).NotTo(gomega.BeZero(), "Nodes can't be empty")
+
+	// Extract node allocatable resources
+	for _, node := range nodes.Items {
+		// skip master if it's marked as such
+		node := node
+		if k8s.IsMasterNode(&node) || !k8s.IsComputeNode(&node) {
+			continue
+		}
+		if Worker == "" {
+			Worker = node.Name
+		} else {
+			nodesToTaint = append(nodesToTaint, node.Name)
+		}
+	}
+	Ω(Worker).NotTo(gomega.BeEmpty(), "Worker node not found")
+
+	ginkgo.By("Tainting some nodes..")
+	for _, nodeName := range nodesToTaint {
+		ginkgo.By("Tainting node " + nodeName)
+		err = kClient.TaintNode(nodeName, taintKey, "value", v1.TaintEffectNoSchedule)
+		Ω(err).NotTo(gomega.HaveOccurred())
+	}
+
+	nodesDAOInfo, err := restClient.GetNodes(constants.DefaultPartition)
+	Ω(err).NotTo(gomega.HaveOccurred())
+	Ω(nodesDAOInfo).NotTo(gomega.BeNil())
+
+	for _, node := range *nodesDAOInfo {
+		if node.NodeID == Worker {
+			WorkerMemRes = node.Available["memory"]
+		}
+	}
+	sleepPodMemLimit = int64(float64(WorkerMemRes)/3) / (1000 * 1000)
+	Ω(sleepPodMemLimit).NotTo(gomega.BeZero(), "Sleep pod memory limit cannot be zero")
+})
+
+var _ = ginkgo.AfterSuite(func() {
+
+	ginkgo.By("Untainting some nodes")
+	for _, nodeName := range nodesToTaint {
+		ginkgo.By("Untainting node " + nodeName)
+		err := kClient.UntaintNode(nodeName, taintKey)
+		Ω(err).NotTo(gomega.HaveOccurred(), "Could not remove taint from node "+nodeName)
+	}
+
+	ginkgo.By("Check Yunikorn's health")
+	checks, err := yunikorn.GetFailedHealthChecks()
+	Ω(err).NotTo(gomega.HaveOccurred())
+	Ω(checks).To(gomega.Equal(""), checks)
+
+	testDescription := ginkgo.CurrentSpecReport()
+	if testDescription.Failed() {
+		tests.LogTestClusterInfoWrapper(testDescription.FailureMessage(), []string{ns.Name})
+		tests.LogYunikornContainer(testDescription.FailureMessage())
+	}
+	ginkgo.By("Tearing down namespace: " + ns.Name)
+	err = kClient.TearDownNamespace(ns.Name)
+	Ω(err).NotTo(gomega.HaveOccurred())
+
+	yunikorn.RestoreConfigMapWrapper(oldConfigMap, annotation)
+})
+
+var _ = ginkgo.Describe("Preemption", func() {
+	ginkgo.It("Verify_basic_preemption", func() {
+		ginkgo.By("A queue uses resource more than the guaranteed value even after removing one of the pods. The cluster doesn't have enough resource to deploy a pod in another queue which uses resource less than the guaranteed value.")
+		// update config
+		cm, err := kClient.GetConfigMap(constants.ConfigMapName, configmanager.YuniKornTestConfig.YkNamespace)
+		Ω(err).NotTo(HaveOccurred())
+		cm.Data[configmanager.DefaultPolicyGroup] = fmt.Sprintf(preemptionQueueYamlFormat, sleepPodMemLimit, "default", sleepPodMemLimit)
+		_, err = kClient.UpdateConfigMap(cm, configmanager.YuniKornTestConfig.YkNamespace)
+		Ω(err).NotTo(HaveOccurred())
+
+		// Define sleepPod
+		sleepPod1Configs := k8s.SleepPodConfig{Name: "sleepjob1", NS: dev, Mem: sleepPodMemLimit, Time: 600, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod2Configs := k8s.SleepPodConfig{Name: "sleepjob2", NS: dev, Mem: sleepPodMemLimit, Time: 600, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod3Configs := k8s.SleepPodConfig{Name: "sleepjob3", NS: dev, Mem: sleepPodMemLimit, Time: 600, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod4Configs := k8s.SleepPodConfig{Name: "sleepjob4", NS: dev, Mem: sleepPodMemLimit, Time: 600, Optedout: true, Labels: map[string]string{"queue": "root.sandbox2"}}
+
+		for _, config := range []k8s.SleepPodConfig{sleepPod1Configs, sleepPod2Configs, sleepPod3Configs, sleepPod4Configs} {
+			ginkgo.By("Deploy the sleep pod " + config.Name + " to the development namespace")
+			sleepObj, podErr := k8s.InitSleepPod(config)
+			Ω(podErr).NotTo(gomega.HaveOccurred())
+			sleepRespPod, podErr := kClient.CreatePod(sleepObj, dev)
+			gomega.Ω(podErr).NotTo(gomega.HaveOccurred())
+
+			// Wait for pod to move to running state
+			podErr = kClient.WaitForPodBySelectorRunning(dev,
+				fmt.Sprintf("app=%s", sleepRespPod.ObjectMeta.Labels["app"]),
+				60)
+			gomega.Ω(podErr).NotTo(gomega.HaveOccurred())
+		}
+
+		// assert one of the pods in root.sandbox1 is preempted
+		ginkgo.By("One of the pods in root.sanbox1 is preempted")
+		sandbox1RunningPodsCnt := 0
+		pods, err := kClient.ListPodsByLabelSelector(dev, "queue=root.sandbox1")
+		gomega.Ω(err).NotTo(gomega.HaveOccurred())
+		for _, pod := range pods.Items {
+			if pod.DeletionTimestamp != nil {
+				continue
+			}
+			if pod.Status.Phase == v1.PodRunning {
+				sandbox1RunningPodsCnt++
+			}
+		}
+		Ω(sandbox1RunningPodsCnt).To(gomega.Equal(2), "One of the pods in root.sandbox1 should be preempted")
+	})
+
+	ginkgo.It("Verify_no_preemption_on_resources_less_than_guaranteed_value", func() {
+		ginkgo.By("A queue uses resource less than the guaranteed value can't be preempted.")
+		// update config
+		cm, err := kClient.GetConfigMap(constants.ConfigMapName, configmanager.YuniKornTestConfig.YkNamespace)
+		Ω(err).NotTo(HaveOccurred())
+		cm.Data[configmanager.DefaultPolicyGroup] = fmt.Sprintf(preemptionQueueYamlFormat, WorkerMemRes, "default", WorkerMemRes)
+		_, err = kClient.UpdateConfigMap(cm, configmanager.YuniKornTestConfig.YkNamespace)
+		Ω(err).NotTo(HaveOccurred())
+
+		// Define sleepPod
+		sleepPod1Configs := k8s.SleepPodConfig{Name: "sleepjob1", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod2Configs := k8s.SleepPodConfig{Name: "sleepjob2", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod3Configs := k8s.SleepPodConfig{Name: "sleepjob3", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod4Configs := k8s.SleepPodConfig{Name: "sleepjob4", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox2"}}
+
+		// Deploy pods in root.sandbox1
+		for _, config := range []k8s.SleepPodConfig{sleepPod1Configs, sleepPod2Configs, sleepPod3Configs} {
+			ginkgo.By("Deploy the sleep pod " + config.Name + " to the development namespace")
+			sleepObj, podErr := k8s.InitSleepPod(config)
+			Ω(podErr).NotTo(gomega.HaveOccurred())
+			sleepRespPod, podErr := kClient.CreatePod(sleepObj, dev)
+			gomega.Ω(podErr).NotTo(gomega.HaveOccurred())
+
+			// Wait for pod to move to running state
+			podErr = kClient.WaitForPodBySelectorRunning(dev,
+				fmt.Sprintf("app=%s", sleepRespPod.ObjectMeta.Labels["app"]),
+				30)
+			gomega.Ω(podErr).NotTo(gomega.HaveOccurred())
+		}
+
+		// Deploy sleepjob4 pod in root.sandbox2
+		ginkgo.By("Deploy the sleep pod " + sleepPod4Configs.Name + " to the development namespace")
+		sleepObj, podErr := k8s.InitSleepPod(sleepPod4Configs)
+		Ω(podErr).NotTo(gomega.HaveOccurred())
+		sleepRespPod4, err := kClient.CreatePod(sleepObj, dev)
+		gomega.Ω(err).NotTo(gomega.HaveOccurred())
+
+		// sleepjob4 pod can't be scheduled before pods in root.sandbox1 are succeeded
+		ginkgo.By("The sleep pod " + sleepPod4Configs.Name + " can't be scheduled")
+		err = kClient.WaitForPodUnschedulable(sleepRespPod4, time.Duration(60)*time.Second)
+		gomega.Ω(err).NotTo(gomega.HaveOccurred())
+
+		// pods in root.sandbox1 can be succeeded
+		ginkgo.By("The pods in root.sandbox1 can be succeeded")
+		for _, config := range []k8s.SleepPodConfig{sleepPod1Configs, sleepPod2Configs, sleepPod3Configs} {
+			err = kClient.WaitForPodScheduled(dev, config.Name, time.Duration(30)*time.Second)
+			gomega.Ω(err).NotTo(gomega.HaveOccurred())
+		}
+	})
+
+	ginkgo.It("Verify_no_preemption_outside_fence", func() {
+		ginkgo.By("The preemption can't go outside the fence.")
+		// update config
+		cm, err := kClient.GetConfigMap(constants.ConfigMapName, configmanager.YuniKornTestConfig.YkNamespace)
+		Ω(err).NotTo(HaveOccurred())
+		cm.Data[configmanager.DefaultPolicyGroup] = fmt.Sprintf(preemptionQueueYamlFormat, sleepPodMemLimit, "fence", sleepPodMemLimit)
+		_, err = kClient.UpdateConfigMap(cm, configmanager.YuniKornTestConfig.YkNamespace)
+		Ω(err).NotTo(HaveOccurred())
+
+		// Define sleepPod
+		sleepPod1Configs := k8s.SleepPodConfig{Name: "sleepjob1", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod2Configs := k8s.SleepPodConfig{Name: "sleepjob2", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod3Configs := k8s.SleepPodConfig{Name: "sleepjob3", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox1"}}
+		sleepPod4Configs := k8s.SleepPodConfig{Name: "sleepjob4", NS: dev, Mem: sleepPodMemLimit, Time: 30, Optedout: true, Labels: map[string]string{"queue": "root.sandbox2"}}
+
+		// Deploy pods in root.sandbox1
+		for _, config := range []k8s.SleepPodConfig{sleepPod1Configs, sleepPod2Configs, sleepPod3Configs} {
+			ginkgo.By("Deploy the sleep pod " + config.Name + " to the development namespace")
+			sleepObj, podErr := k8s.InitSleepPod(config)
+			Ω(podErr).NotTo(gomega.HaveOccurred())
+			sleepRespPod, podErr := kClient.CreatePod(sleepObj, dev)
+			gomega.Ω(podErr).NotTo(gomega.HaveOccurred())
+
+			// Wait for pod to move to running state
+			podErr = kClient.WaitForPodBySelectorRunning(dev,
+				fmt.Sprintf("app=%s", sleepRespPod.ObjectMeta.Labels["app"]),
+				30)
+			gomega.Ω(podErr).NotTo(gomega.HaveOccurred())
+		}
+
+		// Deploy sleepjob4 pod in root.sandbox2
+		ginkgo.By("Deploy the sleep pod " + sleepPod4Configs.Name + " to the development namespace")
+		sleepObj, podErr := k8s.InitSleepPod(sleepPod4Configs)
+		Ω(podErr).NotTo(gomega.HaveOccurred())
+		sleepRespPod4, err := kClient.CreatePod(sleepObj, dev)
+		gomega.Ω(err).NotTo(gomega.HaveOccurred())
+
+		// sleepjob4 pod can't be scheduled before pods in root.sandbox1 are succeeded
+		ginkgo.By("The sleep pod " + sleepPod4Configs.Name + " can't be scheduled")
+		err = kClient.WaitForPodUnschedulable(sleepRespPod4, time.Duration(60)*time.Second)

Review Comment:
   Yes, updated all `time.Duration(x)*time.Second` to `x*time.Second`.
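
   For reference, both spellings yield the same `time.Duration`; dropping the explicit conversion is purely a readability cleanup:

   ```go
   // before
   err = kClient.WaitForPodUnschedulable(sleepRespPod4, time.Duration(60)*time.Second)

   // after: the untyped constant 60 multiplies time.Second directly
   err = kClient.WaitForPodUnschedulable(sleepRespPod4, 60*time.Second)
   ```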





[GitHub] [yunikorn-k8shim] FrankYang0529 commented on a diff in pull request #608: [YUNIKORN-1471] add preemption e2e test

Posted by "FrankYang0529 (via GitHub)" <gi...@apache.org>.
FrankYang0529 commented on code in PR #608:
URL: https://github.com/apache/yunikorn-k8shim/pull/608#discussion_r1219566600


##########
test/e2e/preemption/preemption_test.go:
##########
@@ -0,0 +1,337 @@
+/*
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package preemption_test
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"
+	v1 "k8s.io/api/core/v1"
+	k8serrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"github.com/apache/yunikorn-k8shim/pkg/common/constants"
+	tests "github.com/apache/yunikorn-k8shim/test/e2e"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/configmanager"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/common"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/k8s"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/yunikorn"
+)
+
+var kClient k8s.KubeCtl
+var restClient yunikorn.RClient
+var ns *v1.Namespace
+var dev = "dev" + common.RandSeq(5)
+var oldConfigMap = new(v1.ConfigMap)
+var annotation = "ann-" + common.RandSeq(10)
+
+// Nodes
+var Worker = ""
+var WorkerMemRes int64
+var sleepPodMemLimit int64
+var taintKey = "e2e_test_preemption"
+var nodesToTaint []string
+
+// Queues
+var preemptionQueueYamlFormat = `
+partitions:
+  - name: default
+    queues:
+      - name: root
+        submitacl: '*'
+        queues:
+          - name: sandbox1
+            submitacl: '*'
+            properties:
+              preemption.delay: 1s
+            resources:
+              guaranteed:
+                memory: %dM
+          - name: sandbox2
+            submitacl: '*'
+            properties:
+              preemption.policy: %s
+              preemption.delay: 1s
+            resources:
+              guaranteed:
+                memory: %dM
+`
+
+var _ = ginkgo.BeforeSuite(func() {
+	// Initializing kubectl client
+	kClient = k8s.KubeCtl{}
+	Ω(kClient.SetClient()).To(gomega.BeNil())
+	// Initializing rest client
+	restClient = yunikorn.RClient{}
+	Ω(restClient).NotTo(gomega.BeNil())
+
+	annotation = "ann-" + common.RandSeq(10)
+	yunikorn.EnsureYuniKornConfigsPresent()
+	yunikorn.UpdateConfigMapWrapper(oldConfigMap, "", annotation)
+
+	ginkgo.By("Port-forward the scheduler pod")
+	var err = kClient.PortForwardYkSchedulerPod()
+	Ω(err).NotTo(gomega.HaveOccurred())
+
+	ginkgo.By("create development namespace")
+	ns, err = kClient.CreateNamespace(dev, nil)
+	gomega.Ω(err).NotTo(gomega.HaveOccurred())
+	gomega.Ω(ns.Status.Phase).To(gomega.Equal(v1.NamespaceActive))
+
+	var nodes *v1.NodeList
+	nodes, err = kClient.GetNodes()
+	Ω(err).NotTo(gomega.HaveOccurred())
+	Ω(len(nodes.Items)).NotTo(gomega.BeZero(), "Nodes can't be empty")
+
+	// Extract node allocatable resources
+	for _, node := range nodes.Items {
+		// skip master if it's marked as such
+		node := node
+		if k8s.IsMasterNode(&node) || !k8s.IsComputeNode(&node) {
+			continue
+		}
+		if Worker == "" {
+			Worker = node.Name
+		} else {
+			nodesToTaint = append(nodesToTaint, node.Name)
+		}
+	}
+	Ω(Worker).NotTo(gomega.BeEmpty(), "Worker node not found")
+
+	ginkgo.By("Tainting some nodes..")
+	for _, nodeName := range nodesToTaint {
+		ginkgo.By("Tainting node " + nodeName)
+		err = kClient.TaintNode(nodeName, taintKey, "value", v1.TaintEffectNoSchedule)
+		Ω(err).NotTo(gomega.HaveOccurred())
+	}
+
+	nodesDAOInfo, err := restClient.GetNodes(constants.DefaultPartition)
+	Ω(err).NotTo(gomega.HaveOccurred())
+	Ω(nodesDAOInfo).NotTo(gomega.BeNil())
+
+	for _, node := range *nodesDAOInfo {
+		if node.NodeID == Worker {
+			WorkerMemRes = node.Available["memory"]
+		}
+	}
+	sleepPodMemLimit = int64(float64(WorkerMemRes)/3) / (1000 * 1000)

Review Comment:
   Added a log to print `sleepPodMemLimit` and `WorkerMemRes`.
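
   A minimal example of such a log line, following the `ginkgo.GinkgoWriter` pattern already used in this suite (the exact wording in the PR may differ):

   ```go
   fmt.Fprintf(ginkgo.GinkgoWriter, "Worker node %s: WorkerMemRes=%d, sleepPodMemLimit=%d\n",
   	Worker, WorkerMemRes, sleepPodMemLimit)
   ```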





[GitHub] [yunikorn-k8shim] pbacsko commented on a diff in pull request #608: [YUNIKORN-1471] add preemption e2e test

Posted by "pbacsko (via GitHub)" <gi...@apache.org>.
pbacsko commented on code in PR #608:
URL: https://github.com/apache/yunikorn-k8shim/pull/608#discussion_r1218197426


##########
test/e2e/preemption/preemption_test.go:
##########
@@ -0,0 +1,337 @@
+/*
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package preemption_test
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"
+	v1 "k8s.io/api/core/v1"
+	k8serrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"github.com/apache/yunikorn-k8shim/pkg/common/constants"
+	tests "github.com/apache/yunikorn-k8shim/test/e2e"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/configmanager"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/common"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/k8s"
+	"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/yunikorn"
+)
+
+var kClient k8s.KubeCtl
+var restClient yunikorn.RClient
+var ns *v1.Namespace
+var dev = "dev" + common.RandSeq(5)
+var oldConfigMap = new(v1.ConfigMap)
+var annotation = "ann-" + common.RandSeq(10)
+
+// Nodes
+var Worker = ""
+var WorkerMemRes int64
+var sleepPodMemLimit int64
+var taintKey = "e2e_test_preemption"
+var nodesToTaint []string
+
+// Queues
+var preemptionQueueYamlFormat = `
+partitions:
+  - name: default
+    queues:
+      - name: root
+        submitacl: '*'
+        queues:
+          - name: sandbox1
+            submitacl: '*'
+            properties:
+              preemption.delay: 1s
+            resources:
+              guaranteed:
+                memory: %dM
+          - name: sandbox2
+            submitacl: '*'
+            properties:
+              preemption.policy: %s
+              preemption.delay: 1s
+            resources:
+              guaranteed:
+                memory: %dM
+`
+
+var _ = ginkgo.BeforeSuite(func() {
+	// Initializing kubectl client
+	kClient = k8s.KubeCtl{}
+	Ω(kClient.SetClient()).To(gomega.BeNil())
+	// Initializing rest client
+	restClient = yunikorn.RClient{}
+	Ω(restClient).NotTo(gomega.BeNil())
+
+	annotation = "ann-" + common.RandSeq(10)
+	yunikorn.EnsureYuniKornConfigsPresent()
+	yunikorn.UpdateConfigMapWrapper(oldConfigMap, "", annotation)
+
+	ginkgo.By("Port-forward the scheduler pod")
+	var err = kClient.PortForwardYkSchedulerPod()
+	Ω(err).NotTo(gomega.HaveOccurred())
+
+	ginkgo.By("create development namespace")
+	ns, err = kClient.CreateNamespace(dev, nil)
+	gomega.Ω(err).NotTo(gomega.HaveOccurred())
+	gomega.Ω(ns.Status.Phase).To(gomega.Equal(v1.NamespaceActive))
+
+	var nodes *v1.NodeList
+	nodes, err = kClient.GetNodes()
+	Ω(err).NotTo(gomega.HaveOccurred())
+	Ω(len(nodes.Items)).NotTo(gomega.BeZero(), "Nodes can't be empty")
+
+	// Extract node allocatable resources
+	for _, node := range nodes.Items {
+		// skip master if it's marked as such
+		node := node
+		if k8s.IsMasterNode(&node) || !k8s.IsComputeNode(&node) {
+			continue
+		}
+		if Worker == "" {
+			Worker = node.Name
+		} else {
+			nodesToTaint = append(nodesToTaint, node.Name)
+		}
+	}
+	Ω(Worker).NotTo(gomega.BeEmpty(), "Worker node not found")
+
+	ginkgo.By("Tainting some nodes..")
+	for _, nodeName := range nodesToTaint {
+		ginkgo.By("Tainting node " + nodeName)
+		err = kClient.TaintNode(nodeName, taintKey, "value", v1.TaintEffectNoSchedule)
+		Ω(err).NotTo(gomega.HaveOccurred())
+	}
+
+	nodesDAOInfo, err := restClient.GetNodes(constants.DefaultPartition)
+	Ω(err).NotTo(gomega.HaveOccurred())
+	Ω(nodesDAOInfo).NotTo(gomega.BeNil())
+
+	for _, node := range *nodesDAOInfo {
+		if node.NodeID == Worker {
+			WorkerMemRes = node.Available["memory"]
+		}
+	}
+	sleepPodMemLimit = int64(float64(WorkerMemRes)/3) / (1000 * 1000)

Review Comment:
   Print this value to the console
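   One possible way to do that, matching the bare `Ω`/gomega style of the quoted file and writing to Ginkgo's own writer (a sketch under those assumptions, not necessarily how the author addressed the comment):

       // Sketch only: print the computed values and sanity-check the limit before the suite runs.
       fmt.Fprintf(ginkgo.GinkgoWriter, "WorkerMemRes=%d, sleepPodMemLimit=%dM\n", WorkerMemRes, sleepPodMemLimit)
       Ω(sleepPodMemLimit).To(gomega.BeNumerically(">", 0), "sleep pod memory limit should be positive")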


