Posted to issues@yunikorn.apache.org by br...@apache.org on 2024/03/16 12:19:45 UTC
(yunikorn-k8shim) branch master updated: [YUNIKORN-2492] Delete unused function parameter (#804)
This is an automated email from the ASF dual-hosted git repository.
brandboat pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/yunikorn-k8shim.git
The following commit(s) were added to refs/heads/master by this push:
new 67a3c33f [YUNIKORN-2492] Delete unused function parameter (#804)
67a3c33f is described below
commit 67a3c33ffde6e87276b08e54caf291fbdd2b1720
Author: amikai <as...@gmail.com>
AuthorDate: Sat Mar 16 20:18:49 2024 +0800
[YUNIKORN-2492] Delete unused function parameter (#804)
Closes: #804
Signed-off-by: brandboat <br...@gmail.com>
---
.../admission_controller_suite_test.go | 6 ++--
test/e2e/basic_scheduling/basic_scheduling_test.go | 5 ++--
test/e2e/bin_packing/bin_packing_suite_test.go | 5 ++--
test/e2e/configmap/configmap_test.go | 2 +-
test/e2e/framework/helpers/yunikorn/wrappers.go | 12 ++++----
.../gang_scheduling/gang_scheduling_suite_test.go | 6 ++--
.../node_resources/node_resources_suite_test.go | 6 ++--
test/e2e/predicates/predicates_suite_test.go | 6 ++--
test/e2e/preemption/preemption_test.go | 18 ++++--------
.../priority_scheduling_suite_test.go | 6 ++--
.../priority_scheduling_test.go | 15 ++++------
.../queue_quota_mgmt_suite_test.go | 5 ++--
.../recovery_and_restart_test.go | 4 +--
.../resource_fairness_suite_test.go | 4 +--
.../resource_fairness/resource_fairness_test.go | 3 +-
test/e2e/simple_preemptor/simple_preemptor_test.go | 6 ++--
.../spark_jobs_scheduling_suite_test.go | 6 ++--
test/e2e/user_group_limit/user_group_limit_test.go | 33 ++++++++--------------
18 files changed, 52 insertions(+), 96 deletions(-)
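For context, a minimal, self-contained sketch of the pattern this commit applies to the wrapper helpers below. This is not the YuniKorn test code itself; the names updateConfigOld/updateConfig are made up for illustration. A parameter that a function accepts but never reads is dropped from the signature, and every call site loses the argument, exactly as the annotation parameter is removed from UpdateConfigMapWrapper, UpdateCustomConfigMapWrapper, UpdateCustomConfigMapWrapperWithMap and RestoreConfigMapWrapper in the diff:

package main

import "fmt"

// Before: the annotation argument is accepted but never used by the body,
// mirroring the old UpdateConfigMapWrapper(oldConfigMap, schedPolicy, annotation).
func updateConfigOld(configName, schedPolicy, annotation string) {
	fmt.Printf("updating %s with policy %q\n", configName, schedPolicy)
}

// After: the unused parameter is gone, mirroring the new
// UpdateConfigMapWrapper(oldConfigMap, schedPolicy) signature.
func updateConfig(configName, schedPolicy string) {
	fmt.Printf("updating %s with policy %q\n", configName, schedPolicy)
}

func main() {
	updateConfigOld("yunikorn-configs", "fifo", "ann-abc123") // old call site
	updateConfig("yunikorn-configs", "fifo")                  // new call site
}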
diff --git a/test/e2e/admission_controller/admission_controller_suite_test.go b/test/e2e/admission_controller/admission_controller_suite_test.go
index 047ac12c..1db0e3cf 100644
--- a/test/e2e/admission_controller/admission_controller_suite_test.go
+++ b/test/e2e/admission_controller/admission_controller_suite_test.go
@@ -55,7 +55,6 @@ var oldConfigMap = new(v1.ConfigMap)
var one = int32(1)
var preemptPolicyNever = v1.PreemptNever
var preemptPolicyPreemptLower = v1.PreemptLowerPriority
-var annotation = "ann-" + common.RandSeq(10)
var testPod = v1.Pod{
ObjectMeta: metav1.ObjectMeta{
@@ -197,9 +196,8 @@ var _ = BeforeSuite(func() {
kubeClient = k8s.KubeCtl{}
Expect(kubeClient.SetClient()).To(BeNil())
- annotation = "ann-" + common.RandSeq(10)
yunikorn.EnsureYuniKornConfigsPresent()
- yunikorn.UpdateConfigMapWrapper(oldConfigMap, "", annotation)
+ yunikorn.UpdateConfigMapWrapper(oldConfigMap, "")
By("Port-forward the scheduler pod")
err := kubeClient.PortForwardYkSchedulerPod()
@@ -234,5 +232,5 @@ var _ = AfterSuite(func() {
err = kubeClient.DeletePriorityClass(testPreemptPriorityClass.Name)
Ω(err).ShouldNot(HaveOccurred())
- yunikorn.RestoreConfigMapWrapper(oldConfigMap, annotation)
+ yunikorn.RestoreConfigMapWrapper(oldConfigMap)
})
diff --git a/test/e2e/basic_scheduling/basic_scheduling_test.go b/test/e2e/basic_scheduling/basic_scheduling_test.go
index cca1674b..f6d74801 100644
--- a/test/e2e/basic_scheduling/basic_scheduling_test.go
+++ b/test/e2e/basic_scheduling/basic_scheduling_test.go
@@ -39,7 +39,6 @@ var restClient yunikorn.RClient
var sleepRespPod *v1.Pod
var dev = "dev" + common.RandSeq(5)
var appsInfo *dao.ApplicationDAOInfo
-var annotation = "ann-" + common.RandSeq(10)
var oldConfigMap = new(v1.ConfigMap)
// Define sleepPod
@@ -60,7 +59,7 @@ var _ = ginkgo.BeforeSuite(func() {
err := kClient.PortForwardYkSchedulerPod()
Ω(err).NotTo(HaveOccurred())
- yunikorn.UpdateConfigMapWrapper(oldConfigMap, "fifo", annotation)
+ yunikorn.UpdateConfigMapWrapper(oldConfigMap, "fifo")
ginkgo.By("create development namespace")
ns1, err := kClient.CreateNamespace(dev, nil)
@@ -86,7 +85,7 @@ var _ = ginkgo.AfterSuite(func() {
err := kClient.TearDownNamespace(dev)
Ω(err).NotTo(HaveOccurred())
- yunikorn.RestoreConfigMapWrapper(oldConfigMap, annotation)
+ yunikorn.RestoreConfigMapWrapper(oldConfigMap)
})
var _ = ginkgo.Describe("", func() {
diff --git a/test/e2e/bin_packing/bin_packing_suite_test.go b/test/e2e/bin_packing/bin_packing_suite_test.go
index 9bc30b87..ae2b492b 100644
--- a/test/e2e/bin_packing/bin_packing_suite_test.go
+++ b/test/e2e/bin_packing/bin_packing_suite_test.go
@@ -56,7 +56,6 @@ func TestBinPacking(t *testing.T) {
var suiteName string
var oldConfigMap = new(v1.ConfigMap)
-var annotation = "ann-" + common.RandSeq(10)
var kClient = k8s.KubeCtl{} //nolint
var _ = BeforeSuite(func() {
@@ -82,7 +81,7 @@ var _ = BeforeSuite(func() {
*/
By("Enabling new scheduling config with binpacking node sort policy")
yunikorn.EnsureYuniKornConfigsPresent()
- yunikorn.UpdateCustomConfigMapWrapper(oldConfigMap, "fifo", annotation, func(sc *configs.SchedulerConfig) error {
+ yunikorn.UpdateCustomConfigMapWrapper(oldConfigMap, "fifo", func(sc *configs.SchedulerConfig) error {
setErr := common.SetSchedulingPolicy(sc, "default", "root", "fifo")
if setErr != nil {
return setErr
@@ -112,7 +111,7 @@ var _ = BeforeSuite(func() {
})
var _ = AfterSuite(func() {
- yunikorn.RestoreConfigMapWrapper(oldConfigMap, annotation)
+ yunikorn.RestoreConfigMapWrapper(oldConfigMap)
})
var Describe = ginkgo.Describe
diff --git a/test/e2e/configmap/configmap_test.go b/test/e2e/configmap/configmap_test.go
index 658c537d..434d8f2a 100644
--- a/test/e2e/configmap/configmap_test.go
+++ b/test/e2e/configmap/configmap_test.go
@@ -102,7 +102,7 @@ var _ = Describe("ConfigMap", func() {
AfterEach(func() {
tests.DumpClusterInfoIfSpecFailed(suiteName, []string{"default"})
- yunikorn.RestoreConfigMapWrapper(oldConfigMap, "")
+ yunikorn.RestoreConfigMapWrapper(oldConfigMap)
})
})
diff --git a/test/e2e/framework/helpers/yunikorn/wrappers.go b/test/e2e/framework/helpers/yunikorn/wrappers.go
index 07476eb4..e0ccb778 100644
--- a/test/e2e/framework/helpers/yunikorn/wrappers.go
+++ b/test/e2e/framework/helpers/yunikorn/wrappers.go
@@ -63,17 +63,17 @@ func EnsureYuniKornConfigsPresent() {
}
}
-func UpdateConfigMapWrapper(oldConfigMap *v1.ConfigMap, schedPolicy string, annotation string) {
- UpdateCustomConfigMapWrapper(oldConfigMap, schedPolicy, annotation, func(sc *configs.SchedulerConfig) error {
+func UpdateConfigMapWrapper(oldConfigMap *v1.ConfigMap, schedPolicy string) {
+ UpdateCustomConfigMapWrapper(oldConfigMap, schedPolicy, func(sc *configs.SchedulerConfig) error {
return nil
})
}
-func UpdateCustomConfigMapWrapper(oldConfigMap *v1.ConfigMap, schedPolicy string, annotation string, mutator func(sc *configs.SchedulerConfig) error) {
- UpdateCustomConfigMapWrapperWithMap(oldConfigMap, schedPolicy, annotation, nil, mutator)
+func UpdateCustomConfigMapWrapper(oldConfigMap *v1.ConfigMap, schedPolicy string, mutator func(sc *configs.SchedulerConfig) error) {
+ UpdateCustomConfigMapWrapperWithMap(oldConfigMap, schedPolicy, nil, mutator)
}
-func UpdateCustomConfigMapWrapperWithMap(oldConfigMap *v1.ConfigMap, schedPolicy string, annotation string, customMap map[string]string, mutator func(sc *configs.SchedulerConfig) error) {
+func UpdateCustomConfigMapWrapperWithMap(oldConfigMap *v1.ConfigMap, schedPolicy string, customMap map[string]string, mutator func(sc *configs.SchedulerConfig) error) {
Ω(k.SetClient()).To(BeNil())
By("Port-forward the scheduler pod")
fwdErr := k.PortForwardYkSchedulerPod()
@@ -121,7 +121,7 @@ func UpdateCustomConfigMapWrapperWithMap(oldConfigMap *v1.ConfigMap, schedPolicy
Ω(err).NotTo(HaveOccurred())
}
-func RestoreConfigMapWrapper(oldConfigMap *v1.ConfigMap, annotation string) {
+func RestoreConfigMapWrapper(oldConfigMap *v1.ConfigMap) {
Ω(k.SetClient()).To(BeNil())
By("Restoring the old config maps")
var c, err = k.GetConfigMaps(configmanager.YuniKornTestConfig.YkNamespace,
diff --git a/test/e2e/gang_scheduling/gang_scheduling_suite_test.go b/test/e2e/gang_scheduling/gang_scheduling_suite_test.go
index 1a73ac06..1f7bbc04 100644
--- a/test/e2e/gang_scheduling/gang_scheduling_suite_test.go
+++ b/test/e2e/gang_scheduling/gang_scheduling_suite_test.go
@@ -55,19 +55,17 @@ func TestGangScheduling(t *testing.T) {
var suiteName string
var oldConfigMap = new(v1.ConfigMap)
-var annotation = "ann-" + common.RandSeq(10)
var kClient = k8s.KubeCtl{} //nolint
var _ = BeforeSuite(func() {
_, filename, _, _ := runtime.Caller(0)
suiteName = common.GetSuiteName(filename)
- annotation = "ann-" + common.RandSeq(10)
yunikorn.EnsureYuniKornConfigsPresent()
- yunikorn.UpdateConfigMapWrapper(oldConfigMap, "fifo", annotation)
+ yunikorn.UpdateConfigMapWrapper(oldConfigMap, "fifo")
})
var _ = AfterSuite(func() {
- yunikorn.RestoreConfigMapWrapper(oldConfigMap, annotation)
+ yunikorn.RestoreConfigMapWrapper(oldConfigMap)
})
// Declarations for Ginkgo DSL
diff --git a/test/e2e/node_resources/node_resources_suite_test.go b/test/e2e/node_resources/node_resources_suite_test.go
index de1b95ef..181be5e6 100644
--- a/test/e2e/node_resources/node_resources_suite_test.go
+++ b/test/e2e/node_resources/node_resources_suite_test.go
@@ -54,18 +54,16 @@ func TestNodeResources(t *testing.T) {
var suiteName string
var oldConfigMap = new(v1.ConfigMap)
-var annotation = "ann-" + common.RandSeq(10)
var _ = BeforeSuite(func() {
_, filename, _, _ := runtime.Caller(0)
suiteName = common.GetSuiteName(filename)
- annotation = "ann-" + common.RandSeq(10)
yunikorn.EnsureYuniKornConfigsPresent()
- yunikorn.UpdateConfigMapWrapper(oldConfigMap, "", annotation)
+ yunikorn.UpdateConfigMapWrapper(oldConfigMap, "")
})
var _ = AfterSuite(func() {
- yunikorn.RestoreConfigMapWrapper(oldConfigMap, annotation)
+ yunikorn.RestoreConfigMapWrapper(oldConfigMap)
})
// Declarations for Ginkgo DSL
diff --git a/test/e2e/predicates/predicates_suite_test.go b/test/e2e/predicates/predicates_suite_test.go
index 36d543f0..f3ea9872 100644
--- a/test/e2e/predicates/predicates_suite_test.go
+++ b/test/e2e/predicates/predicates_suite_test.go
@@ -39,18 +39,16 @@ func init() {
var suiteName string
var oldConfigMap = new(v1.ConfigMap)
-var annotation string
var _ = BeforeSuite(func() {
_, filename, _, _ := runtime.Caller(0)
suiteName = common.GetSuiteName(filename)
- annotation = "ann-" + common.RandSeq(10)
yunikorn.EnsureYuniKornConfigsPresent()
- yunikorn.UpdateConfigMapWrapper(oldConfigMap, "fifo", annotation)
+ yunikorn.UpdateConfigMapWrapper(oldConfigMap, "fifo")
})
var _ = AfterSuite(func() {
- yunikorn.RestoreConfigMapWrapper(oldConfigMap, annotation)
+ yunikorn.RestoreConfigMapWrapper(oldConfigMap)
})
func TestPredicates(t *testing.T) {
diff --git a/test/e2e/preemption/preemption_test.go b/test/e2e/preemption/preemption_test.go
index bf3cb4ce..001c6288 100644
--- a/test/e2e/preemption/preemption_test.go
+++ b/test/e2e/preemption/preemption_test.go
@@ -44,7 +44,6 @@ var restClient yunikorn.RClient
var ns *v1.Namespace
var dev string
var oldConfigMap = new(v1.ConfigMap)
-var annotation = "ann-" + common.RandSeq(10)
// Nodes
var Worker = ""
@@ -141,8 +140,7 @@ var _ = ginkgo.Describe("Preemption", func() {
ginkgo.By("A queue uses resource more than the guaranteed value even after removing one of the pods. The cluster doesn't have enough resource to deploy a pod in another queue which uses resource less than the guaranteed value.")
// update config
ginkgo.By(fmt.Sprintf("Update root.sandbox1 and root.sandbox2 with guaranteed memory %dM", sleepPodMemLimit))
- annotation = "ann-" + common.RandSeq(10)
- yunikorn.UpdateCustomConfigMapWrapper(oldConfigMap, "", annotation, func(sc *configs.SchedulerConfig) error {
+ yunikorn.UpdateCustomConfigMapWrapper(oldConfigMap, "", func(sc *configs.SchedulerConfig) error {
// remove placement rules so we can control queue
sc.Partitions[0].PlacementRules = nil
@@ -204,8 +202,7 @@ var _ = ginkgo.Describe("Preemption", func() {
ginkgo.By("A queue uses resource less than the guaranteed value can't be preempted.")
// update config
ginkgo.By(fmt.Sprintf("Update root.sandbox1 and root.sandbox2 with guaranteed memory %dM", WorkerMemRes))
- annotation = "ann-" + common.RandSeq(10)
- yunikorn.UpdateCustomConfigMapWrapper(oldConfigMap, "", annotation, func(sc *configs.SchedulerConfig) error {
+ yunikorn.UpdateCustomConfigMapWrapper(oldConfigMap, "", func(sc *configs.SchedulerConfig) error {
// remove placement rules so we can control queue
sc.Partitions[0].PlacementRules = nil
@@ -271,8 +268,7 @@ var _ = ginkgo.Describe("Preemption", func() {
ginkgo.By("The preemption can't go outside the fence.")
// update config
ginkgo.By(fmt.Sprintf("Update root.sandbox1 and root.sandbox2 with guaranteed memory %dM. The root.sandbox2 has fence preemption policy.", sleepPodMemLimit))
- annotation = "ann-" + common.RandSeq(10)
- yunikorn.UpdateCustomConfigMapWrapper(oldConfigMap, "", annotation, func(sc *configs.SchedulerConfig) error {
+ yunikorn.UpdateCustomConfigMapWrapper(oldConfigMap, "", func(sc *configs.SchedulerConfig) error {
// remove placement rules so we can control queue
sc.Partitions[0].PlacementRules = nil
@@ -338,8 +334,7 @@ var _ = ginkgo.Describe("Preemption", func() {
ginkgo.By("A task can only preempt a task with lower or equal priority")
// update config
ginkgo.By(fmt.Sprintf("Update root.sandbox1, root.low-priority, root.high-priority with guaranteed memory %dM", sleepPodMemLimit))
- annotation = "ann-" + common.RandSeq(10)
- yunikorn.UpdateCustomConfigMapWrapper(oldConfigMap, "", annotation, func(sc *configs.SchedulerConfig) error {
+ yunikorn.UpdateCustomConfigMapWrapper(oldConfigMap, "", func(sc *configs.SchedulerConfig) error {
// remove placement rules so we can control queue
sc.Partitions[0].PlacementRules = nil
@@ -433,8 +428,7 @@ var _ = ginkgo.Describe("Preemption", func() {
ginkgo.By("The value of 'false' for the allow preemption annotation on the PriorityClass moves the Pod to the back of the preemption list")
// update config
ginkgo.By(fmt.Sprintf("Update root.sandbox3, root.sandbox4 and root.sandbox5 with guaranteed memory %dM", sleepPodMemLimit2))
- annotation = "ann-" + common.RandSeq(10)
- yunikorn.UpdateCustomConfigMapWrapper(oldConfigMap, "", annotation, func(sc *configs.SchedulerConfig) error {
+ yunikorn.UpdateCustomConfigMapWrapper(oldConfigMap, "", func(sc *configs.SchedulerConfig) error {
// remove placement rules so we can control queue
sc.Partitions[0].PlacementRules = nil
@@ -560,7 +554,7 @@ var _ = ginkgo.Describe("Preemption", func() {
// reset config
ginkgo.By("Restoring YuniKorn configuration")
- yunikorn.RestoreConfigMapWrapper(oldConfigMap, annotation)
+ yunikorn.RestoreConfigMapWrapper(oldConfigMap)
})
})
diff --git a/test/e2e/priority_scheduling/priority_scheduling_suite_test.go b/test/e2e/priority_scheduling/priority_scheduling_suite_test.go
index b5f40f77..538ce0d8 100644
--- a/test/e2e/priority_scheduling/priority_scheduling_suite_test.go
+++ b/test/e2e/priority_scheduling/priority_scheduling_suite_test.go
@@ -85,7 +85,6 @@ var normalPriorityClass = schedulingv1.PriorityClass{
PreemptionPolicy: &preemptPolicyNever,
}
-var annotation = "ann-" + common.RandSeq(10)
var oldConfigMap = new(v1.ConfigMap)
var _ = ginkgo.BeforeSuite(func() {
@@ -95,9 +94,8 @@ var _ = ginkgo.BeforeSuite(func() {
kubeClient = k8s.KubeCtl{}
Expect(kubeClient.SetClient()).To(BeNil())
- annotation = "ann-" + common.RandSeq(10)
yunikorn.EnsureYuniKornConfigsPresent()
- yunikorn.UpdateConfigMapWrapper(oldConfigMap, "", annotation)
+ yunikorn.UpdateConfigMapWrapper(oldConfigMap, "")
By(fmt.Sprintf("Creating priority class %s", lowPriorityClass.Name))
_, err = kubeClient.CreatePriorityClass(&lowPriorityClass)
@@ -129,7 +127,7 @@ var _ = ginkgo.AfterSuite(func() {
err = kubeClient.DeletePriorityClass(lowPriorityClass.Name)
Ω(err).ShouldNot(HaveOccurred())
- yunikorn.RestoreConfigMapWrapper(oldConfigMap, annotation)
+ yunikorn.RestoreConfigMapWrapper(oldConfigMap)
})
var By = ginkgo.By
diff --git a/test/e2e/priority_scheduling/priority_scheduling_test.go b/test/e2e/priority_scheduling/priority_scheduling_test.go
index dc3540de..d1d50e3a 100644
--- a/test/e2e/priority_scheduling/priority_scheduling_test.go
+++ b/test/e2e/priority_scheduling/priority_scheduling_test.go
@@ -56,7 +56,6 @@ var _ = ginkgo.Describe("PriorityScheduling", func() {
var namespace *v1.Namespace
var err error
var oldConfigMap = new(v1.ConfigMap)
- var annotation string
var sleepPodConf, lowPodConf, normalPodConf, highPodConf k8s.TestPodConfig
ginkgo.BeforeEach(func() {
@@ -70,8 +69,7 @@ var _ = ginkgo.Describe("PriorityScheduling", func() {
ginkgo.It("Verify_Static_Queue_App_Scheduling_Order", func() {
By("Setting custom YuniKorn configuration")
- annotation = "ann-" + common.RandSeq(10)
- yunikorn.UpdateCustomConfigMapWrapper(oldConfigMap, "fifo", annotation, func(sc *configs.SchedulerConfig) error {
+ yunikorn.UpdateCustomConfigMapWrapper(oldConfigMap, "fifo", func(sc *configs.SchedulerConfig) error {
// remove placement rules so we can control queue
sc.Partitions[0].PlacementRules = nil
@@ -136,8 +134,7 @@ var _ = ginkgo.Describe("PriorityScheduling", func() {
ginkgo.It("Verify_Dynamic_Queue_App_Scheduling_Order", func() {
By("Setting custom YuniKorn configuration")
- annotation = "ann-" + common.RandSeq(10)
- yunikorn.UpdateConfigMapWrapper(oldConfigMap, "fifo", annotation)
+ yunikorn.UpdateConfigMapWrapper(oldConfigMap, "fifo")
By(fmt.Sprintf("Update test namespace quota %s", ns))
namespace, err = kubeClient.UpdateNamespace(ns, map[string]string{
@@ -181,8 +178,7 @@ var _ = ginkgo.Describe("PriorityScheduling", func() {
ginkgo.It("Verify_Priority_Offset_Queue_App_Scheduling_Order", func() {
By("Setting custom YuniKorn configuration")
- annotation = "ann-" + common.RandSeq(10)
- yunikorn.UpdateCustomConfigMapWrapper(oldConfigMap, "fifo", annotation, func(sc *configs.SchedulerConfig) error {
+ yunikorn.UpdateCustomConfigMapWrapper(oldConfigMap, "fifo", func(sc *configs.SchedulerConfig) error {
// remove placement rules so we can control queue
sc.Partitions[0].PlacementRules = nil
@@ -256,8 +252,7 @@ var _ = ginkgo.Describe("PriorityScheduling", func() {
ginkgo.It("Verify_Gang_Scheduling_With_Priority", func() {
By("Setting custom YuniKorn configuration")
- annotation = "ann-" + common.RandSeq(10)
- yunikorn.UpdateCustomConfigMapWrapper(oldConfigMap, "fifo", annotation, func(sc *configs.SchedulerConfig) error {
+ yunikorn.UpdateCustomConfigMapWrapper(oldConfigMap, "fifo", func(sc *configs.SchedulerConfig) error {
// remove placement rules so we can control queue
sc.Partitions[0].PlacementRules = nil
@@ -405,7 +400,7 @@ var _ = ginkgo.Describe("PriorityScheduling", func() {
Ω(err).ShouldNot(HaveOccurred())
By("Restoring YuniKorn configuration")
- yunikorn.RestoreConfigMapWrapper(oldConfigMap, annotation)
+ yunikorn.RestoreConfigMapWrapper(oldConfigMap)
})
})
diff --git a/test/e2e/queue_quota_mgmt/queue_quota_mgmt_suite_test.go b/test/e2e/queue_quota_mgmt/queue_quota_mgmt_suite_test.go
index e1422913..d6a7452d 100644
--- a/test/e2e/queue_quota_mgmt/queue_quota_mgmt_suite_test.go
+++ b/test/e2e/queue_quota_mgmt/queue_quota_mgmt_suite_test.go
@@ -41,17 +41,16 @@ func init() {
var suiteName string
var oldConfigMap = new(v1.ConfigMap)
-var annotation = "ann-" + common.RandSeq(10)
var _ = BeforeSuite(func() {
_, filename, _, _ := runtime.Caller(0)
suiteName = common.GetSuiteName(filename)
yunikorn.EnsureYuniKornConfigsPresent()
- yunikorn.UpdateConfigMapWrapper(oldConfigMap, "", annotation)
+ yunikorn.UpdateConfigMapWrapper(oldConfigMap, "")
})
var _ = AfterSuite(func() {
- yunikorn.RestoreConfigMapWrapper(oldConfigMap, annotation)
+ yunikorn.RestoreConfigMapWrapper(oldConfigMap)
})
func TestQueueQuotaMgmt(t *testing.T) {
diff --git a/test/e2e/recovery_and_restart/recovery_and_restart_test.go b/test/e2e/recovery_and_restart/recovery_and_restart_test.go
index 8257d85d..d06dd489 100644
--- a/test/e2e/recovery_and_restart/recovery_and_restart_test.go
+++ b/test/e2e/recovery_and_restart/recovery_and_restart_test.go
@@ -53,7 +53,6 @@ var restClient yunikorn.RClient
var oldConfigMap = new(v1.ConfigMap)
var sleepRespPod *v1.Pod
var dev = "dev" + common.RandSeq(5)
-var annotation = "ann-" + common.RandSeq(10)
// Define sleepPod
var sleepPodConfigs = k8s.SleepPodConfig{Name: "sleepjob", NS: dev}
@@ -68,9 +67,8 @@ var _ = ginkgo.BeforeSuite(func() {
// Initializing rest client
restClient = yunikorn.RClient{}
- annotation = "ann-" + common.RandSeq(10)
yunikorn.EnsureYuniKornConfigsPresent()
- yunikorn.UpdateConfigMapWrapper(oldConfigMap, "", annotation)
+ yunikorn.UpdateConfigMapWrapper(oldConfigMap, "")
ginkgo.By("create development namespace")
ns1, err := kClient.CreateNamespace(dev, nil)
diff --git a/test/e2e/resource_fairness/resource_fairness_suite_test.go b/test/e2e/resource_fairness/resource_fairness_suite_test.go
index 7c517978..ff389ec4 100644
--- a/test/e2e/resource_fairness/resource_fairness_suite_test.go
+++ b/test/e2e/resource_fairness/resource_fairness_suite_test.go
@@ -40,18 +40,16 @@ func init() {
var suiteName string
var oldConfigMap = new(v1.ConfigMap)
-var annotation = "ann-" + common.RandSeq(10)
var kClient = k8s.KubeCtl{} //nolint
var _ = BeforeSuite(func() {
_, filename, _, _ := runtime.Caller(0)
suiteName = common.GetSuiteName(filename)
Ω(kClient.SetClient()).To(BeNil())
- annotation = "ann-" + common.RandSeq(10)
yunikorn.EnsureYuniKornConfigsPresent()
})
var _ = AfterSuite(func() {
- yunikorn.RestoreConfigMapWrapper(oldConfigMap, annotation)
+ yunikorn.RestoreConfigMapWrapper(oldConfigMap)
})
func TestResourceFairness(t *testing.T) {
diff --git a/test/e2e/resource_fairness/resource_fairness_test.go b/test/e2e/resource_fairness/resource_fairness_test.go
index f003bc4f..0e6974a7 100644
--- a/test/e2e/resource_fairness/resource_fairness_test.go
+++ b/test/e2e/resource_fairness/resource_fairness_test.go
@@ -54,8 +54,7 @@ var _ = Describe("FairScheduling:", func() {
Ω(kClient.SetClient()).To(BeNil())
By("Setting custom YuniKorn configuration")
- annotation = "ann-" + common.RandSeq(10)
- yunikorn.UpdateCustomConfigMapWrapper(oldConfigMap, "fair", annotation, func(sc *configs.SchedulerConfig) error {
+ yunikorn.UpdateCustomConfigMapWrapper(oldConfigMap, "fair", func(sc *configs.SchedulerConfig) error {
// remove placement rules so we can control queue
sc.Partitions[0].PlacementRules = nil
if err = common.AddQueue(sc, "default", "root", configs.QueueConfig{
diff --git a/test/e2e/simple_preemptor/simple_preemptor_test.go b/test/e2e/simple_preemptor/simple_preemptor_test.go
index 8f551f68..6af05a0c 100644
--- a/test/e2e/simple_preemptor/simple_preemptor_test.go
+++ b/test/e2e/simple_preemptor/simple_preemptor_test.go
@@ -43,7 +43,6 @@ var restClient yunikorn.RClient
var ns *v1.Namespace
var dev = "dev" + common.RandSeq(5)
var oldConfigMap = new(v1.ConfigMap)
-var annotation = "ann-" + common.RandSeq(10)
// Nodes
var Worker1 = ""
@@ -65,9 +64,8 @@ var _ = ginkgo.BeforeSuite(func() {
restClient = yunikorn.RClient{}
Ω(restClient).NotTo(gomega.BeNil())
- annotation = "ann-" + common.RandSeq(10)
yunikorn.EnsureYuniKornConfigsPresent()
- yunikorn.UpdateConfigMapWrapper(oldConfigMap, "", annotation)
+ yunikorn.UpdateConfigMapWrapper(oldConfigMap, "")
ginkgo.By("Port-forward the scheduler pod")
var err = kClient.PortForwardYkSchedulerPod()
@@ -142,7 +140,7 @@ var _ = ginkgo.AfterSuite(func() {
err = kClient.TearDownNamespace(ns.Name)
Ω(err).NotTo(gomega.HaveOccurred())
- yunikorn.RestoreConfigMapWrapper(oldConfigMap, annotation)
+ yunikorn.RestoreConfigMapWrapper(oldConfigMap)
})
var _ = ginkgo.Describe("SimplePreemptor", func() {
diff --git a/test/e2e/spark_jobs_scheduling/spark_jobs_scheduling_suite_test.go b/test/e2e/spark_jobs_scheduling/spark_jobs_scheduling_suite_test.go
index cfe7042b..3d35bab6 100644
--- a/test/e2e/spark_jobs_scheduling/spark_jobs_scheduling_suite_test.go
+++ b/test/e2e/spark_jobs_scheduling/spark_jobs_scheduling_suite_test.go
@@ -39,17 +39,15 @@ func init() {
var suiteName string
var oldConfigMap = new(v1.ConfigMap)
-var annotation = "ann-" + common.RandSeq(10)
var _ = BeforeSuite(func() {
_, filename, _, _ := runtime.Caller(0)
suiteName = common.GetSuiteName(filename)
- annotation = "ann-" + common.RandSeq(10)
yunikorn.EnsureYuniKornConfigsPresent()
- yunikorn.UpdateConfigMapWrapper(oldConfigMap, "fifo", annotation)
+ yunikorn.UpdateConfigMapWrapper(oldConfigMap, "fifo")
})
var _ = AfterSuite(func() {
- yunikorn.RestoreConfigMapWrapper(oldConfigMap, annotation)
+ yunikorn.RestoreConfigMapWrapper(oldConfigMap)
})
func TestSparkJobs(t *testing.T) {
diff --git a/test/e2e/user_group_limit/user_group_limit_test.go b/test/e2e/user_group_limit/user_group_limit_test.go
index 452cbc30..46aaeb80 100644
--- a/test/e2e/user_group_limit/user_group_limit_test.go
+++ b/test/e2e/user_group_limit/user_group_limit_test.go
@@ -67,7 +67,6 @@ var (
ns *v1.Namespace
dev = "dev" + common.RandSeq(5)
oldConfigMap = new(v1.ConfigMap)
- annotation = "ann-" + common.RandSeq(10)
admissionCustomConfig = map[string]string{
"log.core.scheduler.ugm.level": "debug",
amconf.AMAccessControlBypassAuth: constants.True,
@@ -109,10 +108,9 @@ var _ = ginkgo.AfterSuite(func() {
var _ = ginkgo.Describe("UserGroupLimit", func() {
ginkgo.It("Verify_maxresources_with_a_specific_user_limit", func() {
ginkgo.By("Update config")
- annotation = "ann-" + common.RandSeq(10)
// The wait wrapper still can't fully guarantee that the config in AdmissionController has been updated.
yunikorn.WaitForAdmissionControllerRefreshConfAfterAction(func() {
- yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", annotation, admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
+ yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
// remove placement rules so we can control queue
sc.Partitions[0].PlacementRules = nil
@@ -157,10 +155,9 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
ginkgo.It("Verify_maxapplications_with_a_specific_user_limit", func() {
ginkgo.By("Update config")
- annotation = "ann-" + common.RandSeq(10)
// The wait wrapper still can't fully guarantee that the config in AdmissionController has been updated.
yunikorn.WaitForAdmissionControllerRefreshConfAfterAction(func() {
- yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", annotation, admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
+ yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
// remove placement rules so we can control queue
sc.Partitions[0].PlacementRules = nil
@@ -205,10 +202,9 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
ginkgo.It("Verify_maxresources_with_a_specific_group_limit", func() {
ginkgo.By("Update config")
- annotation = "ann-" + common.RandSeq(10)
// The wait wrapper still can't fully guarantee that the config in AdmissionController has been updated.
yunikorn.WaitForAdmissionControllerRefreshConfAfterAction(func() {
- yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", annotation, admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
+ yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
// remove placement rules so we can control queue
sc.Partitions[0].PlacementRules = nil
@@ -251,10 +247,9 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
ginkgo.It("Verify_maxapplications_with_a_specific_group_limit", func() {
ginkgo.By("Update config")
- annotation = "ann-" + common.RandSeq(10)
// The wait wrapper still can't fully guarantee that the config in AdmissionController has been updated.
yunikorn.WaitForAdmissionControllerRefreshConfAfterAction(func() {
- yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", annotation, admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
+ yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
// remove placement rules so we can control queue
sc.Partitions[0].PlacementRules = nil
@@ -297,10 +292,9 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
ginkgo.It("Verify_maxresources_with_user_limit_lower_than_group_limit", func() {
ginkgo.By("Update config")
- annotation = "ann-" + common.RandSeq(10)
// The wait wrapper still can't fully guarantee that the config in AdmissionController has been updated.
yunikorn.WaitForAdmissionControllerRefreshConfAfterAction(func() {
- yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", annotation, admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
+ yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
// remove placement rules so we can control queue
sc.Partitions[0].PlacementRules = nil
@@ -342,10 +336,9 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
ginkgo.It("Verify_maxresources_with_group_limit_lower_than_user_limit", func() {
ginkgo.By("Update config")
- annotation = "ann-" + common.RandSeq(10)
// The wait wrapper still can't fully guarantee that the config in AdmissionController has been updated.
yunikorn.WaitForAdmissionControllerRefreshConfAfterAction(func() {
- yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", annotation, admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
+ yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
// remove placement rules so we can control queue
sc.Partitions[0].PlacementRules = nil
@@ -387,10 +380,9 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
ginkgo.It("Verify_maxresources_with_a_wildcard_user_limit", func() {
ginkgo.By("Update config")
- annotation = "ann-" + common.RandSeq(10)
// The wait wrapper still can't fully guarantee that the config in AdmissionController has been updated.
yunikorn.WaitForAdmissionControllerRefreshConfAfterAction(func() {
- yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", annotation, admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
+ yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
// remove placement rules so we can control queue
sc.Partitions[0].PlacementRules = nil
@@ -435,10 +427,9 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
ginkgo.It("Verify_maxapplications_with_a_wildcard_user_limit", func() {
ginkgo.By("Update config")
- annotation = "ann-" + common.RandSeq(10)
// The wait wrapper still can't fully guarantee that the config in AdmissionController has been updated.
yunikorn.WaitForAdmissionControllerRefreshConfAfterAction(func() {
- yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", annotation, admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
+ yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
// remove placement rules so we can control queue
sc.Partitions[0].PlacementRules = nil
@@ -483,10 +474,9 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
ginkgo.It("Verify_maxresources_with_a_wildcard_group_limit", func() {
ginkgo.By("Update config")
- annotation = "ann-" + common.RandSeq(10)
// The wait wrapper still can't fully guarantee that the config in AdmissionController has been updated.
yunikorn.WaitForAdmissionControllerRefreshConfAfterAction(func() {
- yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", annotation, admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
+ yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
// remove placement rules so we can control queue
sc.Partitions[0].PlacementRules = nil
@@ -531,10 +521,9 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
ginkgo.It("Verify_maxapplications_with_a_wildcard_group_limit", func() {
ginkgo.By("Update config")
- annotation = "ann-" + common.RandSeq(10)
// The wait wrapper still can't fully guarantee that the config in AdmissionController has been updated.
yunikorn.WaitForAdmissionControllerRefreshConfAfterAction(func() {
- yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", annotation, admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
+ yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
// remove placement rules so we can control queue
sc.Partitions[0].PlacementRules = nil
@@ -589,7 +578,7 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
// reset config
ginkgo.By("Restoring YuniKorn configuration")
- yunikorn.RestoreConfigMapWrapper(oldConfigMap, annotation)
+ yunikorn.RestoreConfigMapWrapper(oldConfigMap)
})
})