[YUNIKORN-1957] Fixing golint issue
rrajesh-cloudera authored and craigcondit committed Sep 6, 2024
1 parent 9d9919d commit 7f70e88
Showing 1 changed file with 19 additions and 9 deletions.
28 changes: 19 additions & 9 deletions test/e2e/user_group_limit/user_group_limit_test.go
@@ -716,8 +716,7 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
// remove placement rules so we can control queue
sc.Partitions[0].PlacementRules = nil
-
-common.AddQueue(sc, constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{
+err := common.AddQueue(sc, constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{
Name: "sandbox1",
Limits: []configs.Limit{
{
@@ -738,6 +737,9 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
},
},
})
+if err != nil {
+return err
+}
return common.AddQueue(sc, constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{Name: "sandbox2"})
})
})
@@ -765,15 +767,15 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
usergroup2Sandbox1Pod3 := deploySleepPod(usergroup2, sandboxQueue1, false, "because final memory usage is more than wildcard maxapplications")
checkUsage(userTestType, user2, sandboxQueue1, []*v1.Pod{usergroup2Sandbox1Pod1, usergroup2Sandbox1Pod2})

-//Update Wildcard user entry limit to 3
+// Update Wildcard user entry limit to 3
ginkgo.By("Update config")
// The wait wrapper still can't fully guarantee that the config in AdmissionController has been updated.
yunikorn.WaitForAdmissionControllerRefreshConfAfterAction(func() {
yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
// remove placement rules so we can control queue
sc.Partitions[0].PlacementRules = nil

-common.AddQueue(sc, constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{
+err := common.AddQueue(sc, constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{
Name: "sandbox1",
Limits: []configs.Limit{
{
@@ -794,6 +796,9 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
},
},
})
+if err != nil {
+return err
+}
return common.AddQueue(sc, constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{Name: "sandbox2"})
})
})
@@ -814,7 +819,7 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
// remove placement rules so we can control queue
sc.Partitions[0].PlacementRules = nil

-common.AddQueue(sc, constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{
+err := common.AddQueue(sc, constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{
Name: "sandbox1",
Limits: []configs.Limit{
{
@@ -834,6 +839,9 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
},
},
}})
+if err != nil {
+return err
+}
return common.AddQueue(sc, constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{Name: "sandbox2"})
})
})
@@ -861,15 +869,15 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
usergroup2 = &si.UserGroupInformation{User: user2, Groups: []string{group2}}
group2Sandbox1Pod3 := deploySleepPod(usergroup2, sandboxQueue1, false, "because final memory usage is more than wildcard maxapplications")
checkUsageWildcardGroups(groupTestType, group2, sandboxQueue1, []*v1.Pod{group2Sandbox1Pod1, group2Sandbox1Pod2})
-//Update Wildcard group entry limit to 3
+// Update Wildcard group entry limit to 3
ginkgo.By("Update config")
// The wait wrapper still can't fully guarantee that the config in AdmissionController has been updated.
yunikorn.WaitForAdmissionControllerRefreshConfAfterAction(func() {
yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
// remove placement rules so we can control queue
sc.Partitions[0].PlacementRules = nil

-common.AddQueue(sc, constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{
+err := common.AddQueue(sc, constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{
Name: "sandbox1",
Limits: []configs.Limit{
{
@@ -889,6 +897,9 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
},
},
}})
+if err != nil {
+return err
+}
return common.AddQueue(sc, constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{Name: "sandbox2"})
})
})
@@ -901,7 +912,7 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
})

ginkgo.AfterEach(func() {
-//tests.DumpClusterInfoIfSpecFailed(suiteName, []string{ns.Name})
+tests.DumpClusterInfoIfSpecFailed(suiteName, []string{ns.Name})

// Delete all sleep pods
ginkgo.By("Delete all sleep pods")
@@ -1010,5 +1021,4 @@ func checkUsageWildcardGroups(testType TestType, name string, queuePath string,
Ω(resourceUsageDAO.ResourceUsage).NotTo(gomega.BeNil())
Ω(resourceUsageDAO.ResourceUsage.Resources["pods"]).To(gomega.Equal(resources.Quantity(len(expectedRunningPods))))
Ω(resourceUsageDAO.RunningApplications).To(gomega.ConsistOf(appIDs...))
-
}
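
For context: the golint finding fixed here is Go's unchecked-error rule. common.AddQueue returns an error that the test callbacks previously discarded; the diff captures that error and propagates it out of the config-update callback. The sketch below shows the same pattern in a self-contained form. QueueConfig, SchedulerConfig, and addQueue are simplified stand-ins for the yunikorn-core types and the e2e helper (assumed shapes, not the real definitions); only the error-handling flow is taken from the diff.

package main

import "fmt"

// Simplified stand-ins for the yunikorn-core configs types.
type QueueConfig struct {
    Name   string
    Queues []QueueConfig
}

type PartitionConfig struct {
    Name   string
    Queues []QueueConfig
}

type SchedulerConfig struct {
    Partitions []PartitionConfig
}

// addQueue mimics the shape of the test helper common.AddQueue: it attaches a
// queue under the named parent and reports failure through an error value.
func addQueue(sc *SchedulerConfig, partition, parent string, queue QueueConfig) error {
    for i := range sc.Partitions {
        if sc.Partitions[i].Name != partition {
            continue
        }
        for j := range sc.Partitions[i].Queues {
            if sc.Partitions[i].Queues[j].Name == parent {
                sc.Partitions[i].Queues[j].Queues = append(sc.Partitions[i].Queues[j].Queues, queue)
                return nil
            }
        }
    }
    return fmt.Errorf("parent queue %q not found in partition %q", parent, partition)
}

// updateConfig mirrors the callback passed to UpdateCustomConfigMapWrapperWithMap
// after the fix: each addQueue result is checked and propagated, not dropped.
func updateConfig(sc *SchedulerConfig) error {
    if err := addQueue(sc, "default", "root", QueueConfig{Name: "sandbox1"}); err != nil {
        return err
    }
    // The last call's error doubles as the callback's return value.
    return addQueue(sc, "default", "root", QueueConfig{Name: "sandbox2"})
}

func main() {
    sc := &SchedulerConfig{Partitions: []PartitionConfig{
        {Name: "default", Queues: []QueueConfig{{Name: "root"}}},
    }}
    if err := updateConfig(sc); err != nil {
        fmt.Println("config update failed:", err)
        return
    }
    fmt.Printf("root child queues: %+v\n", sc.Partitions[0].Queues[0].Queues)
}

The diff uses the two-statement form (err := common.AddQueue(...) followed by if err != nil { return err }) because the call spans many lines; the if err := ...; err != nil one-liner used for sandbox1 above is the equivalent idiom, and returning the final call's error directly, as the diff does for sandbox2, avoids a redundant last check.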
