Randomize Unit Tests #108

Merged (3 commits) on Dec 12, 2023
4 changes: 3 additions & 1 deletion Makefile
@@ -180,11 +180,13 @@ test: test-no-verify verify-unchanged ## Generate and format code, run tests, ge
.PHONY: test-no-verify
# -r: If set, ginkgo finds and runs test suites under the current directory recursively.
# --keep-going: If set, failures from earlier test suites do not prevent later test suites from running.
# --randomize-all: If set, ginkgo will randomize all specs together.
# By default, ginkgo only randomizes the top-level Describe, Context and When containers.
# --require-suite: If set, Ginkgo fails if there are ginkgo tests in a directory but no invocation of RunSpecs.
# --vv: If set, emits with maximal verbosity - includes skipped and pending tests.
test-no-verify: manifests generate go-verify fmt vet fix-imports envtest ginkgo # Generate and format code, and run tests
KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(ENVTEST_DIR)/$(ENVTEST_VERSION) -p path)" \
$(GINKGO) -r --keep-going --require-suite --vv -coverprofile cover.out ./pkg/... ./controllers/...
$(GINKGO) -r --keep-going --randomize-all --require-suite --vv --coverprofile cover.out ./pkg/... ./controllers/...

.PHONY: bundle-run
export BUNDLE_RUN_NAMESPACE ?= openshift-operators
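The flag comments above describe the behaviour change this PR relies on: by default Ginkgo randomizes only the order of the top-level containers, whereas --randomize-all shuffles every spec individually, which is what exposes hidden ordering dependencies between specs. As a minimal sketch of what that means for a suite layout (illustrative only, not code from this repository; the package, suite and spec names are made up):

```go
package example_test

import (
	"testing"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

func TestExample(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Example Suite")
}

// With default randomization only the order of "container A" and "container B"
// is shuffled, so "spec A1" always runs before "spec A2". With --randomize-all
// all three specs are shuffled individually, so A2 may run before A1.
var _ = Describe("container A", func() {
	It("spec A1", func() { Expect(1 + 1).To(Equal(2)) })
	It("spec A2", func() { Expect(2 * 2).To(Equal(4)) })
})

var _ = Describe("container B", func() {
	It("spec B1", func() { Expect(true).To(BeTrue()) })
})
```

A failing randomized ordering can then be reproduced by rerunning Ginkgo with the seed it printed (the --seed flag).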
52 changes: 28 additions & 24 deletions controllers/fenceagentsremediation_controller_test.go
@@ -100,6 +100,7 @@ var _ = Describe("FAR Controller", func() {
invalidValTestFAR := getFenceAgentsRemediation(workerNode, fenceAgentIPMI, invalidShareParam, testNodeParam)
invalidShareString, err := buildFenceAgentParams(invalidValTestFAR)
Expect(err).NotTo(HaveOccurred())
underTestFAR.ObjectMeta.Name = workerNode
validShareString, err := buildFenceAgentParams(underTestFAR)
Expect(err).NotTo(HaveOccurred())
// Eventually buildFenceAgentParams would return the same shareParam
@@ -126,7 +127,6 @@ var _ = Describe("FAR Controller", func() {
nodeKey := client.ObjectKey{Name: workerNode}
farNamespacedName := client.ObjectKey{Name: workerNode, Namespace: defaultNamespace}
farNoExecuteTaint := utils.CreateFARNoExecuteTaint()
resourceDeletionWasTriggered := true // corresponds to testVADeletion bool value
conditionStatusPointer := func(status metav1.ConditionStatus) *metav1.ConditionStatus { return &status }
BeforeEach(func() {
// Create two VAs and two pods, and at the end clean them up with DeferCleanup
@@ -149,9 +149,12 @@ var _ = Describe("FAR Controller", func() {
When("creating valid FAR CR", func() {
BeforeEach(func() {
node = utils.GetNode("", workerNode)
underTestFAR = getFenceAgentsRemediation(workerNode, fenceAgentIPMI, testShareParam, testNodeParam)
})
It("should have finalizer, taint, while the two VAs and one pod will be deleted", func() {
By("Searching for remediation taint")
nodeKey = client.ObjectKey{Name: workerNode}
farNamespacedName = client.ObjectKey{Name: workerNode, Namespace: defaultNamespace}
Eventually(func(g Gomega) bool {
g.Expect(k8sClient.Get(context.Background(), nodeKey, node)).To(Succeed())
g.Expect(k8sClient.Get(context.Background(), farNamespacedName, underTestFAR)).To(Succeed())
@@ -164,7 +167,7 @@ var _ = Describe("FAR Controller", func() {
Expect(controllerutil.ContainsFinalizer(underTestFAR, v1alpha1.FARFinalizer)).To(BeTrue())

By("Not having any test pod")
testPodDeletion(testPodName, resourceDeletionWasTriggered)
verifyPodDeleted(testPodName)

By("Verifying correct conditions for successfull remediation")
Expect(underTestFAR.Status.LastUpdateTime).ToNot(BeNil())
@@ -180,11 +183,11 @@ })
})
It("should not have a finalizer nor taint, while the two VAs and one pod will remain", func() {
By("Not finding a matching node to FAR CR's name")
nodeKey.Name = underTestFAR.Name
nodeKey = client.ObjectKey{Name: underTestFAR.Name}
Expect(k8sClient.Get(context.Background(), nodeKey, node)).To(Not(Succeed()))

By("Not having finalizer")
farNamespacedName.Name = underTestFAR.Name
farNamespacedName = client.ObjectKey{Name: underTestFAR.Name, Namespace: defaultNamespace}
Consistently(func(g Gomega) bool {
g.Expect(k8sClient.Get(context.Background(), farNamespacedName, underTestFAR)).To(Succeed())
return controllerutil.ContainsFinalizer(underTestFAR, v1alpha1.FARFinalizer)
Expand All @@ -195,8 +198,7 @@ var _ = Describe("FAR Controller", func() {
Expect(utils.TaintExists(node.Spec.Taints, &farNoExecuteTaint)).To(BeFalse())

By("Still having one test pod")
resourceDeletionWasTriggered = false
testPodDeletion(testPodName, resourceDeletionWasTriggered)
verifyPodExists(testPodName)

By("Verifying correct conditions for unsuccessfull remediation")
Expect(underTestFAR.Status.LastUpdateTime).ToNot(BeNil())
@@ -311,30 +313,32 @@ func cliCommandsEquality(far *v1alpha1.FenceAgentsRemediation) (bool, error) {
return isEqualStringLists(mocksExecuter.command, expectedCommand), nil
}

// testPodDeletion tests whether the pod no longer exist for successful FAR CR
// and consistently check if the pod exist and was not deleted
func testPodDeletion(podName string, resourceDeletionWasTriggered bool) {
// verifyPodDeleted verifies that the pod no longer exists after a successful FAR CR
func verifyPodDeleted(podName string) {
pod := &corev1.Pod{}
podKey := client.ObjectKey{
Namespace: defaultNamespace,
Name: podName,
}
if resourceDeletionWasTriggered {
EventuallyWithOffset(1, func() bool {
pod := &corev1.Pod{}
err := k8sClient.Get(context.Background(), podKey, pod)
return apierrors.IsNotFound(err)

}, timeoutDeletion, pollInterval).Should(BeTrue())
log.Info("Pod is no longer exist", "pod", podName)
} else {
ConsistentlyWithOffset(1, func() bool {
pod := &corev1.Pod{}
err := k8sClient.Get(context.Background(), podKey, pod)
return apierrors.IsNotFound(err)
EventuallyWithOffset(1, func() bool {
err := k8sClient.Get(context.Background(), podKey, pod)
return apierrors.IsNotFound(err)
}, timeoutDeletion, pollInterval).Should(BeTrue())
log.Info("Pod is no longer exist", "pod", podName)
}

}, timeoutDeletion, pollInterval).Should(BeFalse())
log.Info("Pod exist", "pod", podName)
// verifyPodExists verifies that the pod still exists and was not deleted
func verifyPodExists(podName string) {
pod := &corev1.Pod{}
podKey := client.ObjectKey{
Namespace: defaultNamespace,
Name: podName,
}
ConsistentlyWithOffset(1, func() bool {
err := k8sClient.Get(context.Background(), podKey, pod)
return apierrors.IsNotFound(err)
}, timeoutDeletion, pollInterval).Should(BeFalse())
log.Info("Pod exist", "pod", podName)
}

// verifyStatusCondition checks that the status condition is either not set or, if set, has the expected value
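The test-file changes above follow one pattern: once --randomize-all is on, a spec can no longer rely on state left behind by an earlier spec, so shared fixtures are rebuilt per spec (underTestFAR is now created in BeforeEach, and nodeKey/farNamespacedName are re-derived inside the spec). A minimal sketch of that pattern, meant to sit inside the existing controllers test suite and reusing its helpers (the container text and the underTest variable are hypothetical, and it assumes getFenceAgentsRemediation returns a *v1alpha1.FenceAgentsRemediation, as its use above suggests):

```go
// Sketch only: relies on helpers and constants already defined in
// controllers/fenceagentsremediation_controller_test.go.
var _ = Describe("randomization-safe fixtures", func() {
	// Shared between specs, so it is rebuilt before every spec rather than
	// once when the container tree is constructed.
	var underTest *v1alpha1.FenceAgentsRemediation

	BeforeEach(func() {
		// A fresh CR per spec: whatever spec happened to run earlier cannot
		// leak a mutated name or field into this one.
		underTest = getFenceAgentsRemediation(workerNode, fenceAgentIPMI, testShareParam, testNodeParam)
	})

	It("starts from fresh state regardless of spec order", func() {
		Expect(underTest).NotTo(BeNil())
	})
})
```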