Typo fix
Function declarations, comments, it's vs. its confusion, etc.
razo7 committed Jul 27, 2023
1 parent a99eff0 commit f6921cd
Showing 5 changed files with 22 additions and 24 deletions.
4 changes: 2 additions & 2 deletions README.md
@@ -42,15 +42,15 @@ Then, run `operator-sdk run bundle quay.io/medik8s/fence-agents-remediation-oper
FAR is recommended for use with NHC to create a complete solution for unhealthy nodes, since NHC detects unhealthy nodes and creates an external remediation CR, e.g., FAR's CR, for unhealthy nodes.
This automated way is preferable as it gives the responsibility for FAR CRs (creation and deletion) to NHC; FAR can also act as a standalone remediator, but at the expense of the administrator having to create and delete CRs.

- Either way a user must be familiar with the fence agent to be used - knowing it's parameters and any other requirements on the cluster (e.g., fence_ipmilan needs machines that support IPMI).
+ Either way a user must be familiar with the fence agent to be used - knowing its parameters and any other requirements on the cluster (e.g., fence_ipmilan needs machines that support IPMI).

### FAR with NHC

* Install FAR using one of the above options ([Installation](#installation)).

* Load the yaml manifest of the FAR template (see below).

- * Modify NHC CR to use FAR as it's remediator -
+ * Modify NHC CR to use FAR as its remediator -
This is basically a specific use case of an [external remediation of NHC CR](https://github.com/medik8s/node-healthcheck-operator#external-remediation-resources).
To set it up, please make sure that Node Health Check is running and the FAR controller exists, and then create the necessary CRs (*FenceAgentsRemediationTemplate* and then *NodeHealthCheck*).

4 changes: 2 additions & 2 deletions controllers/fenceagentsremediation_controller.go
@@ -133,7 +133,7 @@ func (r *FenceAgentsRemediationReconciler) Reconcile(ctx context.Context, req ct
r.Log.Info("Fetch FAR's pod")
pod, err := utils.GetFenceAgentsRemediationPod(r.Client)
if err != nil {
r.Log.Error(err, "Can't find FAR's pod by it's label", "CR's Name", req.Name)
r.Log.Error(err, "Can't find FAR's pod by its label", "CR's Name", req.Name)
return emptyResult, err
}
//TODO: Check that FA is executable? run cli.IsExecuteable
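The hunk above calls `utils.GetFenceAgentsRemediationPod`, whose body is outside this diff. A minimal sketch of what such a label-based lookup involves — the `faPodLabels` value and the running-pod filter are assumptions, not the repository's verified code:

```go
// Hypothetical sketch — faPodLabels and the selection logic are assumptions.
package utils

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// faPodLabels stands in for the operator's real pod label selector.
var faPodLabels = map[string]string{"app.kubernetes.io/name": "fence-agents-remediation"}

// getFenceAgentsRemediationPod lists pods matching the FAR labels and
// returns the first running one.
func getFenceAgentsRemediationPod(ctx context.Context, c client.Client) (*corev1.Pod, error) {
	pods := &corev1.PodList{}
	if err := c.List(ctx, pods, client.MatchingLabels(faPodLabels)); err != nil {
		return nil, err
	}
	for i := range pods.Items {
		if pods.Items[i].Status.Phase == corev1.PodRunning {
			return &pods.Items[i], nil
		}
	}
	return nil, fmt.Errorf("no running pod matches the FAR pod labels")
}
```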
@@ -197,7 +197,7 @@ func buildFenceAgentParams(far *v1alpha1.FenceAgentsRemediation) ([]string, erro
return nil, err
}
}
- // if --action attribute was not selected, then it's default value is reboot
+ // if --action attribute was not selected, then its default value is reboot
// https://github.com/ClusterLabs/fence-agents/blob/main/lib/fencing.py.py#L103
// Therefore we can safely add the reboot action regardless of whether it was initially added to the CR
fenceAgentParams = appendParamToSlice(fenceAgentParams, parameterActionName, parameterActionValue)
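The `appendParamToSlice` helper itself is not shown in this diff. A minimal sketch of the deduplicating append the comment describes — the `--name=value` formatting and the skip-if-present rule are assumptions, not the repository's verified behavior:

```go
// Hypothetical sketch — the "--name=value" format and skip-if-present
// rule are assumptions about appendParamToSlice, not verified code.
package controllers

import "strings"

func appendParamToSlice(params []string, name, value string) []string {
	for _, p := range params {
		// an existing occurrence of the flag wins, so a user-supplied
		// --action in the CR is never overridden by the reboot default
		if p == name || strings.HasPrefix(p, name+"=") {
			return params
		}
	}
	return append(params, name+"="+value)
}
```

With such a helper, `appendParamToSlice(params, "--action", "reboot")` leaves a user-supplied action untouched and only fills in the default.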
22 changes: 10 additions & 12 deletions controllers/fenceagentsremediation_controller_test.go
@@ -124,7 +124,7 @@ var _ = Describe("FAR Controller", func() {
va1 := createVA(vaName1, workerNode)
va2 := createVA(vaName2, workerNode)
testPod := createRunningPod("far-test-1", testPodName, workerNode)
- DeferCleanup(verifyResourceCleanup, va1, va2, testPod)
+ DeferCleanup(cleanupTestedResources, va1, va2, testPod)
farPod := createRunningPod("far-manager-test", farPodName, "")
DeferCleanup(k8sClient.Delete, context.Background(), farPod)
})
@@ -150,7 +150,7 @@ var _ = Describe("FAR Controller", func() {
return utils.TaintExists(node.Spec.Taints, &farNoExecuteTaint) && res
}, 100*time.Millisecond, 10*time.Millisecond).Should(BeTrue(), "taint should be added, and command format is correct")

- // If taint was added, then defenintly the finzlier was added as well
+ // If taint was added, then definitely the finalizer was added as well
By("Having a finalizer if we have a remediation taint")
Expect(controllerutil.ContainsFinalizer(underTestFAR, v1alpha1.FARFinalizer)).To(BeTrue())
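The assertion above also leans on `utils.TaintExists`, which sits outside this diff. A plausible sketch, assuming it delegates to the `MatchTaint` helper from `k8s.io/api/core/v1` (an assumption, not the repository's verified code):

```go
// Hypothetical sketch — assumes TaintExists delegates to core/v1's
// MatchTaint, which compares a taint's key and effect.
package utils

import corev1 "k8s.io/api/core/v1"

// TaintExists reports whether taintToFind is present in taints.
func TaintExists(taints []corev1.Taint, taintToFind *corev1.Taint) bool {
	for i := range taints {
		if taints[i].MatchTaint(taintToFind) {
			return true
		}
	}
	return false
}
```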

@@ -163,17 +163,15 @@
When("creating invalid FAR CR Name", func() {
BeforeEach(func() {
node = utils.GetNode("", workerNode)
- // createVA(vaName1, workerNode)
- // createVA(vaName2, workerNode)
underTestFAR = getFenceAgentsRemediation(dummyNode, fenceAgentIPMI, testShareParam, testNodeParam)
})
It("should not have a finalizer nor taint, while the two VAs and one pod will remain", func() {
By("Not finding a matching node to FAR CR's name")
- nodeKey.Name = dummyNode
+ nodeKey.Name = underTestFAR.Name
Expect(k8sClient.Get(context.Background(), nodeKey, node)).To(Not(Succeed()))

By("Not having finalizer")
- farNamespacedName.Name = dummyNode
+ farNamespacedName.Name = underTestFAR.Name
Eventually(func() bool {
Expect(k8sClient.Get(context.Background(), farNamespacedName, underTestFAR)).To(Succeed())
return controllerutil.ContainsFinalizer(underTestFAR, v1alpha1.FARFinalizer)
@@ -210,10 +208,10 @@ func buildPod(containerName, podName, nodeName string) *corev1.Pod {
pod := &corev1.Pod{}
pod.Name = podName
if podName == farPodName {
- // only when we build FAR pod then we add it's label
+ // only when we build FAR pod then we add its label
pod.Labels = faPodLabels
} else {
- // testedPod should be reside on unhealthy node
+ // testedPod should reside in unhealthy node
pod.Spec.NodeName = nodeName
}
pod.Namespace = defaultNamespace
@@ -225,7 +223,7 @@ func buildPod(containerName, podName, nodeName string) *corev1.Pod {
return pod
}

- // createRunningPod builds a new pod, creates it, and sets it's status as running
+ // createRunningPod builds a new pod, creates it, and sets its status as running
func createRunningPod(containerName, podName, nodeName string) *corev1.Pod {
pod := buildPod(containerName, podName, nodeName)
Expect(k8sClient.Create(context.Background(), pod)).To(Succeed())
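The rest of `createRunningPod` is collapsed here. Since envtest runs no kubelet, a pod created through the API server stays Pending unless the test promotes it; a hedged sketch of how that promotion might look (the status-subresource update is an assumption, not the repository's verified code; `buildPod`, `k8sClient`, and the Gomega helpers come from the surrounding test file):

```go
// Hypothetical sketch of the collapsed tail — the status update is an assumption.
func createRunningPod(containerName, podName, nodeName string) *corev1.Pod {
	pod := buildPod(containerName, podName, nodeName)
	Expect(k8sClient.Create(context.Background(), pod)).To(Succeed())
	// envtest runs no kubelet, so the test promotes the pod to Running itself
	pod.Status.Phase = corev1.PodRunning
	Expect(k8sClient.Status().Update(context.Background(), pod)).To(Succeed())
	return pod
}
```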
@@ -234,7 +232,7 @@ func createRunningPod(containerName, podName, nodeName string) *corev1.Pod {
return pod
}

- // createVA creates a new volume attachment and returns it's object
+ // createVA creates a new volume attachment and returns its object
func createVA(vaName, unhealthyNodeName string) *storagev1.VolumeAttachment {
va := &storagev1.VolumeAttachment{
ObjectMeta: metav1.ObjectMeta{
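The remainder of `createVA` is collapsed as well. A sketch of a minimal, API-valid VolumeAttachment for tests — the attacher name and dummy PV reference are assumptions, not the repository's verified code:

```go
// Hypothetical sketch — the attacher name and dummy PV are assumptions;
// VolumeAttachment is cluster-scoped, and its source must name exactly
// one target for the API server to accept it.
func createVA(vaName, unhealthyNodeName string) *storagev1.VolumeAttachment {
	pvName := "test-pv"
	va := &storagev1.VolumeAttachment{
		ObjectMeta: metav1.ObjectMeta{
			Name: vaName,
		},
		Spec: storagev1.VolumeAttachmentSpec{
			Attacher: "test-attacher",
			NodeName: unhealthyNodeName,
			Source:   storagev1.VolumeAttachmentSource{PersistentVolumeName: &pvName},
		},
	}
	Expect(k8sClient.Create(context.Background(), va)).To(Succeed())
	return va
}
```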
@@ -253,9 +251,9 @@ func createVA(vaName, unhealthyNodeName string) *storagev1.VolumeAttachment {
return va
}

- // verifyResourceCleanup fetches all the resources that we have created for the test
+ // cleanupTestedResources fetches all the resources that we have created for the test
// and if they still exist at the end of the test, then we clean them up for the next test
- func verifyResourceCleanup(va1, va2 *storagev1.VolumeAttachment, pod *corev1.Pod) {
+ func cleanupTestedResources(va1, va2 *storagev1.VolumeAttachment, pod *corev1.Pod) {
// clean up the test volume attachments if they exist
vaTest := &storagev1.VolumeAttachment{}
if err := k8sClient.Get(context.Background(), client.ObjectKeyFromObject(va1), vaTest); err == nil {
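The rest of `cleanupTestedResources` is collapsed. The visible opening suggests a get-then-delete pattern per resource; a hedged sketch of the whole helper under that assumption:

```go
// Hypothetical sketch of the whole helper — assumes the visible
// get-then-delete pattern repeats for every tested resource.
func cleanupTestedResources(va1, va2 *storagev1.VolumeAttachment, pod *corev1.Pod) {
	for _, va := range []*storagev1.VolumeAttachment{va1, va2} {
		vaTest := &storagev1.VolumeAttachment{}
		if err := k8sClient.Get(context.Background(), client.ObjectKeyFromObject(va), vaTest); err == nil {
			Expect(k8sClient.Delete(context.Background(), vaTest)).To(Succeed())
		}
	}
	podTest := &corev1.Pod{}
	if err := k8sClient.Get(context.Background(), client.ObjectKeyFromObject(pod), podTest); err == nil {
		Expect(k8sClient.Delete(context.Background(), podTest)).To(Succeed())
	}
}
```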
10 changes: 5 additions & 5 deletions test/e2e/far_e2e_test.go
@@ -109,7 +109,7 @@ var _ = Describe("FAR E2e", func() {
nodeBootTimeBefore, err = e2eUtils.GetBootTime(clientSet, testNodeName, testNsName, log)
Expect(err).ToNot(HaveOccurred(), "failed to get boot time of the node")

- // create tested pod, and save it's creation time
+ // create tested pod, and save its creation time
// it will be deleted by FAR CR
pod = e2eUtils.GetPod(testNodeName, testContainerName)
pod.Name = testPodName
@@ -118,7 +118,7 @@
log.Info("Tested pod has been created", "pod", testPodName)
creationTimePod = metav1.Now().Time
va = createVA(testNodeName)
- DeferCleanup(verifyCleanSetup, va, pod)
+ DeferCleanup(cleanupTestedResources, va, pod)

far = createFAR(testNodeName, fenceAgent, testShareParam, testNodeParam)
DeferCleanup(deleteFAR, far)
@@ -281,8 +281,8 @@ func deleteFAR(far *v1alpha1.FenceAgentsRemediation) {
}, 2*time.Minute, 10*time.Second).ShouldNot(HaveOccurred(), "failed to delete far")
}
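Only the tail of `deleteFAR` is visible above. A sketch of the full delete-and-wait pattern it appears to implement — the `IgnoreNotFound` guard, NotFound handling, and error message are assumptions, not the repository's verified code (`apierrors` is `k8s.io/apimachinery/pkg/api/errors`):

```go
// Hypothetical sketch — delete the CR, then poll until it is gone.
func deleteFAR(far *v1alpha1.FenceAgentsRemediation) {
	// IgnoreNotFound keeps the helper idempotent if the CR is already gone
	Expect(client.IgnoreNotFound(k8sClient.Delete(context.Background(), far))).To(Succeed())
	Eventually(func() error {
		testFar := &v1alpha1.FenceAgentsRemediation{}
		err := k8sClient.Get(context.Background(), client.ObjectKeyFromObject(far), testFar)
		if apierrors.IsNotFound(err) {
			return nil
		}
		if err != nil {
			return err
		}
		return fmt.Errorf("FAR CR %s still exists", far.Name)
	}, 2*time.Minute, 10*time.Second).ShouldNot(HaveOccurred(), "failed to delete far")
}
```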

- // verifyCleanSetup deletes an old pod and old va if it was not deleted from FAR CR
- func verifyCleanSetup(va *storagev1.VolumeAttachment, pod *corev1.Pod) {
+ // cleanupTestedResources deletes an old pod and old va if it was not deleted from FAR CR
+ func cleanupTestedResources(va *storagev1.VolumeAttachment, pod *corev1.Pod) {
newVa := &storagev1.VolumeAttachment{}
if err := k8sClient.Get(context.Background(), client.ObjectKeyFromObject(va), newVa); err == nil {
Expect(k8sClient.Delete(context.Background(), newVa)).To(Succeed())
@@ -309,7 +309,7 @@ func wasFarTaintAdded(nodeName string) {
log.Info("FAR taint was added", "node name", node.Name, "taint key", farTaint.Key, "taint effect", farTaint.Effect)
}

- // checkFarLogs gets the FAR pod and checks whether it's logs have logString
+ // checkFarLogs gets the FAR pod and checks whether its logs have logString
func checkFarLogs(logString string) {
EventuallyWithOffset(1, func() string {
pod, err := utils.GetFenceAgentsRemediationPod(k8sClient)
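`checkFarLogs` is truncated right after fetching the pod. A hedged sketch of the remainder, assuming the e2e suite's `clientSet` is used to read the logs — the timeout values and empty-string-on-error behavior are assumptions, not the repository's verified code:

```go
// Hypothetical sketch of the collapsed remainder — timeouts are assumptions.
func checkFarLogs(logString string) {
	EventuallyWithOffset(1, func() string {
		pod, err := utils.GetFenceAgentsRemediationPod(k8sClient)
		if err != nil {
			return ""
		}
		// read the pod's logs through the typed clientset; returning ""
		// keeps Eventually polling across transient errors
		logs, err := clientSet.CoreV1().Pods(pod.Namespace).
			GetLogs(pod.Name, &corev1.PodLogOptions{}).
			DoRaw(context.Background())
		if err != nil {
			return ""
		}
		return string(logs)
	}, 3*time.Minute, 10*time.Second).Should(ContainSubstring(logString))
}
```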
6 changes: 3 additions & 3 deletions test/e2e/utils/cluster.go
@@ -23,7 +23,7 @@ const (
machinesNamespace = "openshift-machine-api"
)

- // GetClusterInfo fetches the cluster's infrastructure object to identify it's type
+ // GetClusterInfo fetches the cluster's infrastructure object to identify its type
func GetClusterInfo(config configclient.Interface) (*configv1.Infrastructure, error) {
// oc get Infrastructure.config.openshift.io/cluster -o jsonpath='{.metadata.name}'
// oc get Infrastructure.config.openshift.io/cluster -o jsonpath='{.spec.platformSpec.type}'
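The body between these `oc` hints and the `return clusterInfra, nil` below is collapsed; a plausible shape for it, as an assumption rather than the repository's verified code:

```go
// Hypothetical sketch of the collapsed body — an assumption, not verified:
// the cluster-scoped Infrastructure singleton is always named "cluster".
clusterInfra, err := config.ConfigV1().Infrastructures().Get(context.Background(), "cluster", metav1.GetOptions{})
if err != nil {
	return nil, err
}
```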
@@ -35,8 +35,8 @@ func GetClusterInfo(config configclient.Interface) (*configv1.Infrastructure, er
return clusterInfra, nil
}

- // GetSecretData searches for the platform's secret, and then returns it's two decoded data values.
- // E.g. on AWS it would be the Access Key and it's ID, but on BMH with fence_ipmilan it would be username and password
+ // GetSecretData searches for the platform's secret, and then returns its two decoded data values.
+ // E.g. on AWS it would be the Access Key and its ID, but on BMH with fence_ipmilan it would be username and password
func GetSecretData(clientSet *kubernetes.Clientset, secretName, secretNamespace, secretData1, secretData2 string) (string, string, error) {
// oc get secrets -n openshift-machine-api aws-cloud-credentials -o jsonpath='{.data.aws_access_key_id}' | base64 -d
// oc get secrets -n openshift-machine-api aws-cloud-credentials -o jsonpath='{.data.aws_secret_access_key}' | base64 -d
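The collapsed body below these `oc` hints presumably mirrors them; a hedged sketch, noting that client-go returns `Secret.Data` values already base64-decoded:

```go
// Hypothetical sketch of the collapsed body — an assumption, not verified.
secret, err := clientSet.CoreV1().Secrets(secretNamespace).Get(context.Background(), secretName, metav1.GetOptions{})
if err != nil {
	return "", "", err
}
return string(secret.Data[secretData1]), string(secret.Data[secretData2]), nil
```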
