diff --git a/images/spiderpool-plugins/version.sh b/images/spiderpool-plugins/version.sh index 8d6c4da6d5..a88f2d9ec3 100644 --- a/images/spiderpool-plugins/version.sh +++ b/images/spiderpool-plugins/version.sh @@ -6,7 +6,7 @@ # this file is generated by updateLatestCniPluginVersion.sh , please do not edit # https://github.com/containernetworking/plugins -export CNI_VERSION=${CNI_VERSION:-"v1.5.1"} +export CNI_VERSION=${CNI_VERSION:-"v1.6.0"} # https://github.com/k8snetworkplumbingwg/ovs-cni export OVS_VERSION=${OVS_VERSION:-"v0.34.2"} # https://github.com/k8snetworkplumbingwg/rdma-cni diff --git a/test/e2e/common/constant.go b/test/e2e/common/constant.go index a79256ad5a..26bddc4bc4 100644 --- a/test/e2e/common/constant.go +++ b/test/e2e/common/constant.go @@ -32,6 +32,8 @@ const ( BatchCreateTimeout = time.Minute * 5 KdoctorCheckTime = time.Minute * 10 SpiderSyncMultusTime = time.Minute * 2 + InformerSyncStatusTime = time.Second * 30 + KDoctorRunTimeout = time.Minute * 10 ) var ForcedWaitingTime = time.Second diff --git a/test/e2e/common/node.go b/test/e2e/common/node.go index 3cd58cd993..bd7e85b7f9 100644 --- a/test/e2e/common/node.go +++ b/test/e2e/common/node.go @@ -8,6 +8,7 @@ import ( "fmt" "os/exec" + "github.com/hashicorp/go-multierror" . "github.com/onsi/ginkgo/v2" e2e "github.com/spidernet-io/e2eframework/framework" corev1 "k8s.io/api/core/v1" @@ -55,3 +56,36 @@ func RestartNodeUntilClusterReady(ctx context.Context, frame *e2e.Framework, nod GinkgoWriter.Println("Check that the status of all Pods in the cluster is running") return nil } + +func GetNodeNetworkInfo(ctx context.Context, frame *e2e.Framework, nodeList []string) error { + var jobResult *multierror.Error + for _, node := range nodeList { + GinkgoWriter.Printf("=============== Check the network information of the node %v ============== \n", node) + commands := []string{ + "ip a", + "ip link show", + "ip n", + "ip -6 n", + "ip rule", + "ip -6 rule", + "ip route", + "ip route show table 100", + "ip route show table 101", + "ip route show table 500", + "ip -6 route", + "ip -6 route show table 100", + "ip -6 route show table 101", + "ip -6 route show table 500", + } + + for _, command := range commands { + GinkgoWriter.Printf("--------------- execute %v in node: %v ------------ \n", command, node) + out, err := frame.DockerExecCommand(ctx, node, command) + if err != nil { + jobResult = multierror.Append(jobResult, fmt.Errorf("node %v: command '%v' failed with error: %w, output: %s", node, command, err, out)) + } + } + } + + return jobResult.ErrorOrNil() +} diff --git a/test/e2e/common/pod.go b/test/e2e/common/pod.go index ddadadda71..9fb109cd8f 100644 --- a/test/e2e/common/pod.go +++ b/test/e2e/common/pod.go @@ -9,8 +9,10 @@ import ( "time" "github.com/spidernet-io/spiderpool/pkg/constant" + "github.com/spidernet-io/spiderpool/pkg/utils/retry" "sigs.k8s.io/controller-runtime/pkg/client" + "github.com/hashicorp/go-multierror" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" e2e "github.com/spidernet-io/e2eframework/framework" @@ -49,7 +51,14 @@ func GenerateExamplePodYaml(podName, namespace string) *corev1.Pod { func CreatePodUntilReady(frame *e2e.Framework, podYaml *corev1.Pod, podName, namespace string, waitPodStartTimeout time.Duration) (pod *corev1.Pod, podIPv4, podIPv6 string) { // create pod GinkgoWriter.Printf("create pod %v/%v \n", namespace, podName) - err := frame.CreatePod(podYaml) + err := retry.RetryOnConflictWithContext(context.Background(), retry.DefaultBackoff, func(ctx context.Context) error { + err := frame.CreatePod(podYaml) + if err != nil { + GinkgoLogr.Error(fmt.Errorf("failed to create pod %v/%v, error: %v", namespace, podName, err), "Failed") + return err + } + return nil + }) Expect(err).NotTo(HaveOccurred(), "failed to create pod") // wait for pod ip @@ -143,3 +152,34 @@ func ValidatePodIPConflict(podList *corev1.PodList) error { } return nil } + +func GetPodNetworkInfo(ctx context.Context, frame *e2e.Framework, podList *corev1.PodList) error { + var jobResult *multierror.Error + for _, pod := range podList.Items { + GinkgoWriter.Printf("=============== Check the network information of the pod %v/%v ============== \n", pod.Namespace, pod.Name) + commands := []string{ + "ip a", + "ip link show", + "ip n", + "ip -6 n", + "ip rule", + "ip -6 rule", + "ip route", + "ip route show table 100", + "ip route show table 101", + "ip -6 route", + "ip -6 route show table 100", + "ip -6 route show table 101", + } + + for _, command := range commands { + GinkgoWriter.Printf("--------------- execute %v in pod: %v/%v on node: %v ------------ \n", command, pod.Namespace, pod.Name, pod.Spec.NodeName) + out, err := frame.ExecCommandInPod(pod.Name, pod.Namespace, command, ctx) + if err != nil { + jobResult = multierror.Append(jobResult, fmt.Errorf("pod %v/%v: command '%v' failed with error: %w, output: %s", pod.Namespace, pod.Name, command, err, out)) + } + } + } + + return jobResult.ErrorOrNil() +} diff --git a/test/e2e/common/spiderpool.go b/test/e2e/common/spiderpool.go index 00c3ed562e..fcd2cf1a74 100644 --- a/test/e2e/common/spiderpool.go +++ b/test/e2e/common/spiderpool.go @@ -516,7 +516,7 @@ func DeleteIPPoolUntilFinish(f *frame.Framework, poolName string, ctx context.Co default: _, err := GetIppoolByName(f, poolName) if err != nil { - GinkgoWriter.Printf("IPPool '%s' has been removed,error: %v", poolName, err) + GinkgoWriter.Printf("IPPool '%s' has been removed, error: %v", poolName, err) return nil } time.Sleep(ForcedWaitingTime) @@ -608,7 +608,7 @@ func WaitWorkloadDeleteUntilFinish(ctx context.Context, f *frame.Framework, name _, err := GetWorkloadByName(f, namespace, name) if err != nil { if api_errors.IsNotFound(err) { - GinkgoWriter.Printf("workload '%s/%s' has been removed,error: %v", namespace, name, err) + GinkgoWriter.Printf("workload '%s/%s' has been removed, error: %v", namespace, name, err) return nil } return err @@ -987,24 +987,50 @@ func CheckIppoolSanity(f *frame.Framework, poolName string) error { } } - if *ippool.Status.AllocatedIPCount > *ippool.Status.TotalIPCount { - GinkgoWriter.Printf( - "allocated IP count (%v) exceeds total IP count (%v) \n", - *ippool.Status.AllocatedIPCount, *ippool.Status.TotalIPCount, - ) - isSanity = false - } + // The status of IPPool is automatically synchronized by the IPPool informer based on the events it receives. + // In the CI environment, the creation of IPPools happens very quickly, and their health checks are performed promptly. 
+	// When checking the TotalIPCount status, if the spiderpool-controller undergoes a leader election or the informer has not yet completed synchronization,
+	// the IPPool status TotalIPCount may be nil. This can lead to a panic.
+	// In such cases, try waiting for the informer to complete status synchronization before checking the sanity of the IPPool.
+	ctx, cancel := context.WithTimeout(context.Background(), InformerSyncStatusTime)
+	defer cancel()
+	for {
+		select {
+		case <-ctx.Done():
+			return fmt.Errorf("waiting for informer to synchronize IPPool %s status timed out", poolName)
+		default:
+			if ippool.Status.AllocatedIPCount == nil || ippool.Status.TotalIPCount == nil {
+				GinkgoLogr.Error(fmt.Errorf("IPPool %s has nil status fields, retrying", poolName), "Failed")
+				ippool, err = GetIppoolByName(f, poolName)
+				if err != nil {
+					if api_errors.IsNotFound(err) {
+						return fmt.Errorf("ippool %s does not exist", poolName)
+					}
+					return fmt.Errorf("failed to get ippool %s, error %v", poolName, err)
+				}
+				time.Sleep(ForcedWaitingTime)
+				continue
+			}
 
-	// Ensure that the IP pool's reported usage matches the actual usage
-	if actualIPUsageCount != int(*ippool.Status.AllocatedIPCount) {
-		GinkgoWriter.Printf("IPPool %s usage count mismatch: expected %d, got %d \n", poolName, actualIPUsageCount, *ippool.Status.AllocatedIPCount)
-		isSanity = false
-	}
+			if *ippool.Status.AllocatedIPCount > *ippool.Status.TotalIPCount {
+				GinkgoWriter.Printf(
+					"allocated IP count (%v) exceeds total IP count (%v) \n",
+					*ippool.Status.AllocatedIPCount, *ippool.Status.TotalIPCount,
+				)
+				isSanity = false
+			}
+			// Ensure that the IP pool's reported usage matches the actual usage
+			if actualIPUsageCount != int(*ippool.Status.AllocatedIPCount) {
+				GinkgoWriter.Printf("IPPool %s usage count mismatch: expected %d, got %d \n", poolName, actualIPUsageCount, *ippool.Status.AllocatedIPCount)
+				isSanity = false
+			}
 
-	if !isSanity {
-		return fmt.Errorf("IPPool %s sanity check failed", poolName)
-	}
+			if !isSanity {
+				return fmt.Errorf("IPPool %s sanity check failed", poolName)
+			}
 
-	GinkgoWriter.Printf("Successfully checked IPPool %s sanity, IPPool record information is correct \n", poolName)
-	return nil
+			GinkgoWriter.Printf("Successfully checked IPPool %s sanity, IPPool record information is correct \n", poolName)
+			return nil
+		}
+	}
 }
diff --git a/test/e2e/coordinator/macvlan-underlay-one/macvlan_underlay_one_test.go b/test/e2e/coordinator/macvlan-underlay-one/macvlan_underlay_one_test.go
index aaed7eef19..5902736912 100644
--- a/test/e2e/coordinator/macvlan-underlay-one/macvlan_underlay_one_test.go
+++ b/test/e2e/coordinator/macvlan-underlay-one/macvlan_underlay_one_test.go
@@ -19,7 +19,6 @@ import (
 	apitypes "k8s.io/apimachinery/pkg/types"
 	"k8s.io/utils/ptr"
 
-	"github.com/spidernet-io/spiderpool/pkg/constant"
 	pkgconstant "github.com/spidernet-io/spiderpool/pkg/constant"
 	"github.com/spidernet-io/spiderpool/pkg/ip"
 	spiderpoolv2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1"
@@ -68,7 +67,10 @@ var _ = Describe("MacvlanUnderlayOne", Serial, Label("underlay", "one-interface"
 		// Schedule
 		crontab := "1 1"
 		schedule.Schedule = &crontab
-		schedule.RoundNumber = 1
+		// The sporadic kdoctor test failures could not be reproduced.
+		// Leverage kdoctor's loop testing: if a failure occurs in the first round,
+		// check whether it also fails in the second round.
+ schedule.RoundNumber = 3 schedule.RoundTimeoutMinute = 1 task.Spec.Schedule = schedule @@ -85,7 +87,7 @@ var _ = Describe("MacvlanUnderlayOne", Serial, Label("underlay", "one-interface" task.Spec.Target = targetAgent // request - request.DurationInSecond = 5 + request.DurationInSecond = 10 request.QPS = 1 request.PerRequestTimeoutInMS = 7000 task.Spec.Request = request @@ -94,15 +96,12 @@ var _ = Describe("MacvlanUnderlayOne", Serial, Label("underlay", "one-interface" condition.SuccessRate = &successRate condition.MeanAccessDelayInMs = &delayMs task.Spec.SuccessCondition = condition - taskCopy := task - GinkgoWriter.Printf("kdoctor task: %+v \n", task) err := frame.CreateResource(task) - Expect(err).NotTo(HaveOccurred(), " kdoctor nethttp crd create failed") - - err = frame.GetResource(apitypes.NamespacedName{Name: name}, taskCopy) - Expect(err).NotTo(HaveOccurred(), " kdoctor nethttp crd get failed") + Expect(err).NotTo(HaveOccurred(), "failed to create kdoctor task") + GinkgoWriter.Printf("succeeded to create kdoctor task: %+v \n", task) + // update the kdoctor service to use corev1.ServiceExternalTrafficPolicyLocal if frame.Info.IpV4Enabled { kdoctorIPv4ServiceName := fmt.Sprintf("%s-%s-ipv4", "kdoctor-netreach", task.Name) var kdoctorIPv4Service *corev1.Service @@ -138,52 +137,50 @@ var _ = Describe("MacvlanUnderlayOne", Serial, Label("underlay", "one-interface" Expect(frame.UpdateResource(kdoctorIPv6Service)).NotTo(HaveOccurred()) } - ctx, cancel := context.WithTimeout(context.Background(), time.Second*60*5) + // waiting for kdoctor task to finish + ctx, cancel := context.WithTimeout(context.Background(), common.KDoctorRunTimeout) defer cancel() - var err1 = errors.New("error has occurred") - for run { + for { select { case <-ctx.Done(): - run = false - Expect(errors.New("wait nethttp test timeout")).NotTo(HaveOccurred(), " running kdoctor task timeout") + Expect(errors.New("timeout waiting for kdoctor task to finish")).NotTo(HaveOccurred()) default: + taskCopy := task err = frame.GetResource(apitypes.NamespacedName{Name: name}, taskCopy) - Expect(err).NotTo(HaveOccurred(), " kdoctor nethttp crd get failed") - - if taskCopy.Status.Finish == true { - command := fmt.Sprintf("get netreaches.kdoctor.io %s -oyaml", taskCopy.Name) - netreachesLog, _ := frame.ExecKubectl(command, ctx) - GinkgoWriter.Printf("kdoctor's netreaches execution result %+v \n", string(netreachesLog)) - - for _, v := range taskCopy.Status.History { - if v.Status == "succeed" { - err1 = nil + Expect(err).NotTo(HaveOccurred(), "Failed to get kdoctor task") + if taskCopy.Status.Finish { + roundFailed := false + for _, t := range taskCopy.Status.History { + // No configuration has been changed, The first round of the test is not considered a failure + if t.RoundNumber != 1 && t.Status == "failed" { + roundFailed = true + break } } - run = false - - ctx1, cancel1 := context.WithTimeout(context.Background(), time.Second*30) - defer cancel1() - for { - select { - case <-ctx1.Done(): - Expect(errors.New("wait kdoctorreport timeout")).NotTo(HaveOccurred(), "failed to run kdoctor task and wait kdoctorreport timeout") - default: - command = fmt.Sprintf("get kdoctorreport %s -oyaml", taskCopy.Name) - kdoctorreportLog, err := frame.ExecKubectl(command, ctx) - if err != nil { - time.Sleep(common.ForcedWaitingTime) - continue - } - GinkgoWriter.Printf("kdoctor's kdoctorreport execution result %+v \n", string(kdoctorreportLog)) - } - break + if roundFailed { + Fail("kdoctor task is not successful") + } + return + } + for _, t := 
range taskCopy.Status.History { + // If the check is successful, exit directly. + if t.RoundNumber == 1 && t.Status == "succeed" { + GinkgoWriter.Println("succeed to run kdoctor task") + return + } + // If the check fails, we should collect the failed Pod network information as soon as possible + // If the first attempt failed but the second attempt succeeded, + // we collected network logs and compared the two attempts to see if there were any differences. + if t.Status == "failed" || (t.RoundNumber != 1 && t.Status == "succeed") { + GinkgoLogr.Error(fmt.Errorf("Failed to run kdoctor task, round %d, at time %s", t.RoundNumber, time.Now()), "Failed") + podList, err := frame.GetPodListByLabel(map[string]string{"app.kubernetes.io/name": taskCopy.Name}) + Expect(err).NotTo(HaveOccurred(), "Failed to get pod list by label") + Expect(common.GetPodNetworkInfo(ctx, frame, podList)).NotTo(HaveOccurred(), "Failed to get pod network info") + Expect(common.GetNodeNetworkInfo(ctx, frame, frame.Info.KindNodeList)).NotTo(HaveOccurred(), "Failed to get node network info") } } - time.Sleep(time.Second * 5) } } - Expect(err1).NotTo(HaveOccurred()) }) }) @@ -232,7 +229,7 @@ var _ = Describe("MacvlanUnderlayOne", Serial, Label("underlay", "one-interface" Namespace: namespace, }, Spec: spiderpoolv2beta1.MultusCNIConfigSpec{ - CniType: ptr.To(constant.MacvlanCNI), + CniType: ptr.To(pkgconstant.MacvlanCNI), MacvlanConfig: &spiderpoolv2beta1.SpiderMacvlanCniConfig{ Master: []string{common.NIC1}, VlanID: ptr.To(int32(100)), @@ -283,7 +280,7 @@ var _ = Describe("MacvlanUnderlayOne", Serial, Label("underlay", "one-interface" Expect(err).NotTo(HaveOccurred()) var annotations = make(map[string]string) annotations[common.MultusNetworks] = fmt.Sprintf("%s/%s", namespace, multusNadName) - annotations[constant.AnnoPodIPPools] = string(podAnnoMarshal) + annotations[pkgconstant.AnnoPodIPPools] = string(podAnnoMarshal) deployObject := common.GenerateExampleDeploymentYaml(depName, namespace, int32(1)) deployObject.Spec.Template.Annotations = annotations Expect(frame.CreateDeployment(deployObject)).NotTo(HaveOccurred()) diff --git a/test/e2e/reclaim/reclaim_test.go b/test/e2e/reclaim/reclaim_test.go index 285f357e20..8990fab27e 100644 --- a/test/e2e/reclaim/reclaim_test.go +++ b/test/e2e/reclaim/reclaim_test.go @@ -18,6 +18,7 @@ import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/kubectl/pkg/util/podutils" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" @@ -701,22 +702,50 @@ var _ = Describe("test ip with reclaim ip case", Label("reclaim"), func() { Expect(err).NotTo(HaveOccurred(), "Failed exec '%s' in docker container '%s', error is: %v,log: %v.", commandStr, workerNodeName, err, string(output)) DeferCleanup(func() { - // Prevent spiderpoolcontroller Pod termination failure + commandStr = "systemctl start kubelet" + output, err = frame.DockerExecCommand(ctx, workerNodeName, commandStr) + Expect(err).NotTo(HaveOccurred(), "Failed exec '%s' in docker container '%s', error is: %v,log: %v.", commandStr, workerNodeName, err, string(output)) + Eventually(func() error { + checkCommandStr := "systemctl is-active kubelet" + output, err := frame.DockerExecCommand(ctx, workerNodeName, checkCommandStr) + if err != nil { + return fmt.Errorf("Failed to check kubelet status: %v, log: %v", err, string(output)) + } + if strings.TrimSpace(string(output)) != "active" { + return fmt.Errorf("kubelet is not running, status: %v", 
strings.TrimSpace(string(output))) + } + return nil + }).WithTimeout(common.PodReStartTimeout).WithPolling(10 * time.Second).Should(BeNil()) + + // Prevent spiderpoolcontroller Pod termination failure, avoid spiderpoolcontroller Pod deletion timeout podList, err := frame.GetPodListByLabel(map[string]string{"app.kubernetes.io/component": constant.SpiderpoolController}) Expect(err).NotTo(HaveOccurred(), "Failed get SpiderpoolController Pod list, error is: %v", err) var deletePodList *corev1.PodList + needDelete := false for _, spiderpoolControllerPod := range podList.Items { - if spiderpoolControllerPod.Spec.NodeName == workerNodeName { - deletePodList = &corev1.PodList{ - Items: []corev1.Pod{spiderpoolControllerPod}, - } + if spiderpoolControllerPod.Spec.NodeName == workerNodeName && !podutils.IsPodReady(&spiderpoolControllerPod) && spiderpoolControllerPod.DeletionTimestamp != nil { + needDelete = true + deletePodList = &corev1.PodList{Items: []corev1.Pod{spiderpoolControllerPod}} } } - Expect(frame.DeletePodList(deletePodList)).NotTo(HaveOccurred(), client.DeleteOptions{GracePeriodSeconds: ptr.To(int64(0))}) - - commandStr = "systemctl start kubelet" - output, err = frame.DockerExecCommand(ctx, workerNodeName, commandStr) - Expect(err).NotTo(HaveOccurred(), "Failed exec '%s' in docker container '%s', error is: %v,log: %v.", commandStr, workerNodeName, err, string(output)) + if needDelete { + Expect(frame.DeletePodList(deletePodList)).NotTo(HaveOccurred()) + Eventually(func() error { + newPodList, err := frame.GetPodListByLabel(map[string]string{"app.kubernetes.io/component": constant.SpiderpoolController}) + if err != nil { + return err + } + if len(newPodList.Items) == 0 && len(newPodList.Items) != len(frame.Info.KindNodeList) { + return fmt.Errorf("The number of Spiderpool controllers does not meet expectations. Expected %d, but got %d.", len(frame.Info.KindNodeList), len(newPodList.Items)) + } + for _, newPod := range newPodList.Items { + if newPod.Spec.NodeName == workerNodeName && !podutils.IsPodReady(&newPod) { + return fmt.Errorf("Pod %s/%s on node '%s' is not running yet", newPod.Namespace, newPod.Name, workerNodeName) + } + } + return nil + }).WithTimeout(common.PodReStartTimeout).WithPolling(10 * time.Second).Should(BeNil()) + } // wait for Node spider-worker to be ready webhookHealthCheckClient := openapi.NewWebhookHealthCheckClient() diff --git a/test/scripts/debugEnv.sh b/test/scripts/debugEnv.sh index bffc346d28..26f8271a7e 100755 --- a/test/scripts/debugEnv.sh +++ b/test/scripts/debugEnv.sh @@ -286,6 +286,13 @@ elif [ "$TYPE"x == "detail"x ] ; then echo "--------- kubectl logs ${POD} -n ${NAMESPACE} --previous" kubectl logs ${POD} -n ${NAMESPACE} --kubeconfig ${E2E_KUBECONFIG} --previous done + if [ -n "$KDOCTOR_POD_LIST" ]; then + echo "Fetching kdoctor reports..." 
+      echo "--------- kubectl get kdoctorreport -A -ojson --------- "
+      kubectl get kdoctorreport -A -ojson --kubeconfig ${E2E_KUBECONFIG}
+      echo "--------- kubectl get kdoctorreport -A -oyaml --------- "
+      kubectl get kdoctorreport -A -oyaml --kubeconfig ${E2E_KUBECONFIG}
+    fi
 
     echo ""
     echo "=============== open kruise logs ============== "
@@ -302,6 +309,22 @@ elif [ "$TYPE"x == "detail"x ] ; then
     kubectl get netreach --kubeconfig ${E2E_KUBECONFIG}
     kubectl get netreach --kubeconfig ${E2E_KUBECONFIG} -o yaml
 
+    echo ""
+    echo "=============== kubelet and docker log ============== "
+    KIND_CLUSTER_NAME=${KIND_CLUSTER_NAME:-"spider"}
+    KIND_NODES=$( kind get nodes --name ${KIND_CLUSTER_NAME} )
+    [ -z "$KIND_NODES" ] && echo "warning, failed to find nodes of kind cluster $KIND_CLUSTER_NAME " || true
+    for NODE in $KIND_NODES ; do
+        echo "--------- kubelet status from node ${NODE}"
+        docker exec $NODE systemctl status kubelet -l
+        echo "--------- kubelet logs from node ${NODE}"
+        docker exec $NODE journalctl -u kubelet -n 500
+        echo "--------- docker status from node ${NODE}"
+        docker exec $NODE systemctl status docker -l
+        echo "--------- docker logs from node ${NODE}"
+        docker exec $NODE journalctl -u docker -n 500
+    done
+
 elif [ "$TYPE"x == "error"x ] ; then
     CHECK_ERROR(){
         LLOG_MARK="$1"
diff --git a/test/scripts/install-kdoctor.sh b/test/scripts/install-kdoctor.sh
index 0dacb9247b..8676b91216 100644
--- a/test/scripts/install-kdoctor.sh
+++ b/test/scripts/install-kdoctor.sh
@@ -24,7 +24,7 @@ echo "$CURRENT_FILENAME : KDOCTOR_REPORT_PATH $KDOCTOR_REPORT_PATH "
 [ ! -f "$E2E_KUBECONFIG" ] && echo "error, could not find file $E2E_KUBECONFIG " && exit 1
 echo "$CURRENT_FILENAME : E2E_KUBECONFIG $E2E_KUBECONFIG "
 
-KDOCTOR_VERSION=${KDOCTOR_VERSION:-0.2.0}
+KDOCTOR_VERSION=${KDOCTOR_VERSION:-0.2.2}
 E2E_KDOCTOR_IMAGE_REPO=${E2E_KDOCTOR_IMAGE_REPO:-"ghcr.io"}
 INSTALL_TIME_OUT=300s