diff --git a/test/e2e/clusterctl_upgrade.go b/test/e2e/clusterctl_upgrade.go
index 3d12a502fd57..8f4226e2710e 100644
--- a/test/e2e/clusterctl_upgrade.go
+++ b/test/e2e/clusterctl_upgrade.go
@@ -67,6 +67,12 @@ type ClusterctlUpgradeSpecInput struct {
 	ClusterctlConfigPath  string
 	BootstrapClusterProxy framework.ClusterProxy
 	ArtifactFolder        string
+
+	// UseKindForManagementCluster instructs the test to use kind for creating the management cluster (instead of using the actual infrastructure provider).
+	// NOTE: given that the bootstrap cluster could be shared by several tests, it is not practical to use it for testing clusterctl upgrades.
+	// So we create a new management cluster in which to install the older versions of the providers.
+	UseKindForManagementCluster bool
+
 	// InitWithBinary can be used to override the INIT_WITH_BINARY e2e config variable with the URL of the clusterctl binary of the old version of Cluster API. The spec will interpolate the
 	// strings `{OS}` and `{ARCH}` to `runtime.GOOS` and `runtime.GOARCH` respectively, e.g. https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.3.23/clusterctl-{OS}-{ARCH}
 	InitWithBinary string
@@ -192,6 +198,7 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
 		managementClusterNamespace     *corev1.Namespace
 		managementClusterCancelWatches context.CancelFunc
 		managementClusterResources     *clusterctl.ApplyClusterTemplateAndWaitResult
+		managementClusterProvider      bootstrap.ClusterProvider
 		managementClusterProxy         framework.ClusterProxy
 
 		initClusterctlBinaryURL string
@@ -199,9 +206,14 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
 		initKubernetesVersion string
 
 		workloadClusterName string
+
+		scheme *apiruntime.Scheme
 	)
 
 	BeforeEach(func() {
+		scheme = apiruntime.NewScheme()
+		framework.TryAddDefaultSchemes(scheme)
+
 		Expect(ctx).NotTo(BeNil(), "ctx is required for %s spec", specName)
 		input = inputGetter()
 		Expect(input.E2EConfig).ToNot(BeNil(), "Invalid argument. input.E2EConfig can't be nil when calling %s spec", specName)
@@ -247,73 +259,104 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
 		Expect(input.E2EConfig.Variables).To(HaveKey(KubernetesVersion))
 		Expect(os.MkdirAll(input.ArtifactFolder, 0750)).To(Succeed(), "Invalid argument. input.ArtifactFolder can't be created for %s spec", specName)
 
-		// Setup a Namespace where to host objects for this spec and create a watcher for the namespace events.
-		managementClusterNamespace, managementClusterCancelWatches = setupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder)
+		// If the test is not being run in a separate kind cluster, set up a Namespace in the current bootstrap cluster to host objects for this spec and create a watcher for the namespace events.
+		if !input.UseKindForManagementCluster {
+			managementClusterNamespace, managementClusterCancelWatches = setupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder)
+		}
 
 		managementClusterResources = new(clusterctl.ApplyClusterTemplateAndWaitResult)
 	})
 
 	It("Should create a management cluster and then upgrade all the providers", func() {
-		By("Creating a workload cluster to be used as a new management cluster")
-		// NOTE: given that the bootstrap cluster could be shared by several tests, it is not practical to use it for testing clusterctl upgrades.
-		// So we are creating a workload cluster that will be used as a new management cluster where to install older version of providers
 		infrastructureProvider := clusterctl.DefaultInfrastructureProvider
 		if input.InfrastructureProvider != nil {
 			infrastructureProvider = *input.InfrastructureProvider
 		}
-		managementClusterName = fmt.Sprintf("%s-%s", specName, util.RandomString(6))
-		clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
-			ClusterProxy: input.BootstrapClusterProxy,
-			ConfigCluster: clusterctl.ConfigClusterInput{
-				LogFolder:                filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()),
-				ClusterctlConfigPath:     input.ClusterctlConfigPath,
-				KubeconfigPath:           input.BootstrapClusterProxy.GetKubeconfigPath(),
-				InfrastructureProvider:   infrastructureProvider,
-				Flavor:                   input.MgmtFlavor,
-				Namespace:                managementClusterNamespace.Name,
-				ClusterName:              managementClusterName,
-				KubernetesVersion:        initKubernetesVersion,
-				ControlPlaneMachineCount: pointer.Int64(1),
-				WorkerMachineCount:       pointer.Int64(1),
-			},
-			PreWaitForCluster: func() {
-				if input.PreWaitForCluster != nil {
-					input.PreWaitForCluster(input.BootstrapClusterProxy, managementClusterNamespace.Name, managementClusterName)
-				}
-			},
-			CNIManifestPath:              input.CNIManifestPath,
-			ControlPlaneWaiters:          input.ControlPlaneWaiters,
-			WaitForClusterIntervals:      input.E2EConfig.GetIntervals(specName, "wait-cluster"),
-			WaitForControlPlaneIntervals: input.E2EConfig.GetIntervals(specName, "wait-control-plane"),
-			WaitForMachineDeployments:    input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"),
-		}, managementClusterResources)
-
-		By("Turning the workload cluster into a management cluster with older versions of providers")
-
-		// If the cluster is a DockerCluster, we should load controller images into the nodes.
-		// Nb. this can be achieved also by changing the DockerMachine spec, but for the time being we are using
-		// this approach because this allows to have a single source of truth for images, the e2e config
-		// Nb. the images for official version of the providers will be pulled from internet, but the latest images must be
-		// built locally and loaded into kind
-		cluster := managementClusterResources.Cluster
-		if cluster.Spec.InfrastructureRef.Kind == "DockerCluster" {
-			Expect(bootstrap.LoadImagesToKindCluster(ctx, bootstrap.LoadImagesToKindClusterInput{
-				Name:   cluster.Name,
-				Images: input.E2EConfig.Images,
-			})).To(Succeed())
+		// NOTE: given that the bootstrap cluster could be shared by several tests, it is not practical to use it for testing clusterctl upgrades.
+		// So we are creating a workload cluster that will be used as a new management cluster where to install older version of providers
+		managementClusterName = fmt.Sprintf("%s-management-%s", specName, util.RandomString(6))
+		managementClusterLogFolder := filepath.Join(input.ArtifactFolder, "clusters", managementClusterName)
+		if input.UseKindForManagementCluster {
+			By("Creating a kind cluster to be used as a new management cluster")
+
+			managementClusterProvider = bootstrap.CreateKindBootstrapClusterAndLoadImages(ctx, bootstrap.CreateKindBootstrapClusterAndLoadImagesInput{
+				Name:               managementClusterName,
+				KubernetesVersion:  initKubernetesVersion,
+				RequiresDockerSock: input.E2EConfig.HasDockerProvider(),
+				// Note: most of these images won't be used while starting the controllers, because this cluster is used to spin up older versions of CAPI. Those images will eventually be used when upgrading to the current version.
+				Images:    input.E2EConfig.Images,
+				IPFamily:  input.E2EConfig.GetVariable(IPFamily),
+				LogFolder: filepath.Join(managementClusterLogFolder, "logs-kind"),
+			})
+			Expect(managementClusterProvider).ToNot(BeNil(), "Failed to create a kind cluster")
+
+			kubeconfigPath := managementClusterProvider.GetKubeconfigPath()
+			Expect(kubeconfigPath).To(BeAnExistingFile(), "Failed to get the kubeconfig file for the kind cluster")
+
+			managementClusterProxy = framework.NewClusterProxy(managementClusterName, kubeconfigPath, scheme)
+			Expect(managementClusterProxy).ToNot(BeNil(), "Failed to get a kind cluster proxy")
+
+			managementClusterResources.Cluster = &clusterv1.Cluster{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: managementClusterName,
+				},
+			}
+		} else {
+			By("Creating a workload cluster to be used as a new management cluster")
+
+			clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
+				ClusterProxy: input.BootstrapClusterProxy,
+				ConfigCluster: clusterctl.ConfigClusterInput{
+					LogFolder:                filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()),
+					ClusterctlConfigPath:     input.ClusterctlConfigPath,
+					KubeconfigPath:           input.BootstrapClusterProxy.GetKubeconfigPath(),
+					InfrastructureProvider:   infrastructureProvider,
+					Flavor:                   input.MgmtFlavor,
+					Namespace:                managementClusterNamespace.Name,
+					ClusterName:              managementClusterName,
+					KubernetesVersion:        initKubernetesVersion,
+					ControlPlaneMachineCount: pointer.Int64(1),
+					WorkerMachineCount:       pointer.Int64(1),
+				},
+				PreWaitForCluster: func() {
+					if input.PreWaitForCluster != nil {
+						input.PreWaitForCluster(input.BootstrapClusterProxy, managementClusterNamespace.Name, managementClusterName)
+					}
+				},
+				CNIManifestPath:              input.CNIManifestPath,
+				ControlPlaneWaiters:          input.ControlPlaneWaiters,
+				WaitForClusterIntervals:      input.E2EConfig.GetIntervals(specName, "wait-cluster"),
+				WaitForControlPlaneIntervals: input.E2EConfig.GetIntervals(specName, "wait-control-plane"),
+				WaitForMachineDeployments:    input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"),
+			}, managementClusterResources)
+
+			// If the cluster is a DockerCluster, we should load controller images into the nodes.
+			// Nb. this can be achieved also by changing the DockerMachine spec, but for the time being we are using
+			// this approach because this allows to have a single source of truth for images, the e2e config
+			// Nb. the images for official version of the providers will be pulled from internet, but the latest images must be
+			// built locally and loaded into kind
+			cluster := managementClusterResources.Cluster
+			if cluster.Spec.InfrastructureRef.Kind == "DockerCluster" {
+				Expect(bootstrap.LoadImagesToKindCluster(ctx, bootstrap.LoadImagesToKindClusterInput{
+					Name:   cluster.Name,
+					Images: input.E2EConfig.Images,
+				})).To(Succeed())
+			}
+
+			// Get a ClusterProxy so we can interact with the workload cluster
+			managementClusterProxy = input.BootstrapClusterProxy.GetWorkloadCluster(ctx, cluster.Namespace, cluster.Name, framework.WithMachineLogCollector(input.BootstrapClusterProxy.GetLogCollector()))
 		}
 
-		// Get a ClusterProxy so we can interact with the workload cluster
-		managementClusterProxy = input.BootstrapClusterProxy.GetWorkloadCluster(ctx, cluster.Namespace, cluster.Name, framework.WithMachineLogCollector(input.BootstrapClusterProxy.GetLogCollector()))
+		By("Turning the new cluster into a management cluster with older versions of providers")
 
 		// Download the clusterctl version that should be used to initially set up the management cluster (which is later upgraded).
 		Byf("Downloading clusterctl binary from %s", initClusterctlBinaryURL)
 		clusterctlBinaryPath, clusterctlConfigPath := setupClusterctl(ctx, initClusterctlBinaryURL, input.ClusterctlConfigPath)
 		defer os.Remove(clusterctlBinaryPath) // clean up
 
-		By("Initializing the workload cluster with older versions of providers")
+		By("Initializing the new management cluster with older versions of providers")
 
 		if input.PreInit != nil {
-			By("Running Pre-init steps against the management cluster")
+			By("Running Pre-init steps against the new management cluster")
 			input.PreInit(managementClusterProxy)
 		}
@@ -356,7 +399,7 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
 			IPAMProviders:             ipamProviders,
 			RuntimeExtensionProviders: runtimeExtensionProviders,
 			AddonProviders:            addonProviders,
-			LogFolder:                 filepath.Join(input.ArtifactFolder, "clusters", cluster.Name),
+			LogFolder:                 managementClusterLogFolder,
 		}, input.E2EConfig.GetIntervals(specName, "wait-controllers")...)
 
 		By("THE MANAGEMENT CLUSTER WITH THE OLDER VERSION OF PROVIDERS IS UP&RUNNING!")
@@ -375,7 +418,7 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
 			// In this case ApplyClusterTemplateAndWait can't be used because this helper is linked to the last version of the API;
 			// so we are getting a template using the downloaded version of clusterctl, applying it, and wait for machines to be provisioned.
 
-			workloadClusterName = fmt.Sprintf("%s-%s", specName, util.RandomString(6))
+			workloadClusterName = fmt.Sprintf("%s-workload-%s", specName, util.RandomString(6))
 			workloadClusterNamespace := testNamespace.Name
 			kubernetesVersion := input.WorkloadKubernetesVersion
 			if kubernetesVersion == "" {
@@ -543,7 +586,7 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
 					IPAMProviders:             upgrade.IPAMProviders,
 					RuntimeExtensionProviders: upgrade.RuntimeExtensionProviders,
 					AddonProviders:            upgrade.AddonProviders,
-					LogFolder:                 filepath.Join(input.ArtifactFolder, "clusters", cluster.Name),
+					LogFolder:                 managementClusterLogFolder,
 				}, input.E2EConfig.GetIntervals(specName, "wait-controllers")...)
 			} else {
 				Byf("[%d] Upgrading providers to the latest version available", i)
@@ -553,7 +596,7 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
 					ClusterctlVariables: input.UpgradeClusterctlVariables,
 					ClusterProxy:        managementClusterProxy,
 					Contract:            upgrade.Contract,
-					LogFolder:           filepath.Join(input.ArtifactFolder, "clusters", cluster.Name),
+					LogFolder:           managementClusterLogFolder,
 				}, input.E2EConfig.GetIntervals(specName, "wait-controllers")...)
 			}
 
@@ -686,8 +729,14 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
 			By("Running PreCleanupManagementCluster steps against the management cluster")
 			input.PreCleanupManagementCluster(managementClusterProxy)
 		}
+
 		// Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself.
-		dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, managementClusterNamespace, managementClusterCancelWatches, managementClusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
+		if input.UseKindForManagementCluster {
+			managementClusterProxy.Dispose(ctx)
+			managementClusterProvider.Dispose(ctx)
+		} else {
+			dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, managementClusterNamespace, managementClusterCancelWatches, managementClusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
+		}
 	})
 }
diff --git a/test/e2e/clusterctl_upgrade_test.go b/test/e2e/clusterctl_upgrade_test.go
index fa446de86787..23b66d130f0d 100644
--- a/test/e2e/clusterctl_upgrade_test.go
+++ b/test/e2e/clusterctl_upgrade_test.go
@@ -84,8 +84,9 @@ var _ = Describe("When testing clusterctl upgrades (v0.3=>v1.5=>current)", func(
 			UpgradeClusterctlVariables: map[string]string{
 				"CLUSTER_TOPOLOGY": "false",
 			},
-			MgmtFlavor:     "topology",
-			WorkloadFlavor: "",
+			MgmtFlavor:                  "topology",
+			WorkloadFlavor:              "",
+			UseKindForManagementCluster: true,
 		}
 	})
 })
@@ -134,10 +135,11 @@ var _ = Describe("When testing clusterctl upgrades (v0.4=>v1.5=>current)", func(
 			},
 			// NOTE: If this version is changed here the image and SHA must also be updated in all DockerMachineTemplates in `test/data/infrastructure-docker/v0.4/bases.
 			// Note: Both InitWithKubernetesVersion and WorkloadKubernetesVersion should be the highest mgmt cluster version supported by the source Cluster API version.
-			InitWithKubernetesVersion: "v1.23.17",
-			WorkloadKubernetesVersion: "v1.23.17",
-			MgmtFlavor:                "topology",
-			WorkloadFlavor:            "",
+			InitWithKubernetesVersion:   "v1.23.17",
+			WorkloadKubernetesVersion:   "v1.23.17",
+			MgmtFlavor:                  "topology",
+			WorkloadFlavor:              "",
+			UseKindForManagementCluster: true,
 		}
 	})
 })
@@ -163,10 +165,11 @@ var _ = Describe("When testing clusterctl upgrades (v1.0=>current)", func() {
 			// try to deploy the latest version of our test-extension from docker.yaml.
 			InitWithRuntimeExtensionProviders: []string{},
 			// NOTE: If this version is changed here the image and SHA must also be updated in all DockerMachineTemplates in `test/data/infrastructure-docker/v1.0/bases.
-			InitWithKubernetesVersion: "v1.23.17",
-			WorkloadKubernetesVersion: "v1.23.17",
-			MgmtFlavor:                "topology",
-			WorkloadFlavor:            "",
+			InitWithKubernetesVersion:   "v1.23.17",
+			WorkloadKubernetesVersion:   "v1.23.17",
+			MgmtFlavor:                  "topology",
+			WorkloadFlavor:              "",
+			UseKindForManagementCluster: true,
 		}
 	})
 })
@@ -189,10 +192,11 @@ var _ = Describe("When testing clusterctl upgrades (v1.4=>current)", func() {
 			InitWithInfrastructureProviders: []string{"docker:v1.4.5"},
 			InitWithProvidersContract:       "v1beta1",
 			// NOTE: If this version is changed here the image and SHA must also be updated in all DockerMachineTemplates in `test/e2e/data/infrastructure-docker/v1.4/bases.
-			InitWithKubernetesVersion: "v1.27.3",
-			WorkloadKubernetesVersion: "v1.27.3",
-			MgmtFlavor:                "topology",
-			WorkloadFlavor:            "",
+			InitWithKubernetesVersion:   "v1.27.3",
+			WorkloadKubernetesVersion:   "v1.27.3",
+			MgmtFlavor:                  "topology",
+			WorkloadFlavor:              "",
+			UseKindForManagementCluster: true,
 		}
 	})
 })
@@ -215,10 +219,11 @@ var _ = Describe("When testing clusterctl upgrades using ClusterClass (v1.4=>cur
 			InitWithInfrastructureProviders: []string{"docker:v1.4.5"},
 			InitWithProvidersContract:       "v1beta1",
 			// NOTE: If this version is changed here the image and SHA must also be updated in all DockerMachineTemplates in `test/e2e/data/infrastructure-docker/v1.4/bases.
-			InitWithKubernetesVersion: "v1.27.3",
-			WorkloadKubernetesVersion: "v1.27.3",
-			MgmtFlavor:                "topology",
-			WorkloadFlavor:            "topology",
+			InitWithKubernetesVersion:   "v1.27.3",
+			WorkloadKubernetesVersion:   "v1.27.3",
+			MgmtFlavor:                  "topology",
+			WorkloadFlavor:              "topology",
+			UseKindForManagementCluster: true,
 		}
 	})
 })
@@ -235,10 +240,11 @@ var _ = Describe("When testing clusterctl upgrades (v1.5=>current)", func() {
 			InitWithBinary:            "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.0/clusterctl-{OS}-{ARCH}",
 			InitWithProvidersContract: "v1beta1",
 			// NOTE: If this version is changed here the image and SHA must also be updated in all DockerMachineTemplates in `test/e2e/data/infrastructure-docker/v1.5/bases.
-			InitWithKubernetesVersion: "v1.28.0",
-			WorkloadKubernetesVersion: "v1.28.0",
-			MgmtFlavor:                "topology",
-			WorkloadFlavor:            "",
+			InitWithKubernetesVersion:   "v1.28.0",
+			WorkloadKubernetesVersion:   "v1.28.0",
+			MgmtFlavor:                  "topology",
+			WorkloadFlavor:              "",
+			UseKindForManagementCluster: true,
 		}
 	})
 })
@@ -255,10 +261,11 @@ var _ = Describe("When testing clusterctl upgrades using ClusterClass (v1.5=>cur
 			InitWithBinary:            "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.0/clusterctl-{OS}-{ARCH}",
 			InitWithProvidersContract: "v1beta1",
 			// NOTE: If this version is changed here the image and SHA must also be updated in all DockerMachineTemplates in `test/e2e/data/infrastructure-docker/v1.5/bases.
-			InitWithKubernetesVersion: "v1.28.0",
-			WorkloadKubernetesVersion: "v1.28.0",
-			MgmtFlavor:                "topology",
-			WorkloadFlavor:            "topology",
+			InitWithKubernetesVersion:   "v1.28.0",
+			WorkloadKubernetesVersion:   "v1.28.0",
+			MgmtFlavor:                  "topology",
+			WorkloadFlavor:              "topology",
+			UseKindForManagementCluster: true,
 		}
 	})
 })
diff --git a/test/framework/bootstrap/kind_util.go b/test/framework/bootstrap/kind_util.go
index 097b3f38c984..9586b1b7b441 100644
--- a/test/framework/bootstrap/kind_util.go
+++ b/test/framework/bootstrap/kind_util.go
@@ -18,7 +18,6 @@ package bootstrap
 
 import (
 	"context"
-	"fmt"
 	"os"
 	"path/filepath"
 
@@ -31,6 +30,8 @@ import (
 	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
 	"sigs.k8s.io/cluster-api/test/framework/internal/log"
 	"sigs.k8s.io/cluster-api/test/infrastructure/container"
+	kindmapper "sigs.k8s.io/cluster-api/test/infrastructure/kind"
+	"sigs.k8s.io/cluster-api/util/version"
 )
 
 // CreateKindBootstrapClusterAndLoadImagesInput is the input for CreateKindBootstrapClusterAndLoadImages.
@@ -63,7 +64,13 @@ func CreateKindBootstrapClusterAndLoadImages(ctx context.Context, input CreateKi
 	options := []KindClusterOption{}
 
 	if input.KubernetesVersion != "" {
-		options = append(options, WithNodeImage(fmt.Sprintf("%s:%s", DefaultNodeImageRepository, input.KubernetesVersion)))
+		semVer, err := version.ParseMajorMinorPatchTolerant(input.KubernetesVersion)
+		if err != nil {
+			Expect(err).ToNot(HaveOccurred(), "could not parse KubernetesVersion version")
+		}
+		kindMapping := kindmapper.GetMapping(semVer, "")
+
+		options = append(options, WithNodeImage(kindMapping.Image))
 	}
 	if input.RequiresDockerSock {
 		options = append(options, WithDockerSockMount())
diff --git a/test/framework/exec/kubectl.go b/test/framework/exec/kubectl.go
index 98c8313797bc..279e527a699f 100644
--- a/test/framework/exec/kubectl.go
+++ b/test/framework/exec/kubectl.go
@@ -38,8 +38,12 @@ func KubectlApply(ctx context.Context, kubeconfigPath string, resources []byte,
 
 	fmt.Printf("Running kubectl %s\n", strings.Join(aargs, " "))
 	stdout, stderr, err := applyCmd.Run(ctx)
-	fmt.Printf("stderr:\n%s\n", string(stderr))
-	fmt.Printf("stdout:\n%s\n", string(stdout))
+	if len(stderr) > 0 {
+		fmt.Printf("stderr:\n%s\n", string(stderr))
+	}
+	if len(stdout) > 0 {
+		fmt.Printf("stdout:\n%s\n", string(stdout))
+	}
 	return err
 }
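
With this change a spec can run its management cluster on a dedicated kind cluster instead of provisioning a workload cluster on the shared bootstrap cluster. A minimal sketch of opting in, mirroring the updated Describe blocks in test/e2e/clusterctl_upgrade_test.go; it assumes the usual suite-level variables (ctx, e2eConfig, clusterctlConfigPath, bootstrapClusterProxy, artifactFolder) are wired up as in the rest of the e2e suite:

```go
var _ = Describe("When testing clusterctl upgrades (v1.5=>current)", func() {
	ClusterctlUpgradeSpec(ctx, func() ClusterctlUpgradeSpecInput {
		return ClusterctlUpgradeSpecInput{
			E2EConfig:                 e2eConfig,
			ClusterctlConfigPath:      clusterctlConfigPath,
			BootstrapClusterProxy:     bootstrapClusterProxy,
			ArtifactFolder:            artifactFolder,
			InitWithBinary:            "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.0/clusterctl-{OS}-{ARCH}",
			InitWithProvidersContract: "v1beta1",
			InitWithKubernetesVersion: "v1.28.0",
			WorkloadKubernetesVersion: "v1.28.0",
			MgmtFlavor:                "topology",
			WorkloadFlavor:            "",
			// Run the old providers on a dedicated kind cluster instead of a workload
			// cluster created on the shared bootstrap cluster.
			UseKindForManagementCluster: true,
		}
	})
})
```

When the flag is set, CreateKindBootstrapClusterAndLoadImages resolves the node image for initKubernetesVersion through the kind mapper instead of DefaultNodeImageRepository, and on cleanup the spec disposes of the kind cluster and its proxy rather than dumping and deleting namespace-scoped resources on the bootstrap cluster.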