diff --git a/cmd/onclustertesting/clearstatus.go b/cmd/onclustertesting/clearstatus.go
new file mode 100644
index 0000000000..58d6fc60de
--- /dev/null
+++ b/cmd/onclustertesting/clearstatus.go
@@ -0,0 +1,35 @@
+package main
+
+import (
+	"github.com/openshift/machine-config-operator/test/framework"
+	"github.com/spf13/cobra"
+	"k8s.io/klog/v2"
+)
+
+var (
+	clearStatusCmd = &cobra.Command{
+		Use:   "clear-build-status",
+		Short: "Clears the build statuses on a MachineConfigPool used for on-cluster build testing",
+		Long:  "",
+		Run:   runClearStatusCmd,
+	}
+
+	clearStatusOpts struct {
+		poolName string
+	}
+)
+
+func init() {
+	rootCmd.AddCommand(clearStatusCmd)
+	clearStatusCmd.PersistentFlags().StringVar(&clearStatusOpts.poolName, "pool", defaultLayeredPoolName, "Pool name to clear build status on")
+}
+
+func runClearStatusCmd(_ *cobra.Command, _ []string) {
+	common(clearStatusOpts)
+
+	if clearStatusOpts.poolName == "" {
+		klog.Fatalln("No pool name provided!")
+	}
+
+	failOnError(clearBuildStatusesOnPool(framework.NewClientSet(""), clearStatusOpts.poolName))
+}
diff --git a/cmd/onclustertesting/extract.go b/cmd/onclustertesting/extract.go
new file mode 100644
index 0000000000..b28cdea1d9
--- /dev/null
+++ b/cmd/onclustertesting/extract.go
@@ -0,0 +1,56 @@
+package main
+
+import (
+	"github.com/openshift/machine-config-operator/test/framework"
+	"github.com/spf13/cobra"
+	"k8s.io/klog/v2"
+)
+
+var (
+	extractCmd = &cobra.Command{
+		Use:   "extract",
+		Short: "Extracts the Dockerfile and MachineConfig from an on-cluster build",
+		Long:  "",
+		Run:   runExtractCmd,
+	}
+
+	extractOpts struct {
+		poolName      string
+		machineConfig string
+		targetDir     string
+		noConfigMaps  bool
+	}
+)
+
+func init() {
+	rootCmd.AddCommand(extractCmd)
+	extractCmd.PersistentFlags().StringVar(&extractOpts.poolName, "pool", defaultLayeredPoolName, "Pool name to extract")
+	extractCmd.PersistentFlags().StringVar(&extractOpts.machineConfig, "machineconfig", "", "MachineConfig name to extract")
+	extractCmd.PersistentFlags().StringVar(&extractOpts.targetDir, "dir", "", "Dir to store extracted build objects")
+}
+
+func runExtractCmd(_ *cobra.Command, _ []string) {
+	common(extractOpts)
+
+	if extractOpts.poolName == "" && extractOpts.machineConfig == "" {
+		klog.Fatalln("No pool name or MachineConfig name provided!")
+	}
+
+	if extractOpts.poolName != "" && extractOpts.machineConfig != "" {
+		klog.Fatalln("Either pool name or MachineConfig must be provided.
Not both!") + } + + targetDir := getDir(extractOpts.targetDir) + + cs := framework.NewClientSet("") + + if extractOpts.machineConfig != "" { + failOnError(extractBuildObjectsForRenderedMC(cs, extractOpts.machineConfig, targetDir)) + return + } + + if extractOpts.poolName != "" { + failOnError(extractBuildObjectsForTargetPool(cs, extractOpts.poolName, targetDir)) + return + } +} diff --git a/cmd/onclustertesting/helpers.go b/cmd/onclustertesting/helpers.go new file mode 100644 index 0000000000..bf168ed4b4 --- /dev/null +++ b/cmd/onclustertesting/helpers.go @@ -0,0 +1,625 @@ +package main + +import ( + "bytes" + "compress/gzip" + "context" + "encoding/base64" + "encoding/json" + "flag" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "text/template" + "time" + + "github.com/openshift/machine-config-operator/test/framework" + "github.com/openshift/machine-config-operator/test/helpers" + + mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" + ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" + "github.com/openshift/machine-config-operator/pkg/daemon/constants" + "github.com/openshift/machine-config-operator/pkg/version" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/retry" + "k8s.io/klog/v2" + + apierrs "k8s.io/apimachinery/pkg/api/errors" + + "github.com/openshift/machine-config-operator/pkg/controller/build" +) + +// Compresses and base-64 encodes a given byte array. Ideal for loading an +// arbitrary byte array into a ConfigMap or Secret. +func compressAndEncode(payload []byte) (*bytes.Buffer, error) { + out := bytes.NewBuffer(nil) + + if len(payload) == 0 { + return out, nil + } + + // We need to base64-encode our gzipped data so we can marshal it in and out + // of a string since ConfigMaps and Secrets expect a textual representation. 
+ base64Enc := base64.NewEncoder(base64.StdEncoding, out) + defer base64Enc.Close() + + err := compress(bytes.NewBuffer(payload), base64Enc) + if err != nil { + return nil, fmt.Errorf("could not compress and encode payload: %w", err) + } + + err = base64Enc.Close() + if err != nil { + return nil, fmt.Errorf("could not close base64 encoder: %w", err) + } + + return out, err +} + +// Compresses a given io.Reader to a given io.Writer +func compress(r io.Reader, w io.Writer) error { + gz, err := gzip.NewWriterLevel(w, gzip.BestCompression) + if err != nil { + return fmt.Errorf("could not initialize gzip writer: %w", err) + } + + defer gz.Close() + + if _, err := io.Copy(gz, r); err != nil { + return fmt.Errorf("could not compress payload: %w", err) + } + + if err := gz.Close(); err != nil { + return fmt.Errorf("could not close gzipwriter: %w", err) + } + + return nil +} + +func storeMachineConfigOnDisk(cs *framework.ClientSet, pool *mcfgv1.MachineConfigPool, dir string) error { + mc, err := cs.MachineConfigs().Get(context.TODO(), pool.Spec.Configuration.Name, metav1.GetOptions{}) + if err != nil { + return err + } + + out, err := json.Marshal(mc) + if err != nil { + return err + } + + if err := os.Mkdir(filepath.Join(dir, "machineconfig"), 0o755); err != nil { + return err + } + + compressed, err := compressAndEncode(out) + if err != nil { + return err + } + + mcPath := filepath.Join(dir, "machineconfig", "machineconfig.json.gz") + + if err := os.WriteFile(mcPath, compressed.Bytes(), 0o755); err != nil { + return err + } + + klog.Infof("Stored MachineConfig %s on disk at %s", mc.Name, mcPath) + return nil +} + +// TODO: Dedupe this with the code from the buildcontroller package. +func getImageBuildRequest(cs *framework.ClientSet, targetPool string) (*build.ImageBuildRequest, error) { + osImageURLConfigMap, err := cs.CoreV1Interface.ConfigMaps(ctrlcommon.MCONamespace).Get(context.TODO(), "machine-config-osimageurl", metav1.GetOptions{}) + if err != nil { + return nil, err + } + + customDockerfile, err := cs.CoreV1Interface.ConfigMaps(ctrlcommon.MCONamespace).Get(context.TODO(), "on-cluster-build-custom-dockerfile", metav1.GetOptions{}) + if err != nil && !apierrs.IsNotFound(err) { + return nil, err + } + + var customDockerfileContents string + if customDockerfile != nil { + customDockerfileContents = customDockerfile.Data[targetPool] + } + + mcp, err := cs.MachineConfigPools().Get(context.TODO(), targetPool, metav1.GetOptions{}) + if err != nil { + return nil, err + } + + buildReq := build.ImageBuildRequest{ + Pool: mcp, + BaseImage: build.ImageInfo{ + Pullspec: osImageURLConfigMap.Data["baseOSContainerImage"], + }, + ExtensionsImage: build.ImageInfo{ + Pullspec: osImageURLConfigMap.Data["baseOSExtensionsContainerImage"], + }, + ReleaseVersion: osImageURLConfigMap.Data["releaseVersion"], + CustomDockerfile: customDockerfileContents, + } + + return &buildReq, nil +} + +func renderDockerfile(ibr *build.ImageBuildRequest, out io.Writer, copyToStdout bool) error { + // TODO: Export the template from the assets package. 
+ dockerfileTemplate, err := os.ReadFile("/Users/zzlotnik/go/src/github.com/openshift/machine-config-operator/pkg/controller/build/assets/Dockerfile.on-cluster-build-template") + if err != nil { + return err + } + + tmpl, err := template.New("dockerfile").Parse(string(dockerfileTemplate)) + if err != nil { + return err + } + + if copyToStdout { + out = io.MultiWriter(out, os.Stdout) + } + + return tmpl.Execute(out, ibr) +} + +func renderDockerfileToDisk(cs *framework.ClientSet, targetPool, dir string) error { + ibr, err := getImageBuildRequest(cs, targetPool) + if err != nil { + return err + } + + if err := os.MkdirAll(dir, 0o755); err != nil { + return err + } + + dockerfilePath := filepath.Join(dir, "Dockerfile") + + dockerfile, err := os.Create(dockerfilePath) + defer func() { + failOnError(dockerfile.Close()) + }() + if err != nil { + return err + } + + klog.Infof("Rendered Dockerfile to disk at %s", dockerfilePath) + return renderDockerfile(ibr, dockerfile, false) +} + +func createPool(cs *framework.ClientSet, poolName string) (*mcfgv1.MachineConfigPool, error) { + pool := &mcfgv1.MachineConfigPool{ + ObjectMeta: metav1.ObjectMeta{ + Name: poolName, + }, + Spec: mcfgv1.MachineConfigPoolSpec{ + MachineConfigSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: mcfgv1.MachineConfigRoleLabelKey, + Operator: metav1.LabelSelectorOpIn, + Values: []string{"worker", poolName}, + }, + }, + }, + NodeSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "node-role.kubernetes.io/" + poolName: "", + }, + }, + }, + } + + klog.Infof("Creating MachineConfigPool %q", pool.Name) + + _, err := cs.MachineConfigPools().Create(context.TODO(), pool, metav1.CreateOptions{}) + switch { + case apierrs.IsAlreadyExists(err): + klog.Infof("Pool %q already exists, will reuse", poolName) + case err != nil && !apierrs.IsAlreadyExists(err): + return nil, err + } + + klog.Infof("Waiting for pool %s to get a rendered MachineConfig", poolName) + + if _, err := waitForRenderedConfigs(cs, poolName, "99-worker-ssh"); err != nil { + return nil, err + } + + return cs.MachineConfigPools().Get(context.TODO(), poolName, metav1.GetOptions{}) +} + +func optInPool(cs *framework.ClientSet, targetPool string) error { + return retry.RetryOnConflict(retry.DefaultBackoff, func() error { + mcp, err := cs.MachineConfigPools().Get(context.TODO(), targetPool, metav1.GetOptions{}) + if err != nil { + return err + } + + if mcp.Labels == nil { + mcp.Labels = map[string]string{} + } + + mcp.Labels[ctrlcommon.LayeringEnabledPoolLabel] = "" + + klog.Infof("Opted MachineConfigPool %q into layering", mcp.Name) + _, err = cs.MachineConfigPools().Update(context.TODO(), mcp, metav1.UpdateOptions{}) + return err + }) +} + +func addImageToLayeredPool(cs *framework.ClientSet, pullspec, targetPool string) error { + return retry.RetryOnConflict(retry.DefaultBackoff, func() error { + mcp, err := cs.MachineConfigPools().Get(context.TODO(), targetPool, metav1.GetOptions{}) + if err != nil { + return err + } + + if mcp.Labels == nil { + if err := optInPool(cs, targetPool); err != nil { + return err + } + } + + if mcp.Annotations == nil { + mcp.Annotations = map[string]string{} + } + + mcp.Annotations[ctrlcommon.ExperimentalNewestLayeredImageEquivalentConfigAnnotationKey] = pullspec + mcp, err = cs.MachineConfigPools().Update(context.TODO(), mcp, metav1.UpdateOptions{}) + if err != nil { + return err + } + + klog.Infof("Applied image %q to MachineConfigPool %s", pullspec, mcp.Name) + return 
clearThenSetStatusOnPool(cs, targetPool, mcfgv1.MachineConfigPoolBuildSuccess, corev1.ConditionTrue) + }) +} + +func teardownPool(cs *framework.ClientSet, mcp *mcfgv1.MachineConfigPool) error { + if err := cs.MachineConfigPools().Delete(context.TODO(), mcp.Name, metav1.DeleteOptions{}); err != nil { + return err + } + + klog.Infof("Deleted pool %s", mcp.Name) + return nil +} + +func deleteAllNonStandardPools(cs *framework.ClientSet) error { + pools, err := cs.MachineConfigPools().List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return err + } + + for _, pool := range pools.Items { + if pool.Name != "master" && pool.Name != "worker" { + if err := teardownPool(cs, &pool); err != nil { + return err + } + } + } + + return nil +} + +func resetAllNodeAnnotations(cs *framework.ClientSet) error { + workerPool, err := cs.MachineConfigPools().Get(context.TODO(), "worker", metav1.GetOptions{}) + if err != nil { + return err + } + + nodes, err := cs.CoreV1Interface.Nodes().List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return err + } + + for _, node := range nodes.Items { + if err := resetNodeAnnotationsAndLabels(cs, workerPool, &node); err != nil { + return err + } + } + + return nil +} + +func resetNodeAnnotationsAndLabels(cs *framework.ClientSet, originalPool *mcfgv1.MachineConfigPool, node *corev1.Node) error { + return retry.RetryOnConflict(retry.DefaultBackoff, func() error { + node, err := cs.CoreV1Interface.Nodes().Get(context.TODO(), node.Name, metav1.GetOptions{}) + if err != nil { + return err + } + + expectedNodeRoles := map[string]struct{}{ + "node-role.kubernetes.io/worker": {}, + "node-role.kubernetes.io/master": {}, + "node-role.kubernetes.io/control-plane": {}, + } + + for label := range node.Labels { + _, isExpectedNodeRole := expectedNodeRoles[label] + if strings.HasPrefix(label, "node-role.kubernetes.io") && !isExpectedNodeRole { + delete(node.Labels, label) + } + } + + if _, ok := node.Labels[helpers.MCPNameToRole(originalPool.Name)]; ok { + node.Annotations[constants.CurrentMachineConfigAnnotationKey] = originalPool.Spec.Configuration.Name + node.Annotations[constants.DesiredMachineConfigAnnotationKey] = originalPool.Spec.Configuration.Name + delete(node.Annotations, constants.CurrentImageAnnotationKey) + delete(node.Annotations, constants.DesiredImageAnnotationKey) + } + + _, err = cs.CoreV1Interface.Nodes().Update(context.TODO(), node, metav1.UpdateOptions{}) + return err + }) +} + +func deleteAllMachineConfigsForPool(cs *framework.ClientSet, targetPool string) error { + machineConfigs, err := cs.MachineConfigs().List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return err + } + + for _, mc := range machineConfigs.Items { + if _, ok := mc.Annotations[helpers.MCPNameToRole(targetPool)]; ok && !strings.HasPrefix(mc.Name, "rendered-") { + if err := cs.MachineConfigs().Delete(context.TODO(), mc.Name, metav1.DeleteOptions{}); err != nil { + return err + } + } + } + + return nil +} + +func deleteBuildObjects(cs *framework.ClientSet, mcp *mcfgv1.MachineConfigPool) error { + return deleteBuildObjectsForRenderedMC(cs, mcp.Spec.Configuration.Name) +} + +func deleteBuildObjectsForRenderedMC(cs *framework.ClientSet, renderedMC string) error { + listOpts := metav1.ListOptions{ + LabelSelector: fmt.Sprintf("machineconfiguration.openshift.io/desiredConfig=%s", renderedMC), + } + + configMaps, err := cs.CoreV1Interface.ConfigMaps(ctrlcommon.MCONamespace).List(context.TODO(), listOpts) + + if err != nil { + return err + } + + for _, configMap := range 
configMaps.Items { + if err := cs.CoreV1Interface.ConfigMaps(ctrlcommon.MCONamespace).Delete(context.TODO(), configMap.Name, metav1.DeleteOptions{}); err != nil { + return err + } + klog.Infof("Deleted ConfigMap %s", configMap.Name) + } + + pods, err := cs.CoreV1Interface.Pods(ctrlcommon.MCONamespace).List(context.TODO(), listOpts) + if err != nil { + return err + } + + for _, pod := range pods.Items { + if err := cs.CoreV1Interface.Pods(ctrlcommon.MCONamespace).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{}); err != nil { + return err + } + klog.Infof("Deleted Pod %s", pod.Name) + } + + builds, err := cs.BuildV1Interface.Builds(ctrlcommon.MCONamespace).List(context.TODO(), listOpts) + if err != nil { + return err + } + + for _, build := range builds.Items { + if err := cs.BuildV1Interface.Builds(ctrlcommon.MCONamespace).Delete(context.TODO(), build.Name, metav1.DeleteOptions{}); err != nil { + return err + } + klog.Infof("Deleted Build %s", build.Name) + } + + return nil +} + +func waitForRenderedConfigs(cs *framework.ClientSet, pool string, mcNames ...string) (string, error) { + var renderedConfig string + startTime := time.Now() + found := make(map[string]bool) + + ctx := context.Background() + + if err := wait.PollUntilContextTimeout(ctx, 2*time.Second, 5*time.Minute, true, func(ctx context.Context) (bool, error) { + // Set up the list + for _, name := range mcNames { + found[name] = false + } + + // Update found based on the MCP + mcp, err := cs.MachineConfigPools().Get(ctx, pool, metav1.GetOptions{}) + if err != nil { + return false, err + } + for _, mc := range mcp.Spec.Configuration.Source { + if _, ok := found[mc.Name]; ok { + found[mc.Name] = true + } + } + + // If any are still false, then they weren't included in the MCP + for _, nameFound := range found { + if !nameFound { + return false, nil + } + } + + // All the required names were found + renderedConfig = mcp.Spec.Configuration.Name + return true, nil + }); err != nil { + return "", fmt.Errorf("machine configs %v hasn't been picked by pool %s (waited %s): %w", notFoundNames(found), pool, time.Since(startTime), err) + } + klog.Infof("Pool %s has rendered configs %v with %s (waited %v)", pool, mcNames, renderedConfig, time.Since(startTime)) + return renderedConfig, nil +} + +func notFoundNames(foundNames map[string]bool) []string { + out := []string{} + for name, found := range foundNames { + if !found { + out = append(out, name) + } + } + return out +} + +func clearBuildStatusesOnPool(cs *framework.ClientSet, targetPool string) error { + return retry.RetryOnConflict(retry.DefaultBackoff, func() error { + mcp, err := cs.MachineConfigPools().Get(context.TODO(), targetPool, metav1.GetOptions{}) + if err != nil { + return err + } + + buildConditions := map[mcfgv1.MachineConfigPoolConditionType]struct{}{ + mcfgv1.MachineConfigPoolBuildSuccess: {}, + mcfgv1.MachineConfigPoolBuildFailed: {}, + mcfgv1.MachineConfigPoolBuildPending: {}, + mcfgv1.MachineConfigPoolBuilding: {}, + } + + filtered := []mcfgv1.MachineConfigPoolCondition{} + for _, cond := range mcp.Status.Conditions { + if _, ok := buildConditions[cond.Type]; !ok { + filtered = append(filtered, cond) + } + } + + mcp.Status.Conditions = filtered + _, err = cs.MachineConfigPools().UpdateStatus(context.TODO(), mcp, metav1.UpdateOptions{}) + if err != nil { + return err + } + + klog.Infof("Cleared build statuses on MachineConfigPool %s", targetPool) + return nil + }) +} + +func setStatusOnPool(cs *framework.ClientSet, targetPool string, condType 
mcfgv1.MachineConfigPoolConditionType, status corev1.ConditionStatus) error { + return retry.RetryOnConflict(retry.DefaultBackoff, func() error { + mcp, err := cs.MachineConfigPools().Get(context.TODO(), targetPool, metav1.GetOptions{}) + if err != nil { + return err + } + + newCond := mcfgv1.NewMachineConfigPoolCondition(condType, status, "", "") + mcfgv1.SetMachineConfigPoolCondition(&mcp.Status, *newCond) + + _, err = cs.MachineConfigPools().UpdateStatus(context.TODO(), mcp, metav1.UpdateOptions{}) + if err != nil { + return err + } + + klog.Infof("Set %s / %s on %s", condType, status, targetPool) + + return nil + }) +} + +func clearThenSetStatusOnPool(cs *framework.ClientSet, targetPool string, condType mcfgv1.MachineConfigPoolConditionType, status corev1.ConditionStatus) error { + return retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := clearBuildStatusesOnPool(cs, targetPool); err != nil { + return err + } + + return setStatusOnPool(cs, targetPool, condType, status) + }) +} + +func extractBuildObjectsForTargetPool(cs *framework.ClientSet, targetPool, targetDir string) error { + mcp, err := cs.MachineConfigPools().Get(context.TODO(), targetPool, metav1.GetOptions{}) + if err != nil { + return err + } + + return extractBuildObjects(cs, mcp, targetDir) +} + +func extractBuildObjects(cs *framework.ClientSet, mcp *mcfgv1.MachineConfigPool, targetDir string) error { + return extractBuildObjectsForRenderedMC(cs, mcp.Spec.Configuration.Name, targetDir) +} + +func extractBuildObjectsForRenderedMC(cs *framework.ClientSet, mcName, targetDir string) error { + ctx := context.Background() + + dockerfileCM, err := cs.CoreV1Interface.ConfigMaps(ctrlcommon.MCONamespace).Get(ctx, "dockerfile-"+mcName, metav1.GetOptions{}) + if err != nil { + return err + } + + mcCM, err := cs.CoreV1Interface.ConfigMaps(ctrlcommon.MCONamespace).Get(ctx, "mc-"+mcName, metav1.GetOptions{}) + if err != nil { + return err + } + + klog.Infof("Extracted Dockerfile from %q", dockerfileCM.Name) + klog.Infof("Extracted MachineConfig %s from %q", mcName, mcCM.Name) + + return storeBuildObjectsOnDisk(dockerfileCM.Data["Dockerfile"], mcCM.Data["machineconfig.json.gz"], filepath.Join(targetDir, "build-objects-"+mcName)) +} + +func storeBuildObjectsOnDisk(dockerfile, machineConfig, targetDir string) error { + mcDirName := filepath.Join(targetDir, "machineconfig") + dockerfileName := filepath.Join(targetDir, "Dockerfile") + mcFilename := filepath.Join(targetDir, "machineconfig.json.gz") + + if err := os.MkdirAll(mcDirName, 0o755); err != nil { + return err + } + + if err := os.WriteFile(dockerfileName, []byte(dockerfile), 0o755); err != nil { + return err + } + + klog.Infof("Wrote Dockerfile to %s", dockerfileName) + + if err := os.WriteFile(mcFilename, []byte(machineConfig), 0o755); err != nil { + return err + } + + klog.Infof("Wrote MachineConfig to %s", mcFilename) + + return nil +} + +func getDir(target string) string { + if target != "" { + return target + } + + cwd, err := os.Getwd() + failOnError(err) + return cwd +} + +func failOnError(err error) { + if err != nil { + klog.Fatalln(err) + } +} + +func common(opts interface{}) { + flag.Set("v", "4") + flag.Set("logtostderr", "true") + flag.Parse() + + klog.V(2).Infof("Options parsed: %+v", opts) + + // To help debugging, immediately log version + klog.Infof("Version: %+v (%s)", version.Raw, version.Hash) +} + +func isEmpty(in string) bool { + return in == "" +} diff --git a/cmd/onclustertesting/machineconfig.go b/cmd/onclustertesting/machineconfig.go new 
file mode 100644
index 0000000000..46acb730bf
--- /dev/null
+++ b/cmd/onclustertesting/machineconfig.go
@@ -0,0 +1,78 @@
+package main
+
+import (
+	"context"
+	"fmt"
+	"path/filepath"
+
+	ign3types "github.com/coreos/ignition/v2/config/v3_4/types"
+	"github.com/openshift/machine-config-operator/test/framework"
+	"github.com/openshift/machine-config-operator/test/helpers"
+	"github.com/spf13/cobra"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/uuid"
+	"k8s.io/klog/v2"
+)
+
+var (
+	machineConfigCmd = &cobra.Command{
+		Use:   "machineconfig",
+		Short: "Creates a MachineConfig in a layered MachineConfigPool to cause a build",
+		Long:  "",
+		Run:   runMachineConfigCmd,
+	}
+
+	machineConfigOpts struct {
+		poolName      string
+		machineConfig string
+	}
+)
+
+func init() {
+	rootCmd.AddCommand(machineConfigCmd)
+	machineConfigCmd.PersistentFlags().StringVar(&machineConfigOpts.poolName, "pool", defaultLayeredPoolName, "Pool name to target")
+	machineConfigCmd.PersistentFlags().StringVar(&machineConfigOpts.machineConfig, "machineconfig", "", "MachineConfig name to create")
+}
+
+func runMachineConfigCmd(_ *cobra.Command, _ []string) {
+	common(machineConfigOpts)
+
+	if machineConfigOpts.poolName == "" {
+		klog.Fatalln("No pool name provided!")
+	}
+
+	cs := framework.NewClientSet("")
+
+	failOnError(createMachineConfig(cs, machineConfigOpts.poolName, machineConfigOpts.machineConfig))
+}
+
+func createMachineConfig(cs *framework.ClientSet, targetPool, name string) error {
+	_, err := cs.MachineConfigPools().Get(context.TODO(), targetPool, metav1.GetOptions{})
+	if err != nil {
+		return err
+	}
+
+	if name == "" {
+		name = fmt.Sprintf("%s-%s", targetPool, uuid.NewUUID())
+	}
+
+	mc := helpers.NewMachineConfig(name, helpers.MCLabelForRole(targetPool), "", []ign3types.File{
+		helpers.CreateEncodedIgn3File(filepath.Join("/etc", name), name, 420),
+	})
+
+	_, err = cs.MachineConfigs().Create(context.TODO(), mc, metav1.CreateOptions{})
+	if err != nil {
+		return err
+	}
+
+	klog.Infof("Created MachineConfig %q targeting pool %q", name, targetPool)
+
+	renderedConfig, err := waitForRenderedConfigs(cs, targetPool, name)
+	if err != nil {
+		return err
+	}
+
+	klog.Infof("MachineConfigPool %s got rendered config %q", targetPool, renderedConfig)
+
+	return nil
+}
diff --git a/cmd/onclustertesting/main.go b/cmd/onclustertesting/main.go
new file mode 100644
index 0000000000..759b10eca0
--- /dev/null
+++ b/cmd/onclustertesting/main.go
@@ -0,0 +1,30 @@
+package main
+
+import (
+	"flag"
+	"os"
+
+	"github.com/spf13/cobra"
+
+	"k8s.io/component-base/cli"
+)
+
+const (
+	defaultLayeredPoolName string = "layered"
+)
+
+var (
+	rootCmd = &cobra.Command{
+		Use:   "onclustertesting",
+		Short: "Help with testing on-cluster builds",
+		Long:  "",
+	}
+)
+
+func init() {
+	rootCmd.PersistentFlags().AddGoFlagSet(flag.CommandLine)
+}
+
+func main() {
+	os.Exit(cli.Run(rootCmd))
+}
diff --git a/cmd/onclustertesting/node.go b/cmd/onclustertesting/node.go
new file mode 100644
index 0000000000..b54d2ad748
--- /dev/null
+++ b/cmd/onclustertesting/node.go
@@ -0,0 +1,154 @@
+package main
+
+import (
+	"context"
+	"fmt"
+
+	ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common"
+	"github.com/openshift/machine-config-operator/test/framework"
+	"github.com/openshift/machine-config-operator/test/helpers"
+	"github.com/spf13/cobra"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/util/retry"
+	"k8s.io/klog/v2"
+)
+
+var (
+	optInCmd = &cobra.Command{
+		Use:   "optin",
+		Short:
"Opts a node into on-cluster builds", + Long: "", + Run: runOptInCmd, + } + + optOutCmd = &cobra.Command{ + Use: "optout", + Short: "Opts a node out of on-cluster builds", + Long: "", + Run: runOptOutCmd, + } + + nodeOpts struct { + poolName string + nodeName string + force bool + } +) + +func init() { + rootCmd.AddCommand(optInCmd) + optInCmd.PersistentFlags().StringVar(&nodeOpts.poolName, "pool", defaultLayeredPoolName, "Pool name") + optInCmd.PersistentFlags().StringVar(&nodeOpts.nodeName, "node", "", "MachineConfig name") + + rootCmd.AddCommand(optOutCmd) + optOutCmd.PersistentFlags().StringVar(&nodeOpts.poolName, "pool", defaultLayeredPoolName, "Pool name") + optOutCmd.PersistentFlags().StringVar(&nodeOpts.nodeName, "node", "", "MachineConfig name") + optOutCmd.PersistentFlags().BoolVar(&nodeOpts.force, "force", false, "Forcefully opt node out") +} + +func runOptInCmd(_ *cobra.Command, _ []string) { + common(nodeOpts) + + if isEmpty(nodeOpts.poolName) { + klog.Fatalln("No pool name provided!") + } + + if isEmpty(nodeOpts.nodeName) { + klog.Fatalln("No node name provided!") + } + + failOnError(optInNode(framework.NewClientSet(""), nodeOpts.nodeName, nodeOpts.poolName)) +} + +func runOptOutCmd(_ *cobra.Command, _ []string) { + common(nodeOpts) + + if !nodeOpts.force && isEmpty(nodeOpts.poolName) { + klog.Fatalln("No pool name provided!") + } + + if isEmpty(nodeOpts.nodeName) { + klog.Fatalln("No node name provided!") + } + + failOnError(optOutNode(framework.NewClientSet(""), nodeOpts.nodeName, nodeOpts.poolName, nodeOpts.force)) +} + +func optInNode(cs *framework.ClientSet, nodeName, targetPool string) error { + return retry.RetryOnConflict(retry.DefaultBackoff, func() error { + mcp, err := cs.MachineConfigPools().Get(context.TODO(), targetPool, metav1.GetOptions{}) + if err != nil { + return err + } + + klog.Infof("Found pool %q", targetPool) + + if _, ok := mcp.Labels[ctrlcommon.LayeringEnabledPoolLabel]; !ok { + return fmt.Errorf("Pool %q is not opted into layering", mcp.Name) + } + + node, err := cs.CoreV1Interface.Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) + if err != nil { + return err + } + + klog.Infof("Found node %q", nodeName) + + invalidNodeRoles := []string{ + helpers.MCPNameToRole("master"), + helpers.MCPNameToRole("control-plane"), + } + + for _, invalidNodeRole := range invalidNodeRoles { + if _, ok := node.Labels[invalidNodeRole]; ok { + return fmt.Errorf("cannot opt node with role %q into layering", invalidNodeRole) + } + } + + if _, ok := node.Labels[helpers.MCPNameToRole(targetPool)]; ok { + return fmt.Errorf("node %q already has label %s", node.Name, helpers.MCPNameToRole(targetPool)) + } + + node.Labels[helpers.MCPNameToRole(targetPool)] = "" + + _, err = cs.CoreV1Interface.Nodes().Update(context.TODO(), node, metav1.UpdateOptions{}) + if err == nil { + klog.Infof("Node %q opted into layering via pool %q", node.Name, mcp.Name) + } + return err + }) +} + +func optOutNode(cs *framework.ClientSet, nodeName, poolName string, force bool) error { + return retry.RetryOnConflict(retry.DefaultBackoff, func() error { + node, err := cs.CoreV1Interface.Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) + if err != nil { + return err + } + + workerMCP, err := cs.MachineConfigPools().Get(context.TODO(), "worker", metav1.GetOptions{}) + if err != nil { + return err + } + + if force { + klog.Infof("Forcefully opting node %q out of layering", node.Name) + return resetNodeAnnotationsAndLabels(cs, workerMCP, node) + } + + role := helpers.MCPNameToRole(poolName) 
+ + if _, ok := node.Labels[role]; !ok { + return fmt.Errorf("node %q does not have a label matching %q", node.Name, role) + } + + delete(node.Labels, role) + + _, err = cs.CoreV1Interface.Nodes().Update(context.TODO(), node, metav1.UpdateOptions{}) + if err == nil { + klog.Infof("Opted node %q out of on-cluster builds", node.Name) + } + + return err + }) +} diff --git a/cmd/onclustertesting/render.go b/cmd/onclustertesting/render.go new file mode 100644 index 0000000000..cc97608d49 --- /dev/null +++ b/cmd/onclustertesting/render.go @@ -0,0 +1,49 @@ +package main + +import ( + "context" + "fmt" + "path/filepath" + + "github.com/openshift/machine-config-operator/test/framework" + "github.com/spf13/cobra" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var ( + renderCmd = &cobra.Command{ + Use: "render", + Short: "Renders the on-cluster build Dockerfile to disk", + Long: "", + Run: runRenderCmd, + } + + renderOpts struct { + poolName string + includeMachineConfig bool + targetDir string + } +) + +func init() { + rootCmd.AddCommand(renderCmd) + renderCmd.PersistentFlags().StringVar(&renderOpts.poolName, "pool", defaultLayeredPoolName, "Pool name to render") + renderCmd.PersistentFlags().StringVar(&renderOpts.targetDir, "dir", "", "Dir to store rendered Dockerfile and MachineConfig in") +} + +func runRenderCmd(_ *cobra.Command, _ []string) { + common(renderOpts) + + if renderOpts.poolName == "" { + failOnError(fmt.Errorf("no pool name provided")) + } + + cs := framework.NewClientSet("") + + dir := filepath.Join(getDir(renderOpts.targetDir), renderOpts.poolName) + + failOnError(renderDockerfileToDisk(cs, renderOpts.poolName, dir)) + mcp, err := cs.MachineConfigPools().Get(context.TODO(), renderOpts.poolName, metav1.GetOptions{}) + failOnError(err) + failOnError(storeMachineConfigOnDisk(cs, mcp, dir)) +} diff --git a/cmd/onclustertesting/setimage.go b/cmd/onclustertesting/setimage.go new file mode 100644 index 0000000000..4512ef27d8 --- /dev/null +++ b/cmd/onclustertesting/setimage.go @@ -0,0 +1,53 @@ +package main + +import ( + "github.com/openshift/machine-config-operator/test/framework" + "github.com/spf13/cobra" + "k8s.io/client-go/util/retry" + "k8s.io/klog/v2" +) + +var ( + setImageCmd = &cobra.Command{ + Use: "set-image", + Short: "Sets an image pullspec on a MachineConfigPool", + Long: "", + Run: runSetImageCmd, + } + + setImageOpts struct { + poolName string + imageName string + } +) + +func init() { + rootCmd.AddCommand(setImageCmd) + setImageCmd.PersistentFlags().StringVar(&setImageOpts.poolName, "pool", defaultLayeredPoolName, "Pool name to set build status on") + setImageCmd.PersistentFlags().StringVar(&setImageOpts.imageName, "image", "", "The image pullspec to set") +} + +func runSetImageCmd(_ *cobra.Command, _ []string) { + common(setImageOpts) + + if setImageOpts.poolName == "" { + klog.Fatalln("No pool name provided!") + } + + if setImageOpts.imageName == "" { + klog.Fatalln("No image name provided!") + } + + cs := framework.NewClientSet("") + failOnError(setImageOnPool(cs, setImageOpts.poolName, setImageOpts.imageName)) +} + +func setImageOnPool(cs *framework.ClientSet, targetPool, pullspec string) error { + return retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := optInPool(cs, targetPool); err != nil { + return err + } + + return addImageToLayeredPool(cs, pullspec, targetPool) + }) +} diff --git a/cmd/onclustertesting/setstatus.go b/cmd/onclustertesting/setstatus.go new file mode 100644 index 0000000000..1260070f90 --- /dev/null +++ 
b/cmd/onclustertesting/setstatus.go @@ -0,0 +1,72 @@ +package main + +import ( + mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" + "github.com/openshift/machine-config-operator/test/framework" + "github.com/spf13/cobra" + corev1 "k8s.io/api/core/v1" + "k8s.io/klog/v2" +) + +var ( + setStatusCmd = &cobra.Command{ + Use: "set-build-status", + Short: "Sets the build status on a given MachineConfigPool", + Long: "", + Run: runSetStatusCmd, + } + + setStatusOpts struct { + poolName string + condType string + status bool + } +) + +func init() { + rootCmd.AddCommand(setStatusCmd) + setStatusCmd.PersistentFlags().StringVar(&setStatusOpts.poolName, "pool", defaultLayeredPoolName, "Pool name to set build status on") + setStatusCmd.PersistentFlags().StringVar(&setStatusOpts.condType, "type", "", "The condition type to set") + setStatusCmd.PersistentFlags().BoolVar(&setStatusOpts.status, "status", false, "Condition true or false") +} + +func runSetStatusCmd(_ *cobra.Command, _ []string) { + common(setStatusOpts) + + if setStatusOpts.poolName == "" { + klog.Fatalln("No pool name provided!") + } + + validCondTypes := []mcfgv1.MachineConfigPoolConditionType{ + mcfgv1.MachineConfigPoolUpdated, + mcfgv1.MachineConfigPoolUpdating, + mcfgv1.MachineConfigPoolNodeDegraded, + mcfgv1.MachineConfigPoolRenderDegraded, + mcfgv1.MachineConfigPoolDegraded, + mcfgv1.MachineConfigPoolBuildPending, + mcfgv1.MachineConfigPoolBuilding, + mcfgv1.MachineConfigPoolBuildSuccess, + mcfgv1.MachineConfigPoolBuildFailed, + } + + var condTypeToSet mcfgv1.MachineConfigPoolConditionType + for _, condType := range validCondTypes { + if string(condType) == setStatusOpts.condType { + condTypeToSet = mcfgv1.MachineConfigPoolConditionType(setStatusOpts.condType) + break + } + } + + if condTypeToSet == "" { + klog.Fatalf("unknown condition type %q, valid options: %v", setStatusOpts.condType, validCondTypes) + } + + status := map[bool]corev1.ConditionStatus{ + true: corev1.ConditionTrue, + false: corev1.ConditionFalse, + } + + if err := setStatusOnPool(framework.NewClientSet(""), setStatusOpts.poolName, condTypeToSet, status[setStatusOpts.status]); err != nil { + klog.Fatal(err) + } +} diff --git a/cmd/onclustertesting/setup.go b/cmd/onclustertesting/setup.go new file mode 100644 index 0000000000..f82fffefa7 --- /dev/null +++ b/cmd/onclustertesting/setup.go @@ -0,0 +1,70 @@ +package main + +import ( + "flag" + + "github.com/openshift/machine-config-operator/pkg/version" + "github.com/openshift/machine-config-operator/test/framework" + "github.com/spf13/cobra" + "k8s.io/klog/v2" +) + +var ( + setupCmd = &cobra.Command{ + Use: "setup", + Short: "Sets up pool for on-cluster build testing", + Long: "", + Run: runSetupCmd, + } + + setupOpts struct { + poolName string + waitForBuildInfo bool + } +) + +func init() { + rootCmd.AddCommand(setupCmd) + setupCmd.PersistentFlags().StringVar(&setupOpts.poolName, "pool", defaultLayeredPoolName, "Pool name to setup") + setupCmd.PersistentFlags().BoolVar(&setupOpts.waitForBuildInfo, "wait-for-build", false, "Wait for build info") +} + +func runSetupCmd(_ *cobra.Command, _ []string) { + flag.Set("v", "4") + flag.Set("logtostderr", "true") + flag.Parse() + + klog.V(2).Infof("Options parsed: %+v", setupOpts) + + // To help debugging, immediately log version + klog.Infof("Version: %+v (%s)", version.Raw, version.Hash) + + if setupOpts.poolName == "" { + klog.Fatalln("No pool name provided!") + } + + if err := mobSetup(framework.NewClientSet(""), 
setupOpts.poolName, setupOpts.waitForBuildInfo); err != nil { + klog.Fatal(err) + } +} + +func mobSetup(cs *framework.ClientSet, targetPool string, getBuildInfo bool) error { + if _, err := createPool(cs, targetPool); err != nil { + return err + } + + if err := optInPool(cs, targetPool); err != nil { + return err + } + + if !getBuildInfo { + return nil + } + + return waitForBuildInfo(cs, targetPool) +} + +func waitForBuildInfo(_ *framework.ClientSet, _ string) error { + klog.Infof("no-op for now") + return nil +} diff --git a/cmd/onclustertesting/teardown.go b/cmd/onclustertesting/teardown.go new file mode 100644 index 0000000000..5f84c64533 --- /dev/null +++ b/cmd/onclustertesting/teardown.go @@ -0,0 +1,81 @@ +package main + +import ( + "context" + "errors" + "io/fs" + + "github.com/openshift/machine-config-operator/test/framework" + "github.com/spf13/cobra" + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog/v2" +) + +var ( + teardownCmd = &cobra.Command{ + Use: "teardown", + Short: "Tears down the pool for on-cluster build testing", + Long: "", + Run: runTeardownCmd, + } + + teardownOpts struct { + poolName string + extract bool + dir string + } +) + +func init() { + rootCmd.AddCommand(teardownCmd) + teardownCmd.PersistentFlags().StringVar(&teardownOpts.poolName, "pool", defaultLayeredPoolName, "Pool name to teardown") + teardownCmd.PersistentFlags().BoolVar(&teardownOpts.extract, "extract-objects", false, "Extract and store build objects on disk before teardown") + teardownCmd.PersistentFlags().StringVar(&teardownOpts.dir, "dir", "", "Dir to store extract build objects") +} + +func runTeardownCmd(_ *cobra.Command, _ []string) { + common(teardownOpts) + + if teardownOpts.poolName == "" { + klog.Fatalln("No pool name provided!") + } + + targetDir := getDir(teardownOpts.dir) + + failOnError(mobTeardown(framework.NewClientSet(""), teardownOpts.poolName, targetDir, teardownOpts.extract)) +} + +func mobTeardown(cs *framework.ClientSet, targetPool, targetDir string, extractObjects bool) error { + mcp, err := cs.MachineConfigPools().Get(context.TODO(), targetPool, metav1.GetOptions{}) + if err != nil { + return err + } + + if extractObjects { + klog.Infof("Extracting build objects (if they exist) to %s", targetDir) + if err := extractBuildObjects(cs, mcp, targetDir); err != nil { + if errors.Is(err, fs.ErrNotExist) || apierrs.IsNotFound(err) { + klog.Warningf("Recovered from: %s", err) + } else { + return err + } + } + } else { + klog.Infof("Skipping build object extraction") + } + + if err := deleteBuildObjects(cs, mcp); err != nil { + return err + } + + if err := teardownPool(cs, mcp); err != nil { + return err + } + + if err := deleteAllNonStandardPools(cs); err != nil { + return err + } + + return deleteAllMachineConfigsForPool(cs, targetPool) +}
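Note on the stored MachineConfig artifact: compressAndEncode in helpers.go gzips the MachineConfig JSON and then base64-encodes the result so it can be carried as text (for example inside a ConfigMap), which means the machineconfig.json.gz file written by storeMachineConfigOnDisk and storeBuildObjectsOnDisk is not a plain gzip stream. Below is a minimal sketch, not part of this patch, of reading such an artifact back, assuming it was produced by compressAndEncode; the file path and the standalone main wrapper are illustrative.

// decode.go -- illustrative only; reverses the encoding used by compressAndEncode.
package main

import (
	"compress/gzip"
	"encoding/base64"
	"fmt"
	"io"
	"os"
)

func main() {
	// Path as written by storeMachineConfigOnDisk (illustrative).
	f, err := os.Open("machineconfig/machineconfig.json.gz")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Reverse the write path: base64-decode first, then gunzip.
	gz, err := gzip.NewReader(base64.NewDecoder(base64.StdEncoding, f))
	if err != nil {
		panic(err)
	}
	defer gz.Close()

	decoded, err := io.ReadAll(gz)
	if err != nil {
		panic(err)
	}

	// The result is the MachineConfig JSON produced by json.Marshal above.
	fmt.Println(string(decoded))
}

Decoding in this order (base64, then gzip) mirrors compressAndEncode, which streams the gzip output through a base64 encoder before it reaches the destination buffer.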