Bump CAPI to v1.7.5
haijianyang committed Aug 23, 2024
1 parent d9ab098 commit 521fdee
Showing 51 changed files with 717 additions and 953 deletions.
6 changes: 3 additions & 3 deletions .github/workflows/build.yml
@@ -18,20 +18,20 @@ jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4

- name: Calculate go version
run: echo "go_version=$(make go-version)" >> $GITHUB_ENV

- name: Set up Go
uses: actions/setup-go@v4
uses: actions/setup-go@v5
with:
go-version: ${{ env.go_version }}

- run: make lint

- run: make test

- uses: codecov/codecov-action@v3
- uses: codecov/codecov-action@v4
with:
token: ${{ secrets.CODECOV_TOKEN }}
8 changes: 4 additions & 4 deletions .github/workflows/release.yml
@@ -21,18 +21,18 @@ jobs:
fi
- name: Checkout code
uses: actions/checkout@v3
uses: actions/checkout@v4

- name: Calculate go version
run: echo "go_version=$(make go-version)" >> $GITHUB_ENV

- name: Set up Go
uses: actions/setup-go@v4
uses: actions/setup-go@v5
with:
go-version: ${{ env.go_version }}

- name: Login docker
uses: docker/login-action@v2
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
@@ -41,7 +41,7 @@ jobs:
run: make release

- name: Generate draft release
uses: softprops/action-gh-release@v1
uses: softprops/action-gh-release@v2
with:
draft: true
tag_name: ${{ env.TAG }}
2 changes: 1 addition & 1 deletion .go-version
@@ -1 +1 @@
1.21.11
1.22.5
6 changes: 3 additions & 3 deletions .golangci.yml
@@ -104,9 +104,9 @@ linters-settings:
- pkg: github.com/smartxworks/cluster-api-provider-elf/api/v1beta1
alias: infrav1
staticcheck:
go: "1.21"
go: "1.22"
stylecheck:
go: "1.21"
go: "1.22"
issues:
max-same-issues: 0
max-issues-per-linter: 0
@@ -169,7 +169,7 @@ issues:

run:
timeout: 10m
go: "1.21"
go: "1.22"
skip-files:
- "zz_generated.*\\.go$"
allow-parallel-runners: true
2 changes: 1 addition & 1 deletion Dockerfile
@@ -18,7 +18,7 @@
ARG ARCH

# Build the manager binary
FROM golang:1.21.11 as builder
FROM golang:1.22.5 as builder
WORKDIR /workspace

# Run this with docker build --build_arg $(go env GOPROXY) to override the goproxy
10 changes: 5 additions & 5 deletions Makefile
@@ -25,7 +25,7 @@ VERSION ?= $(shell cat clusterctl-settings.json | jq .config.nextVersion -r)
#
# Go.
#
GO_VERSION ?= 1.21.11
GO_VERSION ?= 1.22.5

# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
ifeq (,$(shell go env GOBIN))
@@ -151,23 +151,23 @@ test-e2e: ginkgo kustomize kind ## Run e2e tests

KUSTOMIZE = $(shell pwd)/bin/kustomize
kustomize: ## Download kustomize locally if necessary.
$(call go-get-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/[email protected])
$(call go-get-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/[email protected])

CONTROLLER_GEN = $(shell pwd)/bin/controller-gen
controller-gen: ## Download controller-gen locally if necessary.
$(call go-get-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/[email protected])

GINKGO := $(shell pwd)/bin/ginkgo
ginkgo: ## Download ginkgo locally if necessary.
$(call go-get-tool,$(GINKGO),github.com/onsi/ginkgo/v2/ginkgo@v2.13.1)
$(call go-get-tool,$(GINKGO),github.com/onsi/ginkgo/v2/ginkgo@v2.20.0)

KIND := $(shell pwd)/bin/kind
kind: ## Download kind locally if necessary.
$(call go-get-tool,$(KIND),sigs.k8s.io/kind@v0.20.0)
$(call go-get-tool,$(KIND),sigs.k8s.io/kind@v0.23.0)

GOLANGCI_LINT := $(shell pwd)/bin/golangci-lint
golangci-lint: ## Download golangci-lint locally if necessary.
$(call go-get-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/cmd/golangci-lint@v1.55.2)
$(call go-get-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/cmd/golangci-lint@v1.57.2)

## --------------------------------------
## Linting and fixing linter errors
2 changes: 1 addition & 1 deletion api/v1beta1/types.go
@@ -104,7 +104,7 @@ type NetworkSpec struct {
}

func (n *NetworkSpec) RequiresStaticIPs() bool {
for i := 0; i < len(n.Devices); i++ {
for i := range len(n.Devices) {
if n.Devices[i].NetworkType == NetworkTypeIPV4 && len(n.Devices[i].IPAddrs) == 0 {
return true
}
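The loop change above, repeated in elfmachine_controller.go below, swaps the classic three-clause form for i := 0; i < len(x); i++ for the range-over-integer form for i := range len(x), which Go 1.22 introduced and which matches the Go 1.22.5 toolchain bump in this commit. A minimal, self-contained sketch of the pattern; the device type and field names are illustrative stand-ins, not the provider's real API:

    package main

    import "fmt"

    // device is an illustrative stand-in for the provider's network device spec;
    // the real type lives in api/v1beta1.
    type device struct {
        networkType string
        ipAddrs     []string
    }

    // requiresStaticIPs mirrors the shape of RequiresStaticIPs above: true when
    // any IPV4 device has no IP addresses configured yet.
    func requiresStaticIPs(devices []device) bool {
        // Go 1.22+: ranging over an int visits i = 0, 1, ..., len(devices)-1,
        // exactly the indices the old three-clause loop visited.
        for i := range len(devices) {
            if devices[i].networkType == "IPV4" && len(devices[i].ipAddrs) == 0 {
                return true
            }
        }
        return false
    }

    func main() {
        fmt.Println(requiresStaticIPs([]device{{networkType: "IPV4"}})) // true
    }

Ranging over an int counts from 0 up to, but not including, the value, so the rewritten loops visit the same indices as before.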
2 changes: 1 addition & 1 deletion controllers/elfcluster_controller.go
@@ -285,7 +285,7 @@ func (r *ElfClusterReconciler) cleanOrphanLabels(ctx goctx.Context, clusterCtx *
keys := []string{towerresources.GetVMLabelClusterName(), towerresources.GetVMLabelVIP(), towerresources.GetVMLabelNamespace()}
labelIDs, err := clusterCtx.VMService.CleanUnusedLabels(keys)
if err != nil {
log.Error(err, fmt.Sprintf("Warning: failed to clean orphan labels in Tower %s", clusterCtx.ElfCluster.Spec.Tower.Server))
log.Error(err, "Warning: failed to clean orphan labels in Tower "+clusterCtx.ElfCluster.Spec.Tower.Server)

return
}
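Several log calls in this commit, like the one above, drop fmt.Sprintf with a single %s verb in favor of plain string concatenation; this is likely prompted by a lint rule in the newer golangci-lint pinned in the Makefile (for example perfsprint), though the commit itself does not say so. A small sketch showing that the two forms produce the same message; the server value is made up for illustration:

    package main

    import "fmt"

    func main() {
        server := "tower.example.com" // illustrative; not a value from the repo

        // Before: a single %s substitution through the formatting machinery.
        before := fmt.Sprintf("Warning: failed to clean orphan labels in Tower %s", server)

        // After: plain concatenation builds the identical string without the
        // format-string parsing, which is what the commit switches to.
        after := "Warning: failed to clean orphan labels in Tower " + server

        fmt.Println(before == after) // true
    }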
2 changes: 1 addition & 1 deletion controllers/elfcluster_controller_test.go
@@ -270,7 +270,7 @@ var _ = Describe("ElfClusterReconciler", func() {
mockVMService.EXPECT().CleanUnusedLabels(keys).Return(nil, unexpectedError)
reconciler := &ElfClusterReconciler{ControllerManagerContext: ctrlMgrCtx, NewVMService: mockNewVMService}
reconciler.cleanOrphanLabels(ctx, clusterCtx)
Expect(logBuffer.String()).To(ContainSubstring(fmt.Sprintf("Warning: failed to clean orphan labels in Tower %s", elfCluster.Spec.Tower.Server)))
Expect(logBuffer.String()).To(ContainSubstring("Warning: failed to clean orphan labels in Tower " + elfCluster.Spec.Tower.Server))

logBuffer.Reset()
mockVMService.EXPECT().CleanUnusedLabels(keys).Return(nil, nil)
45 changes: 22 additions & 23 deletions controllers/elfmachine_controller.go
@@ -31,9 +31,8 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
apitypes "k8s.io/apimachinery/pkg/types"
"k8s.io/klog/v2"
"k8s.io/utils/pointer"
"k8s.io/utils/ptr"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
capierrors "sigs.k8s.io/cluster-api/errors"
capiutil "sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/annotations"
"sigs.k8s.io/cluster-api/util/conditions"
@@ -540,15 +539,15 @@ func (r *ElfMachineReconciler) reconcileVM(ctx goctx.Context, machineCtx *contex
if canRetry, err := canRetryVMOperation(ctx, machineCtx, r.Client); err != nil {
return nil, false, err
} else if !canRetry {
log.V(1).Info(fmt.Sprintf("%s, skip creating VM", message))
log.V(1).Info(message + ", skip creating VM")
return nil, false, nil
}

log.V(1).Info(fmt.Sprintf("%s and the retry silence period passes, will try to create the VM again", message))
log.V(1).Info(message + " and the retry silence period passes, will try to create the VM again")
}

if ok, msg := acquireTicketForCreateVM(machineCtx.ElfMachine.Name, machineutil.IsControlPlaneMachine(machineCtx.ElfMachine)); !ok {
log.V(1).Info(fmt.Sprintf("%s, skip creating VM", msg))
log.V(1).Info(msg + ", skip creating VM")

return nil, false, nil
}

@@ -644,8 +643,8 @@ func (r *ElfMachineReconciler) reconcileVM(ctx goctx.Context, machineCtx *contex
// The VM was moved to the recycle bin. Treat the VM as deleted, and will not reconganize it even if it's moved back from the recycle bin.
if service.IsVMInRecycleBin(vm) {
message := fmt.Sprintf("The VM %s was moved to the Tower recycle bin by users, so treat it as deleted.", machineCtx.ElfMachine.Status.VMRef)
machineCtx.ElfMachine.Status.FailureReason = capierrors.MachineStatusErrorPtr(capeerrors.MovedToRecycleBinError)
machineCtx.ElfMachine.Status.FailureMessage = pointer.String(message)
machineCtx.ElfMachine.Status.FailureReason = ptr.To(capeerrors.MovedToRecycleBinError)
machineCtx.ElfMachine.Status.FailureMessage = ptr.To(message)
machineCtx.ElfMachine.SetVM("")
log.Error(stderrors.New(message), "")

@@ -700,8 +699,8 @@ func (r *ElfMachineReconciler) getVM(ctx goctx.Context, machineCtx *context.Mach
}

// If the machine was not found by UUID and timed out it means that it got deleted directly
machineCtx.ElfMachine.Status.FailureReason = capierrors.MachineStatusErrorPtr(capeerrors.RemovedFromInfrastructureError)
machineCtx.ElfMachine.Status.FailureMessage = pointer.String(fmt.Sprintf("Unable to find VM by UUID %s. The VM was removed from infrastructure.", machineCtx.ElfMachine.Status.VMRef))
machineCtx.ElfMachine.Status.FailureReason = ptr.To(capeerrors.RemovedFromInfrastructureError)
machineCtx.ElfMachine.Status.FailureMessage = ptr.To(fmt.Sprintf("Unable to find VM by UUID %s. The VM was removed from infrastructure.", machineCtx.ElfMachine.Status.VMRef))
log.Error(err, fmt.Sprintf("failed to get VM by UUID %s in %s", machineCtx.ElfMachine.Status.VMRef, infrav1.VMDisconnectionTimeout.String()), "message", machineCtx.ElfMachine.Status.FailureMessage)

return nil, err
@@ -779,7 +778,7 @@ func (r *ElfMachineReconciler) reconcileVMStatus(ctx goctx.Context, machineCtx *
// See issue http://jira.smartx.com/browse/SKS-1351 for details.
return false, r.powerOffVM(ctx, machineCtx)
default:
log.Info(fmt.Sprintf("The VM is in an unexpected status %s", string(*vm.Status)), "vmRef", machineCtx.ElfMachine.Status.VMRef)
log.Info("The VM is in an unexpected status "+string(*vm.Status), "vmRef", machineCtx.ElfMachine.Status.VMRef)

return false, nil
}
@@ -814,7 +813,7 @@ func (r *ElfMachineReconciler) powerOffVM(ctx goctx.Context, machineCtx *context
log := ctrl.LoggerFrom(ctx)

if ok := acquireTicketForUpdatingVM(machineCtx.ElfMachine.Name); !ok {
log.V(1).Info(fmt.Sprintf("The VM operation reaches rate limit, skip powering off VM %s", machineCtx.ElfMachine.Status.VMRef))
log.V(1).Info("The VM operation reaches rate limit, skip powering off VM " + machineCtx.ElfMachine.Status.VMRef)

return nil
}
@@ -849,11 +848,11 @@ func (r *ElfMachineReconciler) powerOnVM(ctx goctx.Context, machineCtx *context.
return nil
}

log.V(1).Info(fmt.Sprintf("%s and the retry silence period passes, will try to power on the VM again", message))
log.V(1).Info(message + " and the retry silence period passes, will try to power on the VM again")
}

if ok := acquireTicketForUpdatingVM(machineCtx.ElfMachine.Name); !ok {
log.V(1).Info(fmt.Sprintf("The VM operation reaches rate limit, skip power on VM %s", machineCtx.ElfMachine.Status.VMRef))
log.V(1).Info("The VM operation reaches rate limit, skip power on VM " + machineCtx.ElfMachine.Status.VMRef)

return nil
}
@@ -885,7 +884,7 @@ func (r *ElfMachineReconciler) updateVM(ctx goctx.Context, machineCtx *context.M
log := ctrl.LoggerFrom(ctx)

if ok := acquireTicketForUpdatingVM(machineCtx.ElfMachine.Name); !ok {
log.V(1).Info(fmt.Sprintf("The VM operation reaches rate limit, skip updating VM %s", machineCtx.ElfMachine.Status.VMRef))
log.V(1).Info("The VM operation reaches rate limit, skip updating VM " + machineCtx.ElfMachine.Status.VMRef)

return nil
}
@@ -1005,8 +1004,8 @@ func (r *ElfMachineReconciler) reconcileVMFailedTask(ctx goctx.Context, machineC
conditions.MarkFalse(machineCtx.ElfMachine, infrav1.VMProvisionedCondition, infrav1.TaskFailureReason, clusterv1.ConditionSeverityInfo, errorMessage)

if service.IsCloudInitConfigError(errorMessage) {
machineCtx.ElfMachine.Status.FailureReason = capierrors.MachineStatusErrorPtr(capeerrors.CloudInitConfigError)
machineCtx.ElfMachine.Status.FailureMessage = pointer.String(fmt.Sprintf("VM cloud-init config error: %s", service.FormatCloudInitError(errorMessage)))
machineCtx.ElfMachine.Status.FailureReason = ptr.To(capeerrors.CloudInitConfigError)
machineCtx.ElfMachine.Status.FailureMessage = ptr.To("VM cloud-init config error: " + service.FormatCloudInitError(errorMessage))
}

log.Error(errors.New("VM task failed"), "", "vmRef", vmRef, "taskRef", taskRef, "taskErrorMessage", errorMessage, "taskErrorCode", service.GetTowerString(task.ErrorCode), "taskDescription", service.GetTowerString(task.Description))
@@ -1038,13 +1037,13 @@ func (r *ElfMachineReconciler) reconcileVMFailedTask(ctx goctx.Context, machineC
setVMDuplicate(machineCtx.ElfMachine.Name)
case service.IsStorageInsufficientError(errorMessage):
recordElfClusterStorageInsufficient(machineCtx, true)
message := fmt.Sprintf("Insufficient storage detected for the ELF cluster %s", machineCtx.ElfCluster.Spec.Cluster)
message := "Insufficient storage detected for the ELF cluster " + machineCtx.ElfCluster.Spec.Cluster
log.Info(message)

return errors.New(message)
case service.IsMemoryInsufficientError(errorMessage):
recordElfClusterMemoryInsufficient(machineCtx, true)
message := fmt.Sprintf("Insufficient memory detected for the ELF cluster %s", machineCtx.ElfCluster.Spec.Cluster)
message := "Insufficient memory detected for the ELF cluster " + machineCtx.ElfCluster.Spec.Cluster
log.Info(message)

return errors.New(message)
@@ -1075,7 +1074,7 @@ func (r *ElfMachineReconciler) reconcileProviderID(ctx goctx.Context, machineCtx
}

if machineCtx.ElfMachine.Spec.ProviderID == nil || *machineCtx.ElfMachine.Spec.ProviderID != providerID {
machineCtx.ElfMachine.Spec.ProviderID = pointer.String(providerID)
machineCtx.ElfMachine.Spec.ProviderID = ptr.To(providerID)

log.Info("updated providerID", "providerID", providerID)
}
@@ -1185,7 +1184,7 @@ func (r *ElfMachineReconciler) reconcileNetwork(ctx goctx.Context, machineCtx *c
return false, err
}

for i := 0; i < len(nics); i++ {
for i := range len(nics) {
nic := nics[i]
ip := service.GetTowerString(nic.IPAddress)

@@ -1281,7 +1280,7 @@ func (r *ElfMachineReconciler) reconcileLabels(ctx goctx.Context, machineCtx *co

// If the virtual machine has been labeled with managed label,
// it is considered that all labels have been labeled.
for i := 0; i < len(vm.Labels); i++ {
for i := range len(vm.Labels) {
if *vm.Labels[i].ID == *capeManagedLabel.ID {
return true, nil
}
@@ -1429,14 +1428,14 @@ func (r *ElfMachineReconciler) deleteDuplicateVMs(ctx goctx.Context, machineCtx

if machineCtx.ElfMachine.Status.VMRef == "" {
vmIDs := make([]string, 0, len(vms))
for i := 0; i < len(vms); i++ {
for i := range len(vms) {
vmIDs = append(vmIDs, *vms[i].ID)
}
log.Info("Waiting for ElfMachine to select one of the duplicate VMs before deleting the other", "vms", vmIDs)
return reconcile.Result{RequeueAfter: config.DefaultRequeueTimeout}, nil
}

for i := 0; i < len(vms); i++ {
for i := range len(vms) {
// Do not delete already running virtual machines to avoid deleting already used virtual machines.
if *vms[i].ID == machineCtx.ElfMachine.Status.VMRef ||
*vms[i].LocalID == machineCtx.ElfMachine.Status.VMRef ||
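Throughout elfmachine_controller.go the commit also replaces the older k8s.io/utils/pointer helpers and capierrors.MachineStatusErrorPtr with the generic ptr.To from k8s.io/utils/ptr. A minimal sketch of the new call, assuming k8s.io/utils is fetched with go get; the status struct here is a simplified stand-in, while the real fields use *capierrors.MachineStatusError and *string:

    package main

    import (
        "fmt"

        "k8s.io/utils/ptr" // go get k8s.io/utils
    )

    // status is a simplified stand-in for ElfMachineStatus; only the pointer
    // fields touched by this commit are modeled.
    type status struct {
        FailureReason  *string
        FailureMessage *string
    }

    func main() {
        var s status

        // Previously these assignments needed type-specific helpers such as
        // pointer.String or capierrors.MachineStatusErrorPtr; the generic ptr.To
        // covers both with one function.
        s.FailureReason = ptr.To("MovedToRecycleBinError")
        s.FailureMessage = ptr.To("The VM was moved to the Tower recycle bin by users, so treat it as deleted.")

        fmt.Println(*s.FailureReason, *s.FailureMessage)
    }

Because ptr.To is generic over the value type, one helper handles strings, error enums, and any other value, which is why the capierrors import could be dropped from the controller.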
(Diff for the remaining changed files not shown.)
