From ef04ef6361c0bdcbb37e4d04e7c37521e2c092e8 Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Wed, 27 Mar 2024 19:43:22 +0000 Subject: [PATCH 1/2] golangci-lint: use exclude-rules instead of skip-files and skip-dirs Signed-off-by: Matthieu MOREL --- golangci.yaml | 41 +++++++++++++------ internal/hook/hook_tracker_test.go | 1 - internal/hook/item_hook_handler_test.go | 2 - internal/hook/wait_exec_hook_handler_test.go | 2 - .../resource_modifiers_test.go | 1 - .../resourcepolicies/volume_resources_test.go | 3 -- internal/velero/images_test.go | 1 - pkg/backup/backup_test.go | 4 +- pkg/backup/item_collector_test.go | 1 - pkg/backup/remap_crd_version_action_test.go | 2 - pkg/backup/service_account_action_test.go | 2 - pkg/buildinfo/buildinfo_test.go | 1 - pkg/client/config_test.go | 1 - pkg/cmd/cli/backup/create_test.go | 2 - pkg/cmd/cli/backup/get_test.go | 4 +- pkg/cmd/cli/backuplocation/delete_test.go | 2 - pkg/cmd/cli/restore/create_test.go | 1 - pkg/cmd/cli/restore/get_test.go | 2 +- pkg/cmd/util/flag/map_test.go | 1 - .../backup_structured_describer_test.go | 1 - pkg/controller/backup_controller_test.go | 2 - .../backup_deletion_controller_test.go | 5 --- .../backup_repository_controller_test.go | 1 - .../data_download_controller_test.go | 4 +- pkg/controller/data_upload_controller_test.go | 3 +- pkg/controller/restore_controller_test.go | 2 - .../restore_finalizer_controller_test.go | 3 -- pkg/exposer/csi_snapshot_test.go | 1 - pkg/persistence/object_store_test.go | 1 - .../clientmgmt/process/client_builder_test.go | 1 - pkg/podvolume/backupper_test.go | 4 +- pkg/podvolume/restorer_test.go | 1 - pkg/podvolume/util_test.go | 1 - pkg/repository/provider/unified_repo_test.go | 2 +- pkg/restic/command_factory_test.go | 1 - pkg/restore/merge_service_account_test.go | 1 - pkg/restore/restore_test.go | 3 -- pkg/restore/service_action_test.go | 1 - pkg/uploader/kopia/snapshot_test.go | 1 - pkg/uploader/provider/kopia_test.go | 2 +- pkg/uploader/provider/provider_test.go | 1 - pkg/uploader/provider/restic_test.go | 1 - pkg/util/azure/credential_test.go | 1 - .../collections/includes_excludes_test.go | 1 - pkg/util/csi/util_test.go | 1 - pkg/util/kube/utils_test.go | 1 - test/e2e/backup/backup.go | 1 - test/e2e/backups/sync_backups.go | 1 - test/e2e/backups/ttl.go | 3 -- .../api-group/enable_api_group_extentions.go | 1 - .../api-group/enable_api_group_versions.go | 3 -- test/e2e/basic/storage-class-changing.go | 1 - test/e2e/bsl-mgmt/deletion.go | 1 - test/e2e/e2e_suite_test.go | 1 - test/e2e/migration/migration.go | 1 - test/e2e/pv-backup/pv-backup-filter.go | 1 - .../resource-filtering/exclude_namespaces.go | 1 - .../resource-filtering/include_namespaces.go | 1 - .../e2e/resourcepolicies/resource_policies.go | 1 - test/e2e/schedule/schedule.go | 1 - test/e2e/upgrade/upgrade.go | 2 - test/pkg/client/config.go | 1 - test/util/k8s/deployment.go | 1 - test/util/k8s/persistentvolumes.go | 1 - test/util/k8s/pod.go | 1 - test/util/k8s/rbac.go | 2 - test/util/providers/aws_utils.go | 1 - test/util/providers/azure_utils.go | 2 - test/util/velero/velero_utils.go | 6 --- 69 files changed, 39 insertions(+), 117 deletions(-) diff --git a/golangci.yaml b/golangci.yaml index 97ee4229da..358c5834f3 100644 --- a/golangci.yaml +++ b/golangci.yaml @@ -19,24 +19,12 @@ run: # "/" will be replaced by current OS file path separator to properly work # on Windows. skip-dirs: - - test/* - pkg/plugin/generated/* - # - autogenerated_by_my_lib # default is true. 
Enables skipping of directories: # vendor$, third_party$, testdata$, examples$, Godeps$, builtin$ skip-dirs-use-default: true - # which files to skip: they will be analyzed, but issues from them - # won't be reported. Default value is empty list, but there is - # no need to include all autogenerated files, we confidently recognize - # autogenerated files. If it's not please let us know. - # "/" will be replaced by current OS file path separator to properly work - # on Windows. - skip-files: - - ".*_test.go$" - # - lib/bad.go - # by default isn't set. If set we pass it to "go list -mod={option}". From "go help modules": # If invoked with -mod=readonly, the go command is disallowed from the implicit # automatic updating of go.mod described above. Instead, it fails when any changes @@ -364,6 +352,35 @@ issues: - linters: - staticcheck text: "DefaultVolumesToRestic" # No need to report deprecate for DefaultVolumesToRestic. + - path: ".*_test.go$" + linters: + - dupword + - errcheck + - ginkgolinter + - goconst + - gosec + - govet + - staticcheck + - stylecheck + - unconvert + - unparam + - unused + - path: test/ + linters: + - bodyclose + - dupword + - errcheck + - goconst + - gosec + - gosimple + - ginkgolinter + - nilerr + - noctx + - staticcheck + - stylecheck + - unconvert + - unparam + - unused # The list of ids of default excludes to include or disable. By default it's empty. include: diff --git a/internal/hook/hook_tracker_test.go b/internal/hook/hook_tracker_test.go index 9e6ca95d33..dde7b85210 100644 --- a/internal/hook/hook_tracker_test.go +++ b/internal/hook/hook_tracker_test.go @@ -67,7 +67,6 @@ func TestHookTracker_Record(t *testing.T) { err = tracker.Record("ns2", "pod2", "container1", HookSourceAnnotation, "h1", PhasePre, true) assert.NotNil(t, err) - } func TestHookTracker_Stat(t *testing.T) { diff --git a/internal/hook/item_hook_handler_test.go b/internal/hook/item_hook_handler_test.go index 8b73572b64..6ab9b1528f 100644 --- a/internal/hook/item_hook_handler_test.go +++ b/internal/hook/item_hook_handler_test.go @@ -2351,14 +2351,12 @@ func TestBackupHookTracker(t *testing.T) { } } h.HandleHooks(velerotest.NewLogger(), groupResource, pod.item, pod.hooks, test.phase, hookTracker) - } actualAtemptted, actualFailed := hookTracker.Stat() assert.Equal(t, test.expectedHookAttempted, actualAtemptted) assert.Equal(t, test.expectedHookFailed, actualFailed) }) } - } func TestRestoreHookTrackerAdd(t *testing.T) { diff --git a/internal/hook/wait_exec_hook_handler_test.go b/internal/hook/wait_exec_hook_handler_test.go index 3f23542744..2f8d7ad7ac 100644 --- a/internal/hook/wait_exec_hook_handler_test.go +++ b/internal/hook/wait_exec_hook_handler_test.go @@ -710,7 +710,6 @@ func TestWaitExecHandleHooks(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - source := fcache.NewFakeControllerSource() go func() { // This is the state of the pod that will be seen by the AddFunc handler. @@ -1251,7 +1250,6 @@ func TestRestoreHookTrackerUpdate(t *testing.T) { for _, test := range tests1 { t.Run(test.name, func(t *testing.T) { - source := fcache.NewFakeControllerSource() go func() { // This is the state of the pod that will be seen by the AddFunc handler. 
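Note: the golangci.yaml hunk above is the substantive change in this patch — newer golangci-lint releases deprecate the run.skip-files/run.skip-dirs options, so the same suppressions are re-expressed as issues.exclude-rules entries, which filter reported issues by path pattern and linter (the files are still analyzed, as the deleted comment explains). A minimal sketch of the pattern, using two of the linters the patch actually configures:

    issues:
      exclude-rules:
        # Silence selected linters in unit tests; the files are still analyzed.
        - path: ".*_test.go$"
          linters:
            - errcheck
            - staticcheck
        # Silence selected linters for everything under the e2e test tree.
        - path: test/
          linters:
            - errcheck

One knock-on effect runs through the rest of the patch: because test files are no longer excluded wholesale, every linter not listed in the new rules now applies to them, which is why the following hunks clean up mechanical findings in tests (extra blank lines removed, assert.Equal on lengths replaced by assert.Len, "cancelled" respelled as "canceled").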
diff --git a/internal/resourcemodifiers/resource_modifiers_test.go b/internal/resourcemodifiers/resource_modifiers_test.go index 648d827be9..8b4384ef1e 100644 --- a/internal/resourcemodifiers/resource_modifiers_test.go +++ b/internal/resourcemodifiers/resource_modifiers_test.go @@ -428,7 +428,6 @@ func TestGetResourceModifiersFromConfig(t *testing.T) { } func TestResourceModifiers_ApplyResourceModifierRules(t *testing.T) { - pvcStandardSc := &unstructured.Unstructured{ Object: map[string]interface{}{ "apiVersion": "v1", diff --git a/internal/resourcepolicies/volume_resources_test.go b/internal/resourcepolicies/volume_resources_test.go index c73bb692d8..f852baff3b 100644 --- a/internal/resourcepolicies/volume_resources_test.go +++ b/internal/resourcepolicies/volume_resources_test.go @@ -86,7 +86,6 @@ func TestCapacityIsInRange(t *testing.T) { actual := test.capacity.isInRange(test.quantity) assert.Equal(t, test.isInRange, actual) - }) } } @@ -141,7 +140,6 @@ func TestStorageClassConditionMatch(t *testing.T) { } func TestNFSConditionMatch(t *testing.T) { - tests := []struct { name string condition *nfsCondition @@ -196,7 +194,6 @@ func TestNFSConditionMatch(t *testing.T) { } func TestCSIConditionMatch(t *testing.T) { - tests := []struct { name string condition *csiCondition diff --git a/internal/velero/images_test.go b/internal/velero/images_test.go index 7d6bdf8cd9..ad73452930 100644 --- a/internal/velero/images_test.go +++ b/internal/velero/images_test.go @@ -128,7 +128,6 @@ func testDefaultImage(t *testing.T, defaultImageFn func() string, imageName stri assert.Equal(t, tc.want, defaultImageFn()) }) } - } func TestDefaultVeleroImage(t *testing.T) { diff --git a/pkg/backup/backup_test.go b/pkg/backup/backup_test.go index 32270c8cf9..0ceb0572a6 100644 --- a/pkg/backup/backup_test.go +++ b/pkg/backup/backup_test.go @@ -154,8 +154,8 @@ func TestBackupProgressIsUpdated(t *testing.T) { h.backupper.Backup(h.log, req, backupFile, nil, nil) require.NotNil(t, req.Status.Progress) - assert.Equal(t, len(req.BackedUpItems), req.Status.Progress.TotalItems) - assert.Equal(t, len(req.BackedUpItems), req.Status.Progress.ItemsBackedUp) + assert.Len(t, req.BackedUpItems, req.Status.Progress.TotalItems) + assert.Len(t, req.BackedUpItems, req.Status.Progress.ItemsBackedUp) } // TestBackupResourceFiltering runs backups with different combinations diff --git a/pkg/backup/item_collector_test.go b/pkg/backup/item_collector_test.go index f44743dfb5..467ce96a75 100644 --- a/pkg/backup/item_collector_test.go +++ b/pkg/backup/item_collector_test.go @@ -76,5 +76,4 @@ func TestSortOrderedResource(t *testing.T) { } sortedPvResources := sortResourcesByOrder(log, pvResources, pvOrder) assert.Equal(t, sortedPvResources, expectedPvResources) - } diff --git a/pkg/backup/remap_crd_version_action_test.go b/pkg/backup/remap_crd_version_action_test.go index bbcedd32b9..b50f82de4b 100644 --- a/pkg/backup/remap_crd_version_action_test.go +++ b/pkg/backup/remap_crd_version_action_test.go @@ -136,7 +136,6 @@ func TestRemapCRDVersionAction(t *testing.T) { // set it back to the default one a.discoveryHelper = fakeDiscoveryHelper() }) - } // TestRemapCRDVersionActionData tests the RemapCRDVersionAction plugin against actual CRD to confirm that the v1beta1 version is returned when the v1 version is passed in to the plugin. 
@@ -217,7 +216,6 @@ func TestRemapCRDVersionActionData(t *testing.T) { betaClient.Delete(context.TODO(), crd.Name, metav1.DeleteOptions{}) }) } - } func fakeDiscoveryHelper() velerodiscovery.Helper { diff --git a/pkg/backup/service_account_action_test.go b/pkg/backup/service_account_action_test.go index b17b9748cb..9bb813d6d5 100644 --- a/pkg/backup/service_account_action_test.go +++ b/pkg/backup/service_account_action_test.go @@ -400,7 +400,6 @@ func TestServiceAccountActionExecute(t *testing.T) { assert.Equal(t, test.expectedAdditionalItems, additional) }) } - } func TestServiceAccountActionExecuteOnBeta1(t *testing.T) { @@ -608,5 +607,4 @@ func TestServiceAccountActionExecuteOnBeta1(t *testing.T) { assert.Equal(t, test.expectedAdditionalItems, additional) }) } - } diff --git a/pkg/buildinfo/buildinfo_test.go b/pkg/buildinfo/buildinfo_test.go index 40631525a1..be12bdcab6 100644 --- a/pkg/buildinfo/buildinfo_test.go +++ b/pkg/buildinfo/buildinfo_test.go @@ -49,5 +49,4 @@ func TestFormattedGitSHA(t *testing.T) { assert.Equal(t, FormattedGitSHA(), test.expected) }) } - } diff --git a/pkg/client/config_test.go b/pkg/client/config_test.go index 56b198f793..b8e593d216 100644 --- a/pkg/client/config_test.go +++ b/pkg/client/config_test.go @@ -48,7 +48,6 @@ func removeConfigfileName() error { return nil } func TestConfigOperations(t *testing.T) { - preHomeEnv := "" prevEnv := os.Environ() for _, entry := range prevEnv { diff --git a/pkg/cmd/cli/backup/create_test.go b/pkg/cmd/cli/backup/create_test.go index e115bbd199..084d67becb 100644 --- a/pkg/cmd/cli/backup/create_test.go +++ b/pkg/cmd/cli/backup/create_test.go @@ -148,7 +148,6 @@ func TestCreateOptions_OrderedResources(t *testing.T) { "persistentvolumes": "pv1,pv2", } assert.Equal(t, orderedResources, expectedMixedResources) - } func TestCreateCommand(t *testing.T) { @@ -156,7 +155,6 @@ func TestCreateCommand(t *testing.T) { args := []string{name} t.Run("create a backup create command with full options except fromSchedule and wait, then run by create option", func(t *testing.T) { - // create a factory f := &factorymocks.Factory{} diff --git a/pkg/cmd/cli/backup/get_test.go b/pkg/cmd/cli/backup/get_test.go index af37953522..628b92e627 100644 --- a/pkg/cmd/cli/backup/get_test.go +++ b/pkg/cmd/cli/backup/get_test.go @@ -77,7 +77,7 @@ func TestNewGetCommand(t *testing.T) { i++ } } - assert.Equal(t, len(args), i) + assert.Len(t, args, i) } d := NewGetCommand(f, "velero backup get") @@ -98,6 +98,6 @@ func TestNewGetCommand(t *testing.T) { i++ } } - assert.Equal(t, len(args), i) + assert.Len(t, args, i) } } diff --git a/pkg/cmd/cli/backuplocation/delete_test.go b/pkg/cmd/cli/backuplocation/delete_test.go index 0ed5d3130b..2ebddca80a 100644 --- a/pkg/cmd/cli/backuplocation/delete_test.go +++ b/pkg/cmd/cli/backuplocation/delete_test.go @@ -35,7 +35,6 @@ import ( ) func TestNewDeleteCommand(t *testing.T) { - // create a factory f := &factorymocks.Factory{} kbclient := velerotest.NewFakeControllerRuntimeClient(t) @@ -75,7 +74,6 @@ func TestNewDeleteCommand(t *testing.T) { return } t.Fatalf("process ran with err %v, want backups by get()", err) - } func TestDeleteFunctions(t *testing.T) { //t.Run("create the other create command with fromSchedule option for Run() other branches", func(t *testing.T) { diff --git a/pkg/cmd/cli/restore/create_test.go b/pkg/cmd/cli/restore/create_test.go index 6a68018c47..4fbe7f3723 100644 --- a/pkg/cmd/cli/restore/create_test.go +++ b/pkg/cmd/cli/restore/create_test.go @@ -59,7 +59,6 @@ func TestCreateCommand(t 
*testing.T) { args := []string{name} t.Run("create a backup create command with full options except fromSchedule and wait, then run by create option", func(t *testing.T) { - // create a factory f := &factorymocks.Factory{} diff --git a/pkg/cmd/cli/restore/get_test.go b/pkg/cmd/cli/restore/get_test.go index 211550087b..fcb5a04c42 100644 --- a/pkg/cmd/cli/restore/get_test.go +++ b/pkg/cmd/cli/restore/get_test.go @@ -76,6 +76,6 @@ func TestNewGetCommand(t *testing.T) { i++ } } - require.Equal(t, len(args), i) + require.Len(t, args, i) } } diff --git a/pkg/cmd/util/flag/map_test.go b/pkg/cmd/util/flag/map_test.go index c0a1dee7ef..70a71fbb18 100644 --- a/pkg/cmd/util/flag/map_test.go +++ b/pkg/cmd/util/flag/map_test.go @@ -71,7 +71,6 @@ func TestSetOfMap(t *testing.T) { assert.EqualValues(t, c.expected, m.Data()) }) } - } func TestStringOfMap(t *testing.T) { diff --git a/pkg/cmd/util/output/backup_structured_describer_test.go b/pkg/cmd/util/output/backup_structured_describer_test.go index afddec0a2a..58a141cd9c 100644 --- a/pkg/cmd/util/output/backup_structured_describer_test.go +++ b/pkg/cmd/util/output/backup_structured_describer_test.go @@ -618,5 +618,4 @@ func TestDescribeDeleteBackupRequestsInSF(t *testing.T) { assert.True(tt, reflect.DeepEqual(sd.output, tc.expect)) }) } - } diff --git a/pkg/controller/backup_controller_test.go b/pkg/controller/backup_controller_test.go index 7dd3922996..37caf839e4 100644 --- a/pkg/controller/backup_controller_test.go +++ b/pkg/controller/backup_controller_test.go @@ -435,7 +435,6 @@ func TestDefaultBackupTTL(t *testing.T) { ) t.Run(test.name, func(t *testing.T) { - apiServer := velerotest.NewAPIServer(t) discoveryHelper, err := discovery.NewHelper(apiServer.DiscoveryClient, logger) require.NoError(t, err) @@ -1735,6 +1734,5 @@ func TestPatchResourceWorksWithStatus(t *testing.T) { t.Error(cmp.Diff(fromCluster, tt.args.updated)) } }) - } } diff --git a/pkg/controller/backup_deletion_controller_test.go b/pkg/controller/backup_deletion_controller_test.go index a524b1adf6..cc83c29f2e 100644 --- a/pkg/controller/backup_deletion_controller_test.go +++ b/pkg/controller/backup_deletion_controller_test.go @@ -75,7 +75,6 @@ func defaultTestDbr() *velerov1api.DeleteBackupRequest { } func setupBackupDeletionControllerTest(t *testing.T, req *velerov1api.DeleteBackupRequest, objects ...runtime.Object) *backupDeletionControllerTestData { - var ( fakeClient = velerotest.NewFakeControllerRuntimeClient(t, append(objects, req)...) 
volumeSnapshotter = &velerotest.FakeVolumeSnapshotter{SnapshotsTaken: sets.NewString()} @@ -215,7 +214,6 @@ func TestBackupDeletionControllerReconcile(t *testing.T) { }) t.Run("unable to find backup", func(t *testing.T) { - td := setupBackupDeletionControllerTest(t, defaultTestDbr()) _, err := td.controller.Reconcile(context.TODO(), td.req) @@ -261,7 +259,6 @@ func TestBackupDeletionControllerReconcile(t *testing.T) { assert.Equal(t, "cannot delete backup because backup storage location default is currently in read-only mode", res.Status.Errors[0]) }) t.Run("full delete, no errors", func(t *testing.T) { - input := defaultTestDbr() // Clear out resource labels to make sure the controller adds them and does not @@ -668,7 +665,6 @@ func TestBackupDeletionControllerReconcile(t *testing.T) { err = td.fakeClient.Get(ctx, td.req.NamespacedName, res) assert.True(t, apierrors.IsNotFound(err), "Expected not found error, but actual value of error: %v", err) td.backupStore.AssertNotCalled(t, "DeleteBackup", mock.Anything) - }) t.Run("Expired request will not be deleted if the status is not processed", func(t *testing.T) { @@ -690,7 +686,6 @@ func TestBackupDeletionControllerReconcile(t *testing.T) { assert.Equal(t, "Processed", string(res.Status.Phase)) assert.Len(t, res.Status.Errors, 1) assert.Equal(t, "backup not found", res.Status.Errors[0]) - }) } diff --git a/pkg/controller/backup_repository_controller_test.go b/pkg/controller/backup_repository_controller_test.go index 22c64b45e8..70dfd1e799 100644 --- a/pkg/controller/backup_repository_controller_test.go +++ b/pkg/controller/backup_repository_controller_test.go @@ -57,7 +57,6 @@ func mockBackupRepositoryCR() *velerov1api.BackupRepository { MaintenanceFrequency: metav1.Duration{Duration: testMaintenanceFrequency}, }, } - } func TestPatchBackupRepository(t *testing.T) { diff --git a/pkg/controller/data_download_controller_test.go b/pkg/controller/data_download_controller_test.go index e65b2afff1..b0abc7a395 100644 --- a/pkg/controller/data_download_controller_test.go +++ b/pkg/controller/data_download_controller_test.go @@ -526,7 +526,6 @@ func TestOnDataDownloadCompleted(t *testing.T) { ep := exposermockes.NewGenericRestoreExposer(t) if test.rebindVolumeErr { ep.On("RebindVolume", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(errors.New("Error to rebind volume")) - } else { ep.On("RebindVolume", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) } @@ -813,7 +812,6 @@ func TestTryCancelDataDownload(t *testing.T) { } func TestUpdateDataDownloadWithRetry(t *testing.T) { - namespacedName := types.NamespacedName{ Name: dataDownloadName, Namespace: "velero", @@ -1026,7 +1024,7 @@ func TestAttemptDataDownloadResume(t *testing.T) { } else { assert.NoError(t, err) - // Verify DataDownload marked as Cancelled + // Verify DataDownload marked as Canceled for _, duName := range test.cancelledDataDownloads { dataUpload := &velerov2alpha1api.DataDownload{} err := r.client.Get(context.Background(), types.NamespacedName{Namespace: "velero", Name: duName}, dataUpload) diff --git a/pkg/controller/data_upload_controller_test.go b/pkg/controller/data_upload_controller_test.go index bf9a5674d1..eb440c7686 100644 --- a/pkg/controller/data_upload_controller_test.go +++ b/pkg/controller/data_upload_controller_test.go @@ -885,7 +885,6 @@ func TestTryCancelDataUpload(t *testing.T) { } func TestUpdateDataUploadWithRetry(t *testing.T) { - namespacedName := types.NamespacedName{ Name: dataUploadName, 
Namespace: "velero", @@ -1097,7 +1096,7 @@ func TestAttemptDataUploadResume(t *testing.T) { } else { assert.NoError(t, err) - // Verify DataUploads marked as Cancelled + // Verify DataUploads marked as Canceled for _, duName := range test.cancelledDataUploads { dataUpload := &velerov2alpha1api.DataUpload{} err := r.client.Get(context.Background(), types.NamespacedName{Namespace: "velero", Name: duName}, dataUpload) diff --git a/pkg/controller/restore_controller_test.go b/pkg/controller/restore_controller_test.go index 7b67aa5693..95628c3cf2 100644 --- a/pkg/controller/restore_controller_test.go +++ b/pkg/controller/restore_controller_test.go @@ -53,7 +53,6 @@ import ( ) func TestFetchBackupInfo(t *testing.T) { - tests := []struct { name string backupName string @@ -206,7 +205,6 @@ func TestProcessQueueItemSkips(t *testing.T) { } func TestRestoreReconcile(t *testing.T) { - defaultStorageLocation := builder.ForBackupStorageLocation("velero", "default").Provider("myCloud").Bucket("bucket").Result() now, err := time.Parse(time.RFC1123Z, time.RFC1123Z) diff --git a/pkg/controller/restore_finalizer_controller_test.go b/pkg/controller/restore_finalizer_controller_test.go index 5bc25fca36..76f4d295f5 100644 --- a/pkg/controller/restore_finalizer_controller_test.go +++ b/pkg/controller/restore_finalizer_controller_test.go @@ -178,7 +178,6 @@ func TestRestoreFinalizerReconcile(t *testing.T) { } }) } - } func TestUpdateResult(t *testing.T) { @@ -453,7 +452,6 @@ func TestPatchDynamicPVWithVolumeInfo(t *testing.T) { assert.Equal(t, expectedPVInfo.ReclaimPolicy, string(pv.Spec.PersistentVolumeReclaimPolicy)) assert.Equal(t, expectedPVInfo.Labels, pv.Labels) } - } } @@ -489,5 +487,4 @@ func TestGetRestoredPVCFromRestoredResourceList(t *testing.T) { } actual = getRestoredPVCFromRestoredResourceList(restoredResourceList) assert.Equal(t, expected, actual) - } diff --git a/pkg/exposer/csi_snapshot_test.go b/pkg/exposer/csi_snapshot_test.go index fe75026372..85e8e96da1 100644 --- a/pkg/exposer/csi_snapshot_test.go +++ b/pkg/exposer/csi_snapshot_test.go @@ -440,7 +440,6 @@ func TestExpose(t *testing.T) { } else { assert.EqualError(t, err, test.err) } - }) } } diff --git a/pkg/persistence/object_store_test.go b/pkg/persistence/object_store_test.go index de3da29f1a..a63d31825c 100644 --- a/pkg/persistence/object_store_test.go +++ b/pkg/persistence/object_store_test.go @@ -1142,7 +1142,6 @@ func TestGetBackupVolumeInfos(t *testing.T) { if len(tc.expectedResult) > 0 { require.Equal(t, tc.expectedResult, result) } - }) } } diff --git a/pkg/plugin/clientmgmt/process/client_builder_test.go b/pkg/plugin/clientmgmt/process/client_builder_test.go index d822d43cbd..a8bf3ad46e 100644 --- a/pkg/plugin/clientmgmt/process/client_builder_test.go +++ b/pkg/plugin/clientmgmt/process/client_builder_test.go @@ -51,7 +51,6 @@ func TestNewClientBuilder(t *testing.T) { assert.Equal(t, []string{"run-plugins", "--log-level", "info", "--features", "feature1,feature2"}, cb.commandArgs) // Clear the features list in case other tests run in the same process. 
features.NewFeatureFlagSet() - } func TestClientConfig(t *testing.T) { diff --git a/pkg/podvolume/backupper_test.go b/pkg/podvolume/backupper_test.go index 618d380aa4..bf563bdd55 100644 --- a/pkg/podvolume/backupper_test.go +++ b/pkg/podvolume/backupper_test.go @@ -197,7 +197,6 @@ func Test_backupper_BackupPodVolumes_log_test(t *testing.T) { b.BackupPodVolumes(tt.args.backup, tt.args.pod, tt.args.volumesToBackup, tt.args.resPolicies, log) fmt.Println(logOutput.String()) assert.Contains(t, logOutput.String(), tt.wantLog) - }) } } @@ -495,7 +494,7 @@ func TestBackupPodVolumes(t *testing.T) { bsl: "fake-bsl", }, { - name: "context cancelled", + name: "context canceled", ctx: ctxWithCancel, volumes: []string{ "fake-volume-1", @@ -616,7 +615,6 @@ func TestBackupPodVolumes(t *testing.T) { for _, pvb := range test.retPVBs { bp.(*backupper).results[resultsKey(test.sourcePod.Namespace, test.sourcePod.Name)] <- pvb } - } }() diff --git a/pkg/podvolume/restorer_test.go b/pkg/podvolume/restorer_test.go index f630b0fd5f..efccc3ebc1 100644 --- a/pkg/podvolume/restorer_test.go +++ b/pkg/podvolume/restorer_test.go @@ -409,7 +409,6 @@ func TestRestorePodVolumes(t *testing.T) { for _, pvr := range test.retPVRs { rs.(*restorer).results[resultsKey(test.restoredPod.Namespace, test.restoredPod.Name)] <- pvr } - } }() diff --git a/pkg/podvolume/util_test.go b/pkg/podvolume/util_test.go index 79e68450cf..395b45a3fc 100644 --- a/pkg/podvolume/util_test.go +++ b/pkg/podvolume/util_test.go @@ -298,6 +298,5 @@ func TestVolumeHasNonRestorableSource(t *testing.T) { actual := volumeHasNonRestorableSource(tc.volumeName, tc.podVolumes) assert.Equal(t, tc.expected, actual) }) - } } diff --git a/pkg/repository/provider/unified_repo_test.go b/pkg/repository/provider/unified_repo_test.go index 44d7301c40..5d59c151b8 100644 --- a/pkg/repository/provider/unified_repo_test.go +++ b/pkg/repository/provider/unified_repo_test.go @@ -1000,7 +1000,7 @@ func TestBatchForget(t *testing.T) { }) if tc.expectedErr == nil { - assert.Equal(t, 0, len(errs)) + assert.Empty(t, errs) } else { assert.Equal(t, len(tc.expectedErr), len(errs)) diff --git a/pkg/restic/command_factory_test.go b/pkg/restic/command_factory_test.go index c0a627ad1f..75f20d47a5 100644 --- a/pkg/restic/command_factory_test.go +++ b/pkg/restic/command_factory_test.go @@ -89,7 +89,6 @@ func TestGetSnapshotCommand(t *testing.T) { assert.Equal(t, expectedFlags, actualFlags) assert.Equal(t, expectedTags, actualTags) - } func TestInitCommand(t *testing.T) { diff --git a/pkg/restore/merge_service_account_test.go b/pkg/restore/merge_service_account_test.go index e58c423744..4322ae0033 100644 --- a/pkg/restore/merge_service_account_test.go +++ b/pkg/restore/merge_service_account_test.go @@ -361,7 +361,6 @@ func TestMergeMaps(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - result := mergeMaps(tc.destination, tc.source) assert.Equal(t, tc.expected, result) diff --git a/pkg/restore/restore_test.go b/pkg/restore/restore_test.go index ee735386db..dd91382371 100644 --- a/pkg/restore/restore_test.go +++ b/pkg/restore/restore_test.go @@ -1859,7 +1859,6 @@ func TestRestoreWithAsyncOperations(t *testing.T) { UpdatedItem: obj, OperationID: obj.GetName() + "-1", }, nil - }, progressFunc: func(operationID string, restore *velerov1api.Restore) (velero.OperationProgress, error) { return velero.OperationProgress{ @@ -1881,7 +1880,6 @@ func TestRestoreWithAsyncOperations(t *testing.T) { UpdatedItem: obj, OperationID: obj.GetName() + "-1", }, nil - }, 
progressFunc: func(operationID string, restore *velerov1api.Restore) (velero.OperationProgress, error) { return velero.OperationProgress{ @@ -1902,7 +1900,6 @@ func TestRestoreWithAsyncOperations(t *testing.T) { return &velero.RestoreItemActionExecuteOutput{ UpdatedItem: obj, }, nil - }, } diff --git a/pkg/restore/service_action_test.go b/pkg/restore/service_action_test.go index be722fa2a0..16115e9202 100644 --- a/pkg/restore/service_action_test.go +++ b/pkg/restore/service_action_test.go @@ -65,7 +65,6 @@ func svcJSONFromUnstructured(ports ...map[string]interface{}) string { } func TestServiceActionExecute(t *testing.T) { - tests := []struct { name string obj corev1api.Service diff --git a/pkg/uploader/kopia/snapshot_test.go b/pkg/uploader/kopia/snapshot_test.go index 3021389fba..86c4e69714 100644 --- a/pkg/uploader/kopia/snapshot_test.go +++ b/pkg/uploader/kopia/snapshot_test.go @@ -79,7 +79,6 @@ func MockFuncs(s *snapshotMockes, args []mockArgs) { } func TestSnapshotSource(t *testing.T) { - ctx := context.TODO() sourceInfo := snapshot.SourceInfo{ UserName: "testUserName", diff --git a/pkg/uploader/provider/kopia_test.go b/pkg/uploader/provider/kopia_test.go index f1f16fb926..f6316c965a 100644 --- a/pkg/uploader/provider/kopia_test.go +++ b/pkg/uploader/provider/kopia_test.go @@ -228,7 +228,7 @@ func TestCheckContext(t *testing.T) { kp.CheckContext(ctx, tc.finishChan, tc.restoreChan, tc.uploader) if tc.expectCancel && tc.uploader != nil { - t.Error("Expected the uploader to be cancelled") + t.Error("Expected the uploader to be canceled") } if tc.expectBackup && tc.uploader == nil && len(tc.restoreChan) > 0 { diff --git a/pkg/uploader/provider/provider_test.go b/pkg/uploader/provider/provider_test.go index e04ff78b83..1d90c51f19 100644 --- a/pkg/uploader/provider/provider_test.go +++ b/pkg/uploader/provider/provider_test.go @@ -85,7 +85,6 @@ func TestNewUploaderProvider(t *testing.T) { mockFileGetter := &mocks.FileStore{} mockFileGetter.On("Path", &v1.SecretKeySelector{}).Return("", nil) credGetter.FromFile = mockFileGetter - } _, err := NewUploaderProvider(ctx, client, testCase.UploaderType, testCase.RequestorType, repoIdentifier, bsl, backupRepo, credGetter, repoKeySelector, log) if testCase.ExpectedError == "" { diff --git a/pkg/uploader/provider/restic_test.go b/pkg/uploader/provider/restic_test.go index c653ee1b51..7e828f948b 100644 --- a/pkg/uploader/provider/restic_test.go +++ b/pkg/uploader/provider/restic_test.go @@ -232,7 +232,6 @@ func TestResticRunRestore(t *testing.T) { require.Equal(t, true, tc.errorHandleFunc(err)) }) } - } func TestClose(t *testing.T) { diff --git a/pkg/util/azure/credential_test.go b/pkg/util/azure/credential_test.go index 16381c5cdb..bdd33eadb0 100644 --- a/pkg/util/azure/credential_test.go +++ b/pkg/util/azure/credential_test.go @@ -47,7 +47,6 @@ func TestNewCredential(t *testing.T) { } _, err = NewCredential(creds, options) require.Nil(t, err) - } func Test_newConfigCredential(t *testing.T) { diff --git a/pkg/util/collections/includes_excludes_test.go b/pkg/util/collections/includes_excludes_test.go index fe084f4884..655e3a50e3 100644 --- a/pkg/util/collections/includes_excludes_test.go +++ b/pkg/util/collections/includes_excludes_test.go @@ -729,7 +729,6 @@ func TestGetScopedResourceIncludesExcludes(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - logger := logrus.StandardLogger() nsIncludeExclude := NewIncludesExcludes() resources := GetScopeResourceIncludesExcludes(setupDiscoveryClientWithResources(tc.apiResources), 
logger, tc.namespaceScopedIncludes, tc.namespaceScopedExcludes, tc.clusterScopedIncludes, tc.clusterScopedExcludes, *nsIncludeExclude) diff --git a/pkg/util/csi/util_test.go b/pkg/util/csi/util_test.go index a9b0a37aa3..8a3ea3cc1c 100644 --- a/pkg/util/csi/util_test.go +++ b/pkg/util/csi/util_test.go @@ -25,7 +25,6 @@ import ( ) func TestCSIFeatureNotEnabledAndPluginIsFromCSI(t *testing.T) { - features.NewFeatureFlagSet("EnableCSI") require.False(t, ShouldSkipAction("abc")) require.False(t, ShouldSkipAction("velero.io/csi-pvc-backupper")) diff --git a/pkg/util/kube/utils_test.go b/pkg/util/kube/utils_test.go index 31110dc8c0..141fb45cee 100644 --- a/pkg/util/kube/utils_test.go +++ b/pkg/util/kube/utils_test.go @@ -138,7 +138,6 @@ func TestEnsureNamespaceExistsAndIsReady(t *testing.T) { assert.Equal(t, test.expectedCreatedResult, nsCreated) }) } - } // TestGetVolumeDirectorySuccess tests that the GetVolumeDirectory function diff --git a/test/e2e/backup/backup.go b/test/e2e/backup/backup.go index 38d3ed6878..fcbc7ee640 100644 --- a/test/e2e/backup/backup.go +++ b/test/e2e/backup/backup.go @@ -58,7 +58,6 @@ func BackupRestoreRetainedPVWithRestic() { } func BackupRestoreTest(backupRestoreTestConfig BackupRestoreTestConfig) { - var ( backupName, restoreName, kibishiiNamespace string err error diff --git a/test/e2e/backups/sync_backups.go b/test/e2e/backups/sync_backups.go index fd7e076c3c..24caace43f 100644 --- a/test/e2e/backups/sync_backups.go +++ b/test/e2e/backups/sync_backups.go @@ -73,7 +73,6 @@ func BackupsSyncTest() { Expect(VeleroUninstall(ctx, veleroCfg.VeleroCLI, veleroCfg.VeleroNamespace)).To(Succeed()) } } - }) It("Backups in object storage should be synced to a new Velero successfully", func() { diff --git a/test/e2e/backups/ttl.go b/test/e2e/backups/ttl.go index 49045266e7..409773264f 100644 --- a/test/e2e/backups/ttl.go +++ b/test/e2e/backups/ttl.go @@ -51,7 +51,6 @@ func (b *TTL) Init() { b.backupName = "backup-ttl-test-" + UUIDgen.String() b.restoreName = "restore-ttl-test-" + UUIDgen.String() b.ttl = 10 * time.Minute - } func TTLTest() { @@ -159,7 +158,6 @@ func TTLTest() { veleroCfg.CloudCredentialsFile, veleroCfg.BSLBucket, veleroCfg.BSLPrefix, veleroCfg.BSLConfig, test.restoreName, RestoreObjectsPrefix)).NotTo(HaveOccurred(), "Fail to get restore object") - }) By("Check TTL was set correctly", func() { @@ -198,7 +196,6 @@ func TTLTest() { veleroCfg.CloudCredentialsFile, veleroCfg.BSLBucket, veleroCfg.BSLPrefix, veleroCfg.BSLConfig, test.restoreName, RestoreObjectsPrefix, 5)).NotTo(HaveOccurred(), "Fail to get restore object") - }) }) } diff --git a/test/e2e/basic/api-group/enable_api_group_extentions.go b/test/e2e/basic/api-group/enable_api_group_extentions.go index 05eeb8256d..d2f7dd13a0 100644 --- a/test/e2e/basic/api-group/enable_api_group_extentions.go +++ b/test/e2e/basic/api-group/enable_api_group_extentions.go @@ -91,7 +91,6 @@ func APIExtensionsVersionsTest() { veleroCfg.ClientToInstallVelero = veleroCfg.DefaultClient }) } - }) Context("When EnableAPIGroupVersions flag is set", func() { It("Enable API Group to B/R CRD APIExtensionsVersions", func() { diff --git a/test/e2e/basic/api-group/enable_api_group_versions.go b/test/e2e/basic/api-group/enable_api_group_versions.go index 489c7edd2f..5c043f2a12 100644 --- a/test/e2e/basic/api-group/enable_api_group_versions.go +++ b/test/e2e/basic/api-group/enable_api_group_versions.go @@ -238,7 +238,6 @@ func runEnableAPIGroupVersionsTests(ctx context.Context, client TestClient, grou fmt.Println(errors.Wrapf(err, "failed to 
delete crd %q", crdName)) } }(fmt.Sprintf("rockband%ds.music.example.io.%d", i, i)) - } time.Sleep(6 * time.Minute) @@ -328,7 +327,6 @@ func runEnableAPIGroupVersionsTests(ctx context.Context, client TestClient, grou ) return errors.New(msg) } - } } @@ -465,7 +463,6 @@ func resourceInfo(ctx context.Context, g, v, r string, index int) (map[string]ma // are found in the haystack argument values. func containsAll(haystack, needles map[string]string) bool { for nkey, nval := range needles { - hval, ok := haystack[nkey] if !ok { return false diff --git a/test/e2e/basic/storage-class-changing.go b/test/e2e/basic/storage-class-changing.go index 5b35c91d05..fae10c9f89 100644 --- a/test/e2e/basic/storage-class-changing.go +++ b/test/e2e/basic/storage-class-changing.go @@ -83,7 +83,6 @@ func (s *StorageClasssChanging) CreateResources() error { }) By(fmt.Sprintf("Create a deployment in namespace %s", s.VeleroCfg.VeleroNamespace), func() { - pvc, err := CreatePVC(s.Client, s.namespace, s.pvcName, s.srcStorageClass, nil) Expect(err).To(Succeed()) vols := CreateVolumes(pvc.Name, []string{s.volume}) diff --git a/test/e2e/bsl-mgmt/deletion.go b/test/e2e/bsl-mgmt/deletion.go index 9dc6f8e95a..216436696c 100644 --- a/test/e2e/bsl-mgmt/deletion.go +++ b/test/e2e/bsl-mgmt/deletion.go @@ -321,7 +321,6 @@ func BslDeletionTest(useVolumeSnapshots bool) { var snapshotCheckPoint SnapshotCheckPoint snapshotCheckPoint.NamespaceBackedUp = bslDeletionTestNs By(fmt.Sprintf("Snapshot should not be deleted in cloud object store after deleting bsl %s", backupLocation_1), func() { - snapshotCheckPoint, err = GetSnapshotCheckPoint(*veleroCfg.ClientToInstallVelero, veleroCfg, 1, bslDeletionTestNs, backupName_1, []string{podName_1}) Expect(err).NotTo(HaveOccurred(), "Fail to get Azure CSI snapshot checkpoint") Expect(SnapshotsShouldBeCreatedInCloud(veleroCfg.CloudProvider, diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go index 906846039f..225f8bf6a7 100644 --- a/test/e2e/e2e_suite_test.go +++ b/test/e2e/e2e_suite_test.go @@ -101,7 +101,6 @@ func init() { flag.StringVar(&VeleroCfg.EKSPolicyARN, "eks-policy-arn", "", "EKS plicy ARN for creating AWS IAM service account.") flag.StringVar(&VeleroCfg.DefaultCLSServiceAccountName, "default-cls-service-account-name", "", "default cluster service account name.") flag.StringVar(&VeleroCfg.StandbyCLSServiceAccountName, "standby-cls-service-account-name", "", "standby cluster service account name.") - } // Add label [SkipVanillaZfs]: diff --git a/test/e2e/migration/migration.go b/test/e2e/migration/migration.go index b85c7b347c..f84a3f7a54 100644 --- a/test/e2e/migration/migration.go +++ b/test/e2e/migration/migration.go @@ -108,7 +108,6 @@ func MigrationTest(useVolumeSnapshots bool, veleroCLI2Version VeleroCLI2Version) veleroCfg.ClusterToInstallVelero = veleroCfg.DefaultClusterName }) } - }) When("kibishii is the sample workload", func() { It("should be successfully backed up and restored to the default BackupStorageLocation", func() { diff --git a/test/e2e/pv-backup/pv-backup-filter.go b/test/e2e/pv-backup/pv-backup-filter.go index f6fd7993d0..a858743f95 100644 --- a/test/e2e/pv-backup/pv-backup-filter.go +++ b/test/e2e/pv-backup/pv-backup-filter.go @@ -55,7 +55,6 @@ func (p *PVBackupFiltering) Init() error { // annotation will be ignored, so it's only set for opt-out test if p.annotation == OPT_OUT_ANN { p.BackupArgs = append(p.BackupArgs, "--default-volumes-to-fs-backup") - } p.RestoreArgs = []string{ "create", "--namespace", p.VeleroCfg.VeleroNamespace, "restore", 
p.RestoreName, diff --git a/test/e2e/resource-filtering/exclude_namespaces.go b/test/e2e/resource-filtering/exclude_namespaces.go index 096ddcfb98..f4e22e852f 100644 --- a/test/e2e/resource-filtering/exclude_namespaces.go +++ b/test/e2e/resource-filtering/exclude_namespaces.go @@ -90,7 +90,6 @@ func (e *ExcludeNamespaces) Init() error { "create", "--namespace", e.VeleroCfg.VeleroNamespace, "restore", e.RestoreName, "--from-backup", e.BackupName, "--wait", } - } else { *e.NSIncluded = append(*e.NSIncluded, *e.nsExcluded...) e.BackupArgs = []string{ diff --git a/test/e2e/resource-filtering/include_namespaces.go b/test/e2e/resource-filtering/include_namespaces.go index e670d123c9..d39c2acb94 100644 --- a/test/e2e/resource-filtering/include_namespaces.go +++ b/test/e2e/resource-filtering/include_namespaces.go @@ -79,7 +79,6 @@ func (i *IncludeNamespaces) Init() error { "create", "--namespace", i.VeleroCfg.VeleroNamespace, "restore", i.RestoreName, "--from-backup", i.BackupName, "--wait", } - } else { i.BackupName = "backup-" + i.UUIDgen i.RestoreName = "restore-" + i.CaseBaseName diff --git a/test/e2e/resourcepolicies/resource_policies.go b/test/e2e/resourcepolicies/resource_policies.go index 82265da9e4..0a724f225b 100644 --- a/test/e2e/resourcepolicies/resource_policies.go +++ b/test/e2e/resourcepolicies/resource_policies.go @@ -177,7 +177,6 @@ func (r *ResourcePoliciesCase) Verify() error { } } }) - } return nil } diff --git a/test/e2e/schedule/schedule.go b/test/e2e/schedule/schedule.go index db1bc1cad3..a12eaefe35 100644 --- a/test/e2e/schedule/schedule.go +++ b/test/e2e/schedule/schedule.go @@ -200,7 +200,6 @@ func (n *ScheduleBackup) Verify() error { fmt.Printf("Restored configmap is %v\n", configmap) Expect(err).ShouldNot(HaveOccurred(), fmt.Sprintf("failed to list configmap in namespace: %q\n", ns)) } - }) return nil } diff --git a/test/e2e/upgrade/upgrade.go b/test/e2e/upgrade/upgrade.go index e6e9b16736..149ae76253 100644 --- a/test/e2e/upgrade/upgrade.go +++ b/test/e2e/upgrade/upgrade.go @@ -123,7 +123,6 @@ func BackupUpgradeRestoreTest(useVolumeSnapshots bool, veleroCLI2Version VeleroC veleroCfg.GCFrequency = "" By(fmt.Sprintf("Install the expected old version Velero (%s) for upgrade", veleroCLI2Version.VeleroVersion), func() { - //Set VeleroImage and RestoreHelperImage to blank //VeleroImage and RestoreHelperImage should be the default value in originalCli tmpCfgForOldVeleroInstall := veleroCfg @@ -230,7 +229,6 @@ func BackupUpgradeRestoreTest(useVolumeSnapshots bool, veleroCLI2Version VeleroC } By(fmt.Sprintf("Upgrade Velero by CLI %s", tmpCfg.VeleroCLI), func() { - tmpCfg.GCFrequency = "" tmpCfg.UseRestic = false tmpCfg.UseNodeAgent = !useVolumeSnapshots diff --git a/test/pkg/client/config.go b/test/pkg/client/config.go index 2302ca9c91..7546a0ac4c 100644 --- a/test/pkg/client/config.go +++ b/test/pkg/client/config.go @@ -130,7 +130,6 @@ func (c VeleroConfig) Colorized() bool { } return colorized - } func (c VeleroConfig) CACertFile() string { diff --git a/test/util/k8s/deployment.go b/test/util/k8s/deployment.go index 0fff022873..3158f8c023 100644 --- a/test/util/k8s/deployment.go +++ b/test/util/k8s/deployment.go @@ -114,7 +114,6 @@ func (d *DeploymentBuilder) WithVolume(volumes []*v1.Volume) *DeploymentBuilder MountPath: "/" + v.Name, }) d.Spec.Template.Spec.Volumes = append(d.Spec.Template.Spec.Volumes, *v) - } // NOTE here just mount volumes to the first container diff --git a/test/util/k8s/persistentvolumes.go b/test/util/k8s/persistentvolumes.go index 
441c1bd108..491f394257 100644 --- a/test/util/k8s/persistentvolumes.go +++ b/test/util/k8s/persistentvolumes.go @@ -28,7 +28,6 @@ import ( ) func CreatePersistentVolume(client TestClient, name string) (*corev1.PersistentVolume, error) { - p := &corev1.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{ Name: name, diff --git a/test/util/k8s/pod.go b/test/util/k8s/pod.go index 5a76382c53..84e0a6b4eb 100644 --- a/test/util/k8s/pod.go +++ b/test/util/k8s/pod.go @@ -107,7 +107,6 @@ func GetPod(ctx context.Context, client TestClient, namespace string, pod string } func AddAnnotationToPod(ctx context.Context, client TestClient, namespace, podName string, ann map[string]string) (*corev1.Pod, error) { - newPod, err := GetPod(ctx, client, namespace, podName) if err != nil { return nil, errors.Wrap(err, fmt.Sprintf("Fail to ge pod %s in namespace %s", podName, namespace)) diff --git a/test/util/k8s/rbac.go b/test/util/k8s/rbac.go index ac7cfb56d4..c6fa2559c6 100644 --- a/test/util/k8s/rbac.go +++ b/test/util/k8s/rbac.go @@ -76,7 +76,6 @@ func GetClusterRoleBinding(ctx context.Context, client TestClient, rolebinding s } func CleanupClusterRole(ctx context.Context, client TestClient, CaseBaseName string) error { - clusterroles, err := client.ClientGo.RbacV1().ClusterRoles().List(ctx, metav1.ListOptions{}) if err != nil { return errors.Wrap(err, "Could not retrieve clusterroles") @@ -95,7 +94,6 @@ func CleanupClusterRole(ctx context.Context, client TestClient, CaseBaseName str } func CleanupClusterRoleBinding(ctx context.Context, client TestClient, CaseBaseName string) error { - clusterrolebindings, err := client.ClientGo.RbacV1().ClusterRoleBindings().List(ctx, metav1.ListOptions{}) if err != nil { return errors.Wrap(err, "Could not retrieve clusterrolebindings") diff --git a/test/util/providers/aws_utils.go b/test/util/providers/aws_utils.go index 116c6b351c..d40a2bec20 100644 --- a/test/util/providers/aws_utils.go +++ b/test/util/providers/aws_utils.go @@ -128,7 +128,6 @@ func newS3Client(cfg aws.Config, url string, forcePathStyle bool) (*s3.Client, e // GetBucketRegion returns the AWS region that a bucket is in, or an error // if the region cannot be determined. 
func GetBucketRegion(bucket string) (string, error) { - cfg, err := config.LoadDefaultConfig(context.Background()) if err != nil { return "", errors.WithStack(err) diff --git a/test/util/providers/azure_utils.go b/test/util/providers/azure_utils.go index 06063dcfa4..4106f71a75 100644 --- a/test/util/providers/azure_utils.go +++ b/test/util/providers/azure_utils.go @@ -270,7 +270,6 @@ func (s AzureStorage) DeleteObjectsInBucket(cloudCredentialsFile, bslBucket, bsl marker = listBlob.NextMarker for _, blobInfo := range listBlob.Segment.BlobItems { - if strings.Contains(blobInfo.Name, bslPrefix+backupObject+"/") { deleteBlob(p, accountName, containerName, blobInfo.Name) if err != nil { @@ -284,7 +283,6 @@ func (s AzureStorage) DeleteObjectsInBucket(cloudCredentialsFile, bslBucket, bsl } func (s AzureStorage) IsSnapshotExisted(cloudCredentialsFile, bslConfig, backupName string, snapshotCheck SnapshotCheckPoint) error { - ctx := context.Background() if err := loadCredentialsIntoEnv(cloudCredentialsFile); err != nil { diff --git a/test/util/velero/velero_utils.go b/test/util/velero/velero_utils.go index 81c734f2ba..2e4627a81e 100644 --- a/test/util/velero/velero_utils.go +++ b/test/util/velero/velero_utils.go @@ -173,7 +173,6 @@ func getPluginsByVersion(version, cloudProvider, objectStoreProvider, feature st plugins = append(plugins, pluginsForDatamover...) } } - } return plugins, nil } @@ -181,7 +180,6 @@ func getPluginsByVersion(version, cloudProvider, objectStoreProvider, feature st // getProviderVeleroInstallOptions returns Velero InstallOptions for the provider. func getProviderVeleroInstallOptions(veleroCfg *VeleroConfig, plugins []string) (*cliinstall.Options, error) { - if veleroCfg.CloudCredentialsFile == "" && veleroCfg.ServiceAccountNameToInstall == "" { return nil, errors.Errorf("No credentials were supplied to use for E2E tests") } @@ -1184,7 +1182,6 @@ func DeleteBslResource(ctx context.Context, veleroCLI string, bslName string) er } func SnapshotCRsCountShouldBe(ctx context.Context, namespace, backupName string, expectedCount int) error { - checkSnapshotCmd := exec.CommandContext(ctx, "kubectl", "get", "-n", namespace, "snapshots.backupdriver.cnsdp.vmware.com", "-o=jsonpath='{range .items[*]}{.metadata.labels.velero\\.io\\/backup-name}{\"\\n\"}{end}'") fmt.Printf("checkSnapshotCmd cmd =%v\n", checkSnapshotCmd) @@ -1603,7 +1600,6 @@ func InstallTestStorageClasses(path string) error { } func GetPvName(ctx context.Context, client TestClient, pvcName, namespace string) (string, error) { - pvcList, err := GetPvcByPVCName(context.Background(), namespace, pvcName) if err != nil { return "", err @@ -1622,7 +1618,6 @@ func GetPvName(ctx context.Context, client TestClient, pvcName, namespace string } return pvList[0], nil - } func DeletePVs(ctx context.Context, client TestClient, pvList []string) error { for _, pv := range pvList { @@ -1637,7 +1632,6 @@ func DeletePVs(ctx context.Context, client TestClient, pvList []string) error { } func CleanAllRetainedPV(ctx context.Context, client TestClient) { - pvNameList, err := GetAllPVNames(ctx, client) if err != nil { fmt.Println("fail to list PV") From 8d63c76c920e7b5fa747fa5ed8b08b58c75dc501 Mon Sep 17 00:00:00 2001 From: Ming Qiu Date: Tue, 30 Jan 2024 02:18:58 +0000 Subject: [PATCH 2/2] Add maintenance job Signed-off-by: Ming Qiu --- changelogs/unreleased/7451-qiuming-best | 1 + pkg/cmd/cli/install/install.go | 11 + pkg/cmd/cli/repomantenance/maintenance.go | 179 +++++ pkg/cmd/server/server.go | 26 +- pkg/cmd/velero/velero.go | 2 + 
.../backup_repository_controller.go | 28 +-
 .../backup_repository_controller_test.go | 2 +-
 pkg/datapath/file_system.go | 2 +-
 pkg/install/deployment.go | 28 +
 pkg/install/deployment_test.go | 16 +
 pkg/install/resources.go | 6 +
 pkg/repository/maintenance.go | 258 ++++++++
 pkg/repository/maintenance_test.go | 408 ++++++++++++
 pkg/repository/manager.go | 83 ++-
 pkg/repository/manager_test.go | 4 +-
 pkg/repository/provider/unified_repo.go | 4 +
 pkg/util/velero/velero.go | 80 +++
 pkg/util/velero/velero_test.go | 614 ++++++++++++++++++
 18 files changed, 1722 insertions(+), 30 deletions(-)
 create mode 100644 changelogs/unreleased/7451-qiuming-best
 create mode 100644 pkg/cmd/cli/repomantenance/maintenance.go
 create mode 100644 pkg/repository/maintenance.go
 create mode 100644 pkg/repository/maintenance_test.go
 create mode 100644 pkg/util/velero/velero.go
 create mode 100644 pkg/util/velero/velero_test.go

diff --git a/changelogs/unreleased/7451-qiuming-best b/changelogs/unreleased/7451-qiuming-best
new file mode 100644
index 0000000000..c57823a1c6
--- /dev/null
+++ b/changelogs/unreleased/7451-qiuming-best
@@ -0,0 +1 @@
+Add repository maintenance job
diff --git a/pkg/cmd/cli/install/install.go b/pkg/cmd/cli/install/install.go
index 1c6772887c..48629252d9 100644
--- a/pkg/cmd/cli/install/install.go
+++ b/pkg/cmd/cli/install/install.go
@@ -23,6 +23,7 @@ import (
 	"strings"
 	"time"
 
+	"github.com/vmware-tanzu/velero/pkg/repository"
 	"github.com/vmware-tanzu/velero/pkg/uploader"
 
 	"github.com/pkg/errors"
@@ -84,6 +85,7 @@ type Options struct {
 	DefaultSnapshotMoveData bool
 	DisableInformerCache    bool
 	ScheduleSkipImmediately bool
+	MaintenanceCfg          repository.MaintenanceConfig
 }
 
 // BindFlags adds command line values to the options struct.
@@ -128,6 +130,11 @@ func (o *Options) BindFlags(flags *pflag.FlagSet) {
 	flags.BoolVar(&o.DefaultSnapshotMoveData, "default-snapshot-move-data", o.DefaultSnapshotMoveData, "Bool flag to configure Velero server to move data by default for all snapshots supporting data movement. Optional.")
 	flags.BoolVar(&o.DisableInformerCache, "disable-informer-cache", o.DisableInformerCache, "Disable informer cache for Get calls on restore. With this enabled, it will speed up restore in cases where there are backup resources which already exist in the cluster, but for very large clusters this will increase velero memory usage. Default is false (don't disable). Optional.")
 	flags.BoolVar(&o.ScheduleSkipImmediately, "schedule-skip-immediately", o.ScheduleSkipImmediately, "Skip the first scheduled backup immediately after creating a schedule. Default is false (don't skip).")
+	flags.IntVar(&o.MaintenanceCfg.KeepLatestMaitenanceJobs, "keep-latest-maintenance-jobs", o.MaintenanceCfg.KeepLatestMaitenanceJobs, "Number of latest maintenance jobs to keep for each repository. Optional.")
+	flags.StringVar(&o.MaintenanceCfg.CPURequest, "maintenance-job-cpu-request", o.MaintenanceCfg.CPURequest, "CPU request for maintenance jobs. Default is no limit.")
+	flags.StringVar(&o.MaintenanceCfg.MemRequest, "maintenance-job-mem-request", o.MaintenanceCfg.MemRequest, "Memory request for maintenance jobs. Default is no limit.")
+	flags.StringVar(&o.MaintenanceCfg.CPULimit, "maintenance-job-cpu-limit", o.MaintenanceCfg.CPULimit, "CPU limit for maintenance jobs. Default is no limit.")
+	flags.StringVar(&o.MaintenanceCfg.MemLimit, "maintenance-job-mem-limit", o.MaintenanceCfg.MemLimit, "Memory limit for maintenance jobs. Default is no limit.")
 }
 
 // NewInstallOptions instantiates a new, default InstallOptions struct. 
@@ -157,6 +164,9 @@ func NewInstallOptions() *Options { DefaultSnapshotMoveData: false, DisableInformerCache: false, ScheduleSkipImmediately: false, + MaintenanceCfg: repository.MaintenanceConfig{ + KeepLatestMaitenanceJobs: repository.DefaultKeepLatestMaitenanceJobs, + }, } } @@ -224,6 +234,7 @@ func (o *Options) AsVeleroOptions() (*install.VeleroOptions, error) { DefaultSnapshotMoveData: o.DefaultSnapshotMoveData, DisableInformerCache: o.DisableInformerCache, ScheduleSkipImmediately: o.ScheduleSkipImmediately, + MaintenanceCfg: o.MaintenanceCfg, }, nil } diff --git a/pkg/cmd/cli/repomantenance/maintenance.go b/pkg/cmd/cli/repomantenance/maintenance.go new file mode 100644 index 0000000000..4e69c65f4f --- /dev/null +++ b/pkg/cmd/cli/repomantenance/maintenance.go @@ -0,0 +1,179 @@ +package repomantenance + +import ( + "context" + "fmt" + "os" + "strings" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/vmware-tanzu/velero/internal/credentials" + velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + velerocli "github.com/vmware-tanzu/velero/pkg/client" + "github.com/vmware-tanzu/velero/pkg/repository" + "github.com/vmware-tanzu/velero/pkg/repository/provider" + "github.com/vmware-tanzu/velero/pkg/util/filesystem" + "github.com/vmware-tanzu/velero/pkg/util/logging" +) + +type Options struct { + RepoName string + BackupStorageLocation string + RepoType string + LogLevelFlag *logging.LevelFlag + FormatFlag *logging.FormatFlag +} + +func (o *Options) BindFlags(flags *pflag.FlagSet) { + flags.StringVar(&o.RepoName, "repo-name", "", "namespace of the pod/volume that the snapshot is for") + flags.StringVar(&o.BackupStorageLocation, "backup-storage-location", "", "backup's storage location name") + flags.StringVar(&o.RepoType, "repo-type", velerov1api.BackupRepositoryTypeKopia, "type of the repository where the snapshot is stored") + flags.Var(o.LogLevelFlag, "log-level", fmt.Sprintf("The level at which to log. Valid values are %s.", strings.Join(o.LogLevelFlag.AllowedValues(), ", "))) + flags.Var(o.FormatFlag, "log-format", fmt.Sprintf("The format for log output. 
Valid values are %s.", strings.Join(o.FormatFlag.AllowedValues(), ", ")))
+}
+
+func NewCommand(f velerocli.Factory) *cobra.Command {
+	o := &Options{
+		LogLevelFlag: logging.LogLevelFlag(logrus.InfoLevel),
+		FormatFlag:   logging.NewFormatFlag(),
+	}
+	cmd := &cobra.Command{
+		Use:    "repo-maintenance",
+		Hidden: true,
+		Short:  "VELERO INTERNAL COMMAND ONLY - not intended to be run directly by users",
+		Run: func(c *cobra.Command, args []string) {
+			o.Run(f)
+		},
+	}
+
+	o.BindFlags(cmd.Flags())
+	return cmd
+}
+
+func (o *Options) Run(f velerocli.Factory) {
+	logger := logging.DefaultLogger(o.LogLevelFlag.Parse(), o.FormatFlag.Parse())
+	logger.SetOutput(os.Stdout)
+
+	pruneError := o.runRepoPrune(f, f.Namespace(), logger)
+	defer func() {
+		if pruneError != nil {
+			os.Exit(1)
+		}
+	}()
+
+	if pruneError != nil {
+		logger.WithError(pruneError).Error("An error occurred when running repo prune")
+		terminationLogFile, err := os.Create("/dev/termination-log")
+		if err != nil {
+			logger.WithError(err).Error("Failed to create termination log file")
+			return
+		}
+		defer terminationLogFile.Close()
+
+		if _, errWrite := terminationLogFile.WriteString(fmt.Sprintf("An error occurred: %v", pruneError)); errWrite != nil {
+			logger.WithError(errWrite).Error("Failed to write error to termination log file")
+		}
+	}
+}
+
+func (o *Options) initClient(f velerocli.Factory) (client.Client, error) {
+	scheme := runtime.NewScheme()
+	err := velerov1api.AddToScheme(scheme)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to add velero scheme")
+	}
+
+	err = v1.AddToScheme(scheme)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to add api core scheme")
+	}
+
+	config, err := f.ClientConfig()
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to get client config")
+	}
+
+	cli, err := client.New(config, client.Options{
+		Scheme: scheme,
+	})
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to create client")
+	}
+
+	return cli, nil
+}
+
+func (o *Options) runRepoPrune(f velerocli.Factory, namespace string, logger logrus.FieldLogger) error {
+	cli, err := o.initClient(f)
+	if err != nil {
+		return err
+	}
+
+	credentialFileStore, err := credentials.NewNamespacedFileStore(
+		cli,
+		namespace,
+		"/tmp/credentials",
+		filesystem.NewFileSystem(),
+	)
+	if err != nil {
+		return errors.Wrap(err, "failed to create namespaced file store")
+	}
+
+	credentialSecretStore, err := credentials.NewNamespacedSecretStore(cli, namespace)
+	if err != nil {
+		return errors.Wrap(err, "failed to create namespaced secret store")
+	}
+
+	var repoProvider provider.Provider
+	if o.RepoType == velerov1api.BackupRepositoryTypeRestic {
+		repoProvider = provider.NewResticRepositoryProvider(credentialFileStore, filesystem.NewFileSystem(), logger)
+	} else {
+		repoProvider = provider.NewUnifiedRepoProvider(
+			credentials.CredentialGetter{
+				FromFile:   credentialFileStore,
+				FromSecret: credentialSecretStore,
+			}, o.RepoType, cli, logger)
+	}
+
+	// look up the BackupRepository CR for this volume namespace, BSL, and repository type
+	repo, err := repository.GetBackupRepository(context.Background(), cli, namespace,
+		repository.BackupRepositoryKey{
+			VolumeNamespace: o.RepoName,
+			BackupLocation:  o.BackupStorageLocation,
+			RepositoryType:  o.RepoType,
+		}, true)
+
+	if err != nil {
+		return errors.Wrap(err, "failed to get backup repository")
+	}
+
+	// fetch the BackupStorageLocation the repository points at
+	bsl := &velerov1api.BackupStorageLocation{}
+	err = cli.Get(context.Background(), client.ObjectKey{Namespace: namespace, Name: repo.Spec.BackupStorageLocation}, bsl)
+	if err != nil {
+		return errors.Wrap(err, "failed to get backup storage 
location")
+	}
+
+	para := provider.RepoParam{
+		BackupRepo:     repo,
+		BackupLocation: bsl,
+	}
+
+	err = repoProvider.BoostRepoConnect(context.Background(), para)
+	if err != nil {
+		return errors.Wrap(err, "failed to boost repo connect")
+	}
+
+	err = repoProvider.PruneRepo(context.Background(), para)
+	if err != nil {
+		return errors.Wrap(err, "failed to prune repo")
+	}
+	return nil
+}
diff --git a/pkg/cmd/server/server.go b/pkg/cmd/server/server.go
index 6ca3d59498..420faa5448 100644
--- a/pkg/cmd/server/server.go
+++ b/pkg/cmd/server/server.go
@@ -32,6 +32,8 @@ import (
 	"github.com/prometheus/client_golang/prometheus/promhttp"
 	"github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
+	appsv1 "k8s.io/api/apps/v1"
+	batchv1api "k8s.io/api/batch/v1"
 	corev1api "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -136,6 +138,7 @@ type serverConfig struct {
 	defaultSnapshotMoveData bool
 	disableInformerCache    bool
 	scheduleSkipImmediately bool
+	maintenanceCfg          repository.MaintenanceConfig
 }
 
 func NewCommand(f client.Factory) *cobra.Command {
@@ -167,6 +170,9 @@ func NewCommand(f client.Factory) *cobra.Command {
 			defaultSnapshotMoveData: false,
 			disableInformerCache:    defaultDisableInformerCache,
 			scheduleSkipImmediately: false,
+			maintenanceCfg: repository.MaintenanceConfig{
+				KeepLatestMaitenanceJobs: repository.DefaultKeepLatestMaitenanceJobs,
+			},
 		}
 	)
 
@@ -240,7 +246,15 @@ func NewCommand(f client.Factory) *cobra.Command {
 	command.Flags().BoolVar(&config.defaultSnapshotMoveData, "default-snapshot-move-data", config.defaultSnapshotMoveData, "Move data by default for all snapshots supporting data movement.")
 	command.Flags().BoolVar(&config.disableInformerCache, "disable-informer-cache", config.disableInformerCache, "Disable informer cache for Get calls on restore. With this enabled, it will speed up restore in cases where there are backup resources which already exist in the cluster, but for very large clusters this will increase velero memory usage. Default is false (don't disable).")
 	command.Flags().BoolVar(&config.scheduleSkipImmediately, "schedule-skip-immediately", config.scheduleSkipImmediately, "Skip the first scheduled backup immediately after creating a schedule. Default is false (don't skip).")
-
+	command.Flags().IntVar(&config.maintenanceCfg.KeepLatestMaitenanceJobs, "keep-latest-maintenance-jobs", config.maintenanceCfg.KeepLatestMaitenanceJobs, "Number of latest maintenance jobs to keep for each repository. Optional.")
+	command.Flags().StringVar(&config.maintenanceCfg.CPURequest, "maintenance-job-cpu-request", config.maintenanceCfg.CPURequest, "CPU request for maintenance job. Default is no limit.")
+	command.Flags().StringVar(&config.maintenanceCfg.MemRequest, "maintenance-job-mem-request", config.maintenanceCfg.MemRequest, "Memory request for maintenance job. Default is no limit.")
+	command.Flags().StringVar(&config.maintenanceCfg.CPULimit, "maintenance-job-cpu-limit", config.maintenanceCfg.CPULimit, "CPU limit for maintenance job. Default is no limit.")
+	command.Flags().StringVar(&config.maintenanceCfg.MemLimit, "maintenance-job-mem-limit", config.maintenanceCfg.MemLimit, "Memory limit for maintenance job. 
Default is no limit.") + + // maintenance job log setting inherited from velero server + config.maintenanceCfg.FormatFlag = config.formatFlag + config.maintenanceCfg.LogLevelFlag = logLevelFlag return command } @@ -347,6 +361,14 @@ func newServer(f client.Factory, config serverConfig, logger *logrus.Logger) (*s cancelFunc() return nil, err } + if err := batchv1api.AddToScheme(scheme); err != nil { + cancelFunc() + return nil, err + } + if err := appsv1.AddToScheme(scheme); err != nil { + cancelFunc() + return nil, err + } ctrl.SetLogger(logrusr.New(logger)) @@ -647,7 +669,7 @@ func (s *server) initRepoManager() error { s.repoLocker = repository.NewRepoLocker() s.repoEnsurer = repository.NewEnsurer(s.mgr.GetClient(), s.logger, s.config.resourceTimeout) - s.repoManager = repository.NewManager(s.namespace, s.mgr.GetClient(), s.repoLocker, s.repoEnsurer, s.credentialFileStore, s.credentialSecretStore, s.logger) + s.repoManager = repository.NewManager(s.namespace, s.mgr.GetClient(), s.repoLocker, s.repoEnsurer, s.credentialFileStore, s.credentialSecretStore, s.config.maintenanceCfg, s.logger) return nil } diff --git a/pkg/cmd/velero/velero.go b/pkg/cmd/velero/velero.go index b249416e17..972f5bb73c 100644 --- a/pkg/cmd/velero/velero.go +++ b/pkg/cmd/velero/velero.go @@ -26,6 +26,7 @@ import ( "k8s.io/klog/v2" "github.com/vmware-tanzu/velero/pkg/cmd/cli/debug" + "github.com/vmware-tanzu/velero/pkg/cmd/cli/repomantenance" "github.com/vmware-tanzu/velero/pkg/client" "github.com/vmware-tanzu/velero/pkg/cmd/cli/backup" @@ -122,6 +123,7 @@ operations can also be performed as 'velero backup get' and 'velero schedule cre backuplocation.NewCommand(f), snapshotlocation.NewCommand(f), debug.NewCommand(f), + repomantenance.NewCommand(f), ) // init and add the klog flags diff --git a/pkg/controller/backup_repository_controller.go b/pkg/controller/backup_repository_controller.go index 108c6c470c..e4d68d9998 100644 --- a/pkg/controller/backup_repository_controller.go +++ b/pkg/controller/backup_repository_controller.go @@ -189,10 +189,16 @@ func (r *BackupRepoReconciler) Reconcile(ctx context.Context, req ctrl.Request) } switch backupRepo.Status.Phase { + case velerov1api.BackupRepositoryPhaseNotReady: + ready, err := r.checkNotReadyRepo(ctx, backupRepo, log) + if err != nil { + return ctrl.Result{}, err + } else if !ready { + return ctrl.Result{}, nil + } + fallthrough case velerov1api.BackupRepositoryPhaseReady: return ctrl.Result{}, r.runMaintenanceIfDue(ctx, backupRepo, log) - case velerov1api.BackupRepositoryPhaseNotReady: - return ctrl.Result{}, r.checkNotReadyRepo(ctx, backupRepo, log) } return ctrl.Result{}, nil @@ -277,8 +283,6 @@ func ensureRepo(repo *velerov1api.BackupRepository, repoManager repository.Manag } func (r *BackupRepoReconciler) runMaintenanceIfDue(ctx context.Context, req *velerov1api.BackupRepository, log logrus.FieldLogger) error { - log.Debug("backupRepositoryController.runMaintenanceIfDue") - now := r.clock.Now() if !dueForMaintenance(req, now) { @@ -291,6 +295,7 @@ func (r *BackupRepoReconciler) runMaintenanceIfDue(ctx context.Context, req *vel // prune failures should be displayed in the `.status.message` field but // should not cause the repo to move to `NotReady`. 
log.Debug("Pruning repo") + if err := r.repositoryManager.PruneRepo(req); err != nil { log.WithError(err).Warn("error pruning repository") return r.patchBackupRepository(ctx, req, func(rr *velerov1api.BackupRepository) { @@ -299,6 +304,7 @@ func (r *BackupRepoReconciler) runMaintenanceIfDue(ctx context.Context, req *vel } return r.patchBackupRepository(ctx, req, func(rr *velerov1api.BackupRepository) { + rr.Status.Message = "" rr.Status.LastMaintenanceTime = &metav1.Time{Time: now} }) } @@ -307,28 +313,32 @@ func dueForMaintenance(req *velerov1api.BackupRepository, now time.Time) bool { return req.Status.LastMaintenanceTime == nil || req.Status.LastMaintenanceTime.Add(req.Spec.MaintenanceFrequency.Duration).Before(now) } -func (r *BackupRepoReconciler) checkNotReadyRepo(ctx context.Context, req *velerov1api.BackupRepository, log logrus.FieldLogger) error { +func (r *BackupRepoReconciler) checkNotReadyRepo(ctx context.Context, req *velerov1api.BackupRepository, log logrus.FieldLogger) (bool, error) { log.Info("Checking backup repository for readiness") repoIdentifier, err := r.getIdentiferByBSL(ctx, req) if err != nil { - return r.patchBackupRepository(ctx, req, repoNotReady(err.Error())) + return false, r.patchBackupRepository(ctx, req, repoNotReady(err.Error())) } if repoIdentifier != req.Spec.ResticIdentifier { if err := r.patchBackupRepository(ctx, req, func(rr *velerov1api.BackupRepository) { rr.Spec.ResticIdentifier = repoIdentifier }); err != nil { - return err + return false, err } } // we need to ensure it (first check, if check fails, attempt to init) // because we don't know if it's been successfully initialized yet. if err := ensureRepo(req, r.repositoryManager); err != nil { - return r.patchBackupRepository(ctx, req, repoNotReady(err.Error())) + return false, r.patchBackupRepository(ctx, req, repoNotReady(err.Error())) + } + err = r.patchBackupRepository(ctx, req, repoReady()) + if err != nil { + return false, err } - return r.patchBackupRepository(ctx, req, repoReady()) + return true, nil } func repoNotReady(msg string) func(*velerov1api.BackupRepository) { diff --git a/pkg/controller/backup_repository_controller_test.go b/pkg/controller/backup_repository_controller_test.go index 22c64b45e8..a80a2da2b3 100644 --- a/pkg/controller/backup_repository_controller_test.go +++ b/pkg/controller/backup_repository_controller_test.go @@ -93,7 +93,7 @@ func TestCheckNotReadyRepo(t *testing.T) { err = reconciler.Client.Create(context.TODO(), locations) assert.NoError(t, err) - err = reconciler.checkNotReadyRepo(context.TODO(), rr, reconciler.logger) + _, err = reconciler.checkNotReadyRepo(context.TODO(), rr, reconciler.logger) assert.NoError(t, err) assert.Equal(t, rr.Status.Phase, velerov1api.BackupRepositoryPhaseReady) assert.Equal(t, "s3:test.amazonaws.com/bucket/restic/volume-ns-1", rr.Spec.ResticIdentifier) diff --git a/pkg/datapath/file_system.go b/pkg/datapath/file_system.go index 60a0c8347b..b7372d0a82 100644 --- a/pkg/datapath/file_system.go +++ b/pkg/datapath/file_system.go @@ -184,7 +184,7 @@ func (fs *fileSystemBR) Cancel() { func (fs *fileSystemBR) boostRepoConnect(ctx context.Context, repositoryType string, credentialGetter *credentials.CredentialGetter) error { if repositoryType == velerov1api.BackupRepositoryTypeKopia { - if err := repoProvider.NewUnifiedRepoProvider(*credentialGetter, repositoryType, fs.log).BoostRepoConnect(ctx, repoProvider.RepoParam{BackupLocation: fs.backupLocation, BackupRepo: fs.backupRepo}); err != nil { + if err := 
repoProvider.NewUnifiedRepoProvider(*credentialGetter, repositoryType, fs.client, fs.log).BoostRepoConnect(ctx, repoProvider.RepoParam{BackupLocation: fs.backupLocation, BackupRepo: fs.backupRepo}); err != nil { return err } } else { diff --git a/pkg/install/deployment.go b/pkg/install/deployment.go index 383b40856a..a8c2a131ab 100644 --- a/pkg/install/deployment.go +++ b/pkg/install/deployment.go @@ -27,6 +27,7 @@ import ( "github.com/vmware-tanzu/velero/internal/velero" "github.com/vmware-tanzu/velero/pkg/builder" + "github.com/vmware-tanzu/velero/pkg/repository" ) type podTemplateOption func(*podTemplateConfig) @@ -51,6 +52,7 @@ type podTemplateConfig struct { privilegedNodeAgent bool disableInformerCache bool scheduleSkipImmediately bool + maintenanceConfig repository.MaintenanceConfig } func WithImage(image string) podTemplateOption { @@ -177,6 +179,12 @@ func WithScheduleSkipImmediately(b bool) podTemplateOption { } } +func WithMaintenanceConfig(config repository.MaintenanceConfig) podTemplateOption { + return func(c *podTemplateConfig) { + c.maintenanceConfig = config + } +} + func Deployment(namespace string, opts ...podTemplateOption) *appsv1.Deployment { // TODO: Add support for server args c := &podTemplateConfig{ @@ -234,6 +242,26 @@ func Deployment(namespace string, opts ...podTemplateOption) *appsv1.Deployment args = append(args, fmt.Sprintf("--fs-backup-timeout=%v", c.podVolumeOperationTimeout)) } + if c.maintenanceConfig.KeepLatestMaitenanceJobs > 0 { + args = append(args, fmt.Sprintf("--keep-latest-maintenance-jobs=%d", c.maintenanceConfig.KeepLatestMaitenanceJobs)) + } + + if c.maintenanceConfig.CPULimit != "" { + args = append(args, fmt.Sprintf("--maintenance-job-cpu-limit=%s", c.maintenanceConfig.CPULimit)) + } + + if c.maintenanceConfig.CPURequest != "" { + args = append(args, fmt.Sprintf("--maintenance-job-cpu-request=%s", c.maintenanceConfig.CPURequest)) + } + + if c.maintenanceConfig.MemLimit != "" { + args = append(args, fmt.Sprintf("--maintenance-job-mem-limit=%s", c.maintenanceConfig.MemLimit)) + } + + if c.maintenanceConfig.MemRequest != "" { + args = append(args, fmt.Sprintf("--maintenance-job-mem-request=%s", c.maintenanceConfig.MemRequest)) + } + deployment := &appsv1.Deployment{ ObjectMeta: objectMeta(namespace, "velero"), TypeMeta: metav1.TypeMeta{ diff --git a/pkg/install/deployment_test.go b/pkg/install/deployment_test.go index 8e9649737d..fcbd4fc7fd 100644 --- a/pkg/install/deployment_test.go +++ b/pkg/install/deployment_test.go @@ -22,6 +22,8 @@ import ( "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" + + "github.com/vmware-tanzu/velero/pkg/repository" ) func TestDeployment(t *testing.T) { @@ -68,4 +70,18 @@ func TestDeployment(t *testing.T) { deploy = Deployment("velero", WithDisableInformerCache()) assert.Len(t, deploy.Spec.Template.Spec.Containers[0].Args, 2) assert.Equal(t, "--disable-informer-cache=true", deploy.Spec.Template.Spec.Containers[0].Args[1]) + + deploy = Deployment("velero", WithMaintenanceConfig(repository.MaintenanceConfig{ + KeepLatestMaitenanceJobs: 3, + CPURequest: "100m", + MemRequest: "256Mi", + CPULimit: "200m", + MemLimit: "512Mi", + })) + assert.Len(t, deploy.Spec.Template.Spec.Containers[0].Args, 6) + assert.Equal(t, "--keep-latest-maintenance-jobs=3", deploy.Spec.Template.Spec.Containers[0].Args[1]) + assert.Equal(t, "--maintenance-job-cpu-limit=200m", deploy.Spec.Template.Spec.Containers[0].Args[2]) + assert.Equal(t, "--maintenance-job-cpu-request=100m", deploy.Spec.Template.Spec.Containers[0].Args[3]) + 
assert.Equal(t, "--maintenance-job-mem-limit=512Mi", deploy.Spec.Template.Spec.Containers[0].Args[4]) + assert.Equal(t, "--maintenance-job-mem-request=256Mi", deploy.Spec.Template.Spec.Containers[0].Args[5]) } diff --git a/pkg/install/resources.go b/pkg/install/resources.go index 4c721200ef..171fc2ece4 100644 --- a/pkg/install/resources.go +++ b/pkg/install/resources.go @@ -30,6 +30,8 @@ import ( v1crds "github.com/vmware-tanzu/velero/config/crd/v1/crds" v2alpha1crds "github.com/vmware-tanzu/velero/config/crd/v2alpha1/crds" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "github.com/vmware-tanzu/velero/pkg/repository" + "github.com/vmware-tanzu/velero/pkg/util/logging" ) const ( @@ -261,6 +263,9 @@ type VeleroOptions struct { DefaultSnapshotMoveData bool DisableInformerCache bool ScheduleSkipImmediately bool + FormatFlag *logging.FormatFlag + LogLevelFlag *logging.LevelFlag + MaintenanceCfg repository.MaintenanceConfig } func AllCRDs() *unstructured.UnstructuredList { @@ -345,6 +350,7 @@ func AllResources(o *VeleroOptions) *unstructured.UnstructuredList { WithPodVolumeOperationTimeout(o.PodVolumeOperationTimeout), WithUploaderType(o.UploaderType), WithScheduleSkipImmediately(o.ScheduleSkipImmediately), + WithMaintenanceConfig(o.MaintenanceCfg), } if len(o.Features) > 0 { diff --git a/pkg/repository/maintenance.go b/pkg/repository/maintenance.go new file mode 100644 index 0000000000..7a298f4690 --- /dev/null +++ b/pkg/repository/maintenance.go @@ -0,0 +1,258 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package repository + +import ( + "context" + "fmt" + "sort" + "time" + + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/pkg/errors" + + "github.com/vmware-tanzu/velero/pkg/repository/provider" + "github.com/vmware-tanzu/velero/pkg/util/kube" + "github.com/vmware-tanzu/velero/pkg/util/logging" + veleroutil "github.com/vmware-tanzu/velero/pkg/util/velero" +) + +const RepositoryNameLabel = "velero.io/repo-name" +const DefaultKeepLatestMaitenanceJobs = 3 +const DefaultMaintenanceJobCPURequest = "0" +const DefaultMaintenanceJobCPULimit = "0" +const DefaultMaintenanceJobMemRequest = "0" +const DefaultMaintenanceJobMemLimit = "0" + +// MaintenanceConfig is the configuration for the repo maintenance job +type MaintenanceConfig struct { + KeepLatestMaitenanceJobs int + CPURequest string + MemRequest string + CPULimit string + MemLimit string + LogLevelFlag *logging.LevelFlag + FormatFlag *logging.FormatFlag +} + +func generateJobName(repo string) string { + millisecond := time.Now().UTC().UnixMilli() // millisecond + + jobName := fmt.Sprintf("%s-maintain-job-%d", repo, millisecond) + if len(jobName) > 63 { // k8s job name length limit + jobName = fmt.Sprintf("repo-maintain-job-%d", millisecond) + } + + return jobName +} + +func buildMaintenanceJob(m MaintenanceConfig, param provider.RepoParam, cli client.Client, namespace string) (*batchv1.Job, error) { + // Get the Velero server deployment + deployment := &appsv1.Deployment{} + err := cli.Get(context.TODO(), types.NamespacedName{Name: "velero", Namespace: namespace}, deployment) + if err != nil { + return nil, err + } + + // Get the environment variables from the Velero server deployment + envVars := veleroutil.GetEnvVarsFromVeleroServer(deployment) + + // Get the volume mounts from the Velero server deployment + volumeMounts := veleroutil.GetVolumeMountsFromVeleroServer(deployment) + + // Get the volumes from the Velero server deployment + volumes := veleroutil.GetVolumesFromVeleroServer(deployment) + + // Get the service account from the Velero server deployment + serviceAccount := veleroutil.GetServiceAccountFromVeleroServer(deployment) + + // Get image + image := veleroutil.GetVeleroServerImage(deployment) + + // Set resource limits and requests + if m.CPURequest == "" { + m.CPURequest = DefaultMaintenanceJobCPURequest + } + if m.MemRequest == "" { + m.MemRequest = DefaultMaintenanceJobMemRequest + } + if m.CPULimit == "" { + m.CPULimit = DefaultMaintenanceJobCPULimit + } + if m.MemLimit == "" { + m.MemLimit = DefaultMaintenanceJobMemLimit + } + + resources, err := kube.ParseResourceRequirements(m.CPURequest, m.MemRequest, m.CPULimit, m.MemLimit) + if err != nil { + return nil, errors.Wrap(err, "failed to parse resource requirements for maintenance job") + } + + // Set arguments + args := []string{"repo-maintenance"} + args = append(args, fmt.Sprintf("--repo-name=%s", param.BackupRepo.Spec.VolumeNamespace)) + args = append(args, fmt.Sprintf("--repo-type=%s", param.BackupRepo.Spec.RepositoryType)) + args = append(args, fmt.Sprintf("--backup-storage-location=%s", param.BackupLocation.Name)) + args = append(args, fmt.Sprintf("--log-level=%s", m.LogLevelFlag.String())) + args = append(args, fmt.Sprintf("--log-format=%s", m.FormatFlag.String())) + + // build the maintenance 
job
+	job := &batchv1.Job{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      generateJobName(param.BackupRepo.Name),
+			Namespace: param.BackupRepo.Namespace,
+			Labels: map[string]string{
+				RepositoryNameLabel: param.BackupRepo.Name,
+			},
+		},
+		Spec: batchv1.JobSpec{
+			BackoffLimit: new(int32), // Never retry
+			Template: v1.PodTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "velero-repo-maintenance-pod",
+				},
+				Spec: v1.PodSpec{
+					Containers: []v1.Container{
+						{
+							Name:  "velero-repo-maintenance-container",
+							Image: image,
+							Command: []string{
+								"/velero",
+							},
+							Args:            args,
+							ImagePullPolicy: v1.PullIfNotPresent,
+							Env:             envVars,
+							VolumeMounts:    volumeMounts,
+							Resources:       resources,
+						},
+					},
+					RestartPolicy:      v1.RestartPolicyNever,
+					Volumes:            volumes,
+					ServiceAccountName: serviceAccount,
+				},
+			},
+		},
+	}
+
+	if affinity := veleroutil.GetAffinityFromVeleroServer(deployment); affinity != nil {
+		job.Spec.Template.Spec.Affinity = affinity
+	}
+
+	if tolerations := veleroutil.GetTolerationsFromVeleroServer(deployment); tolerations != nil {
+		job.Spec.Template.Spec.Tolerations = tolerations
+	}
+
+	if nodeSelector := veleroutil.GetNodeSelectorFromVeleroServer(deployment); nodeSelector != nil {
+		job.Spec.Template.Spec.NodeSelector = nodeSelector
+	}
+
+	if labels := veleroutil.GetVeleroServerLables(deployment); len(labels) > 0 {
+		job.Spec.Template.Labels = labels
+	}
+
+	if annotations := veleroutil.GetVeleroServerAnnotations(deployment); len(annotations) > 0 {
+		job.Spec.Template.Annotations = annotations
+	}
+
+	return job, nil
+}
+
+// deleteOldMaintenanceJobs deletes old maintenance jobs and keeps the latest N jobs
+func deleteOldMaintenanceJobs(cli client.Client, repo string, keep int) error {
+	// Get the maintenance job list by label
+	jobList := &batchv1.JobList{}
+	err := cli.List(context.TODO(), jobList, client.MatchingLabels(map[string]string{RepositoryNameLabel: repo}))
+	if err != nil {
+		return err
+	}
+
+	// Delete old maintenance jobs
+	if len(jobList.Items) > keep {
+		sort.Slice(jobList.Items, func(i, j int) bool {
+			return jobList.Items[i].CreationTimestamp.Before(&jobList.Items[j].CreationTimestamp)
+		})
+		for i := 0; i < len(jobList.Items)-keep; i++ {
+			err = cli.Delete(context.TODO(), &jobList.Items[i], client.PropagationPolicy(metav1.DeletePropagationBackground))
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func waitForJobComplete(ctx context.Context, client client.Client, job *batchv1.Job) error {
+	// Poll once per second until the job succeeds or fails, or ctx is canceled.
+	return wait.PollUntilContextCancel(ctx, time.Second, true, func(ctx context.Context) (bool, error) {
+		err := client.Get(ctx, types.NamespacedName{Namespace: job.Namespace, Name: job.Name}, job)
+		if err != nil && !apierrors.IsNotFound(err) {
+			return false, err
+		}
+
+		if job.Status.Succeeded > 0 {
+			return true, nil
+		}
+
+		if job.Status.Failed > 0 {
+			return true, fmt.Errorf("maintenance job %s/%s failed", job.Namespace, job.Name)
+		}
+		return false, nil
+	})
+}
+
+func getMaintenanceResultFromJob(cli client.Client, job *batchv1.Job) (string, error) {
+	// Get the maintenance job related pod by label selector
+	podList := &v1.PodList{}
+	err := cli.List(context.TODO(), podList, client.InNamespace(job.Namespace), client.MatchingLabels(map[string]string{"job-name": job.Name}))
+	if err != nil {
+		return "", err
+	}
+
+	if len(podList.Items) == 0 {
+		return "", fmt.Errorf("no pod found for job %s", job.Name)
+	}
+
+	// we only have one maintenance pod for the job; guard against reading the
+	// termination message before the container has actually terminated
+	pod := podList.Items[0]
+	if len(pod.Status.ContainerStatuses) == 0 || pod.Status.ContainerStatuses[0].State.Terminated == nil {
+		return "", fmt.Errorf("container for job %s/%s has not terminated", job.Namespace, job.Name)
+	}
+	return pod.Status.ContainerStatuses[0].State.Terminated.Message, nil
+}
+
+func 
GetLatestMaintenanceJob(cli client.Client, repo string) (*batchv1.Job, error) { + // Get the maintenance job list by label + jobList := &batchv1.JobList{} + err := cli.List(context.TODO(), jobList, client.MatchingLabels(map[string]string{RepositoryNameLabel: repo})) + if err != nil { + return nil, err + } + + if len(jobList.Items) == 0 { + return nil, nil + } + + // Get the latest maintenance job + sort.Slice(jobList.Items, func(i, j int) bool { + return jobList.Items[i].CreationTimestamp.Time.After(jobList.Items[j].CreationTimestamp.Time) + }) + return &jobList.Items[0], nil +} diff --git a/pkg/repository/maintenance_test.go b/pkg/repository/maintenance_test.go new file mode 100644 index 0000000000..c27fb92875 --- /dev/null +++ b/pkg/repository/maintenance_test.go @@ -0,0 +1,408 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package repository + +import ( + "context" + "fmt" + "strings" + "testing" + "time" + + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "github.com/vmware-tanzu/velero/pkg/repository/provider" + "github.com/vmware-tanzu/velero/pkg/util/logging" +) + +func TestGenerateJobName1(t *testing.T) { + testCases := []struct { + repo string + expectedStart string + }{ + { + repo: "example", + expectedStart: "example-maintain-job-", + }, + { + repo: strings.Repeat("a", 60), + expectedStart: "repo-maintain-job-", + }, + } + + for _, tc := range testCases { + t.Run(tc.repo, func(t *testing.T) { + // Call the function to test + jobName := generateJobName(tc.repo) + + // Check if the generated job name starts with the expected prefix + if !strings.HasPrefix(jobName, tc.expectedStart) { + t.Errorf("generated job name does not start with expected prefix") + } + + // Check if the length of the generated job name exceeds the Kubernetes limit + if len(jobName) > 63 { + t.Errorf("generated job name exceeds Kubernetes limit") + } + }) + } +} +func TestDeleteOldMaintenanceJobs(t *testing.T) { + // Set up test repo and keep value + repo := "test-repo" + keep := 2 + + // Create some maintenance jobs for testing + var objs []client.Object + // Create a newer job + newerJob := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: "job1", + Namespace: "default", + Labels: map[string]string{RepositoryNameLabel: repo}, + }, + Spec: batchv1.JobSpec{}, + } + objs = append(objs, newerJob) + // Create older jobs + for i := 2; i <= 3; i++ { + olderJob := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("job%d", i), + Namespace: "default", + Labels: map[string]string{RepositoryNameLabel: repo}, + CreationTimestamp: metav1.Time{ + Time: 
metav1.Now().Add(time.Duration(-24*i) * time.Hour), + }, + }, + Spec: batchv1.JobSpec{}, + } + objs = append(objs, olderJob) + } + // Create a fake Kubernetes client + scheme := runtime.NewScheme() + _ = batchv1.AddToScheme(scheme) + cli := fake.NewClientBuilder().WithScheme(scheme).WithObjects(objs...).Build() + + // Call the function + err := deleteOldMaintenanceJobs(cli, repo, keep) + assert.NoError(t, err) + + // Get the remaining jobs + jobList := &batchv1.JobList{} + err = cli.List(context.TODO(), jobList, client.MatchingLabels(map[string]string{RepositoryNameLabel: repo})) + assert.NoError(t, err) + + // We expect the number of jobs to be equal to 'keep' + assert.Equal(t, keep, len(jobList.Items)) + + // We expect that the oldest jobs were deleted + // Job3 should not be present in the remaining list + assert.NotContains(t, jobList.Items, objs[2]) + + // Job2 should also not be present in the remaining list + assert.NotContains(t, jobList.Items, objs[1]) +} + +func TestWaitForJobComplete(t *testing.T) { + // Set up test job + job := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-job", + Namespace: "default", + }, + Status: batchv1.JobStatus{}, + } + + // Define test cases + tests := []struct { + description string // Test case description + jobStatus batchv1.JobStatus // Job status to set for the test + expectError bool // Whether an error is expected + }{ + { + description: "Job Succeeded", + jobStatus: batchv1.JobStatus{ + Succeeded: 1, + }, + expectError: false, + }, + { + description: "Job Failed", + jobStatus: batchv1.JobStatus{ + Failed: 1, + }, + expectError: true, + }, + } + + // Run tests + for _, tc := range tests { + t.Run(tc.description, func(t *testing.T) { + // Set the job status + job.Status = tc.jobStatus + // Create a fake Kubernetes client + cli := fake.NewClientBuilder().WithObjects(job).Build() + // Call the function + err := waitForJobComplete(context.Background(), cli, job) + + // Check if the error matches the expectation + if tc.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestGetMaintenanceResultFromJob(t *testing.T) { + // Set up test job + job := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-job", + Namespace: "default", + }, + } + + // Set up test pod + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "default", + Labels: map[string]string{"job-name": job.Name}, + }, + Status: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ + { + State: v1.ContainerState{ + Terminated: &v1.ContainerStateTerminated{ + Message: "test message", + }, + }, + }, + }, + }, + } + + // Create a fake Kubernetes client + cli := fake.NewClientBuilder().WithObjects(job, pod).Build() + + // Call the function + result, err := getMaintenanceResultFromJob(cli, job) + + // Check if the result and error match the expectation + assert.NoError(t, err) + assert.Equal(t, "test message", result) +} + +func TestGetLatestMaintenanceJob(t *testing.T) { + // Set up test repo + repo := "test-repo" + + // Create some maintenance jobs for testing + var objs []client.Object + // Create a newer job + newerJob := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: "job1", + Namespace: "default", + Labels: map[string]string{RepositoryNameLabel: repo}, + CreationTimestamp: metav1.Time{ + Time: metav1.Now().Add(time.Duration(-24) * time.Hour), + }, + }, + Spec: batchv1.JobSpec{}, + } + objs = append(objs, newerJob) + + // Create an older job + olderJob := &batchv1.Job{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: "job2", + Namespace: "default", + Labels: map[string]string{RepositoryNameLabel: repo}, + }, + Spec: batchv1.JobSpec{}, + } + objs = append(objs, olderJob) + + // Create a fake Kubernetes client + scheme := runtime.NewScheme() + _ = batchv1.AddToScheme(scheme) + cli := fake.NewClientBuilder().WithScheme(scheme).WithObjects(objs...).Build() + + // Call the function + job, err := GetLatestMaintenanceJob(cli, repo) + assert.NoError(t, err) + + // We expect the returned job to be the newer job + assert.Equal(t, newerJob.Name, job.Name) +} +func TestBuildMaintenanceJob(t *testing.T) { + testCases := []struct { + name string + m MaintenanceConfig + deploy *appsv1.Deployment + expectedJobName string + expectedError bool + }{ + { + name: "Valid maintenance job", + m: MaintenanceConfig{ + CPURequest: "100m", + MemRequest: "128Mi", + CPULimit: "200m", + MemLimit: "256Mi", + LogLevelFlag: logging.LogLevelFlag(logrus.InfoLevel), + FormatFlag: logging.NewFormatFlag(), + }, + deploy: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "velero", + Namespace: "velero", + }, + Spec: appsv1.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "velero-repo-maintenance-container", + Image: "velero-image", + }, + }, + }, + }, + }, + }, + expectedJobName: "test-123-maintain-job", + expectedError: false, + }, + { + name: "Error getting Velero server deployment", + m: MaintenanceConfig{ + CPURequest: "100m", + MemRequest: "128Mi", + CPULimit: "200m", + MemLimit: "256Mi", + LogLevelFlag: logging.LogLevelFlag(logrus.InfoLevel), + FormatFlag: logging.NewFormatFlag(), + }, + expectedJobName: "", + expectedError: true, + }, + } + + param := provider.RepoParam{ + BackupRepo: &velerov1api.BackupRepository{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "velero", + Name: "test-123", + }, + Spec: velerov1api.BackupRepositorySpec{ + VolumeNamespace: "test-123", + RepositoryType: "kopia", + }, + }, + BackupLocation: &velerov1api.BackupStorageLocation{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "velero", + Name: "test-location", + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // Create a fake clientset with resources + objs := []runtime.Object{param.BackupLocation, param.BackupRepo} + + if tc.deploy != nil { + objs = append(objs, tc.deploy) + } + scheme := runtime.NewScheme() + _ = appsv1.AddToScheme(scheme) + _ = velerov1api.AddToScheme(scheme) + cli := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(objs...).Build() + + // Call the function to test + job, err := buildMaintenanceJob(tc.m, param, cli, "velero") + + // Check the error + if tc.expectedError { + assert.Error(t, err) + assert.Nil(t, job) + } else { + assert.NoError(t, err) + assert.NotNil(t, job) + assert.Contains(t, job.Name, tc.expectedJobName) + assert.Equal(t, param.BackupRepo.Namespace, job.Namespace) + assert.Equal(t, param.BackupRepo.Name, job.Labels[RepositoryNameLabel]) + + // Check container + assert.Len(t, job.Spec.Template.Spec.Containers, 1) + container := job.Spec.Template.Spec.Containers[0] + assert.Equal(t, "velero-repo-maintenance-container", container.Name) + assert.Equal(t, "velero-image", container.Image) + assert.Equal(t, v1.PullIfNotPresent, container.ImagePullPolicy) + + // Check resources + expectedResources := v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse(tc.m.CPURequest), + v1.ResourceMemory: resource.MustParse(tc.m.MemRequest), + }, + Limits: 
v1.ResourceList{
+						v1.ResourceCPU:    resource.MustParse(tc.m.CPULimit),
+						v1.ResourceMemory: resource.MustParse(tc.m.MemLimit),
+					},
+				}
+				assert.Equal(t, expectedResources, container.Resources)
+
+				// Check args
+				expectedArgs := []string{
+					"repo-maintenance",
+					fmt.Sprintf("--repo-name=%s", param.BackupRepo.Spec.VolumeNamespace),
+					fmt.Sprintf("--repo-type=%s", param.BackupRepo.Spec.RepositoryType),
+					fmt.Sprintf("--backup-storage-location=%s", param.BackupLocation.Name),
+					fmt.Sprintf("--log-level=%s", tc.m.LogLevelFlag.String()),
+					fmt.Sprintf("--log-format=%s", tc.m.FormatFlag.String()),
+				}
+				assert.Equal(t, expectedArgs, container.Args)
+
+				// Check affinity
+				assert.Nil(t, job.Spec.Template.Spec.Affinity)
+
+				// Check tolerations
+				assert.Nil(t, job.Spec.Template.Spec.Tolerations)
+
+				// Check node selector
+				assert.Nil(t, job.Spec.Template.Spec.NodeSelector)
+			}
+		})
+	}
+}
diff --git a/pkg/repository/manager.go b/pkg/repository/manager.go
index 3e412a73a4..8ed5afc4a3 100644
--- a/pkg/repository/manager.go
+++ b/pkg/repository/manager.go
@@ -77,13 +77,14 @@ type Manager interface {
 }
 
 type manager struct {
-	namespace   string
-	providers   map[string]provider.Provider
-	client      client.Client
-	repoLocker  *RepoLocker
-	repoEnsurer *Ensurer
-	fileSystem  filesystem.Interface
-	log         logrus.FieldLogger
+	namespace      string
+	providers      map[string]provider.Provider
+	client         client.Client
+	repoLocker     *RepoLocker
+	repoEnsurer    *Ensurer
+	fileSystem     filesystem.Interface
+	maintenanceCfg MaintenanceConfig
+	log            logrus.FieldLogger
 }
 
 // NewManager create a new repository manager.
@@ -94,23 +95,25 @@ func NewManager(
 	repoEnsurer *Ensurer,
 	credentialFileStore credentials.FileStore,
 	credentialSecretStore credentials.SecretStore,
+	maintenanceCfg MaintenanceConfig,
 	log logrus.FieldLogger,
 ) Manager {
 	mgr := &manager{
-		namespace:   namespace,
-		client:      client,
-		providers:   map[string]provider.Provider{},
-		repoLocker:  repoLocker,
-		repoEnsurer: repoEnsurer,
-		fileSystem:  filesystem.NewFileSystem(),
-		log:         log,
+		namespace:      namespace,
+		client:         client,
+		providers:      map[string]provider.Provider{},
+		repoLocker:     repoLocker,
+		repoEnsurer:    repoEnsurer,
+		fileSystem:     filesystem.NewFileSystem(),
+		maintenanceCfg: maintenanceCfg,
+		log:            log,
 	}
 
 	mgr.providers[velerov1api.BackupRepositoryTypeRestic] = provider.NewResticRepositoryProvider(credentialFileStore, mgr.fileSystem, mgr.log)
 	mgr.providers[velerov1api.BackupRepositoryTypeKopia] = provider.NewUnifiedRepoProvider(credentials.CredentialGetter{
 		FromFile:   credentialFileStore,
 		FromSecret: credentialSecretStore,
-	}, velerov1api.BackupRepositoryTypeKopia, mgr.log)
+	}, velerov1api.BackupRepositoryTypeKopia, client, mgr.log)
 
 	return mgr
 }
@@ -177,7 +180,55 @@ func (m *manager) PruneRepo(repo *velerov1api.BackupRepository) error {
 		return errors.WithStack(err)
 	}
 
-	return prd.PruneRepo(context.Background(), param)
+	log := m.log.WithFields(logrus.Fields{
+		"BSL name":  param.BackupLocation.Name,
+		"repo type": param.BackupRepo.Spec.RepositoryType,
+		"repo name": param.BackupRepo.Name,
+		"repo UID":  param.BackupRepo.UID,
+	})
+
+	log.Info("Starting repo maintenance")
+
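+	// Maintenance runs out-of-process as a Kubernetes Job so it can be given
+	// its own CPU/memory requests and limits (see buildMaintenanceJob); the
+	// job's outcome is read back from the pod's termination message below.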
+	maintenanceJob, err := buildMaintenanceJob(m.maintenanceCfg, param, m.client, m.namespace)
+	if err != nil {
+		return errors.Wrap(err, "failed to build maintenance job")
+	}
+
+	log = log.WithField("job", fmt.Sprintf("%s/%s", maintenanceJob.Namespace, maintenanceJob.Name))
+
+	if err := m.client.Create(context.TODO(), maintenanceJob); err != nil {
+		return errors.Wrap(err, "failed to create maintenance job")
+	}
+	log.Debug("Created maintenance job")
+
+	defer func() {
+		if err := deleteOldMaintenanceJobs(m.client, param.BackupRepo.Name,
+			m.maintenanceCfg.KeepLatestMaitenanceJobs); err != nil {
+			log.WithError(err).Error("Failed to delete old maintenance jobs")
+		}
+	}()
+
+	var jobErr error
+	if err := waitForJobComplete(context.TODO(), m.client, maintenanceJob); err != nil {
+		log.WithError(err).Error("Failed waiting for maintenance job to complete")
+		jobErr = err // don't return yet: the job may have failed because maintenance itself failed, and that actual error should be surfaced below
+	}
+
+	result, err := getMaintenanceResultFromJob(m.client, maintenanceJob)
+	if err != nil {
+		return errors.Wrap(err, "failed to get maintenance job result")
+	}
+
+	if result != "" {
+		return errors.Errorf("maintenance job %s failed: %s", maintenanceJob.Name, result)
+	}
+
+	if jobErr != nil {
+		return errors.Wrap(jobErr, "failed waiting for maintenance job to complete")
+	}
+
+	log.Info("Repo maintenance complete")
+	return nil
 }
 
 func (m *manager) UnlockRepo(repo *velerov1api.BackupRepository) error {
diff --git a/pkg/repository/manager_test.go b/pkg/repository/manager_test.go
index 4d84919d2a..679cc8066f 100644
--- a/pkg/repository/manager_test.go
+++ b/pkg/repository/manager_test.go
@@ -21,12 +21,14 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
+	kbclient "sigs.k8s.io/controller-runtime/pkg/client"
 
 	velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
 )
 
 func TestGetRepositoryProvider(t *testing.T) {
-	mgr := NewManager("", nil, nil, nil, nil, nil, nil).(*manager)
+	var fakeClient kbclient.Client
+	mgr := NewManager("", fakeClient, nil, nil, nil, nil, MaintenanceConfig{}, nil).(*manager)
 	repo := &velerov1.BackupRepository{}
 
 	// empty repository type
diff --git a/pkg/repository/provider/unified_repo.go b/pkg/repository/provider/unified_repo.go
index 11c0845497..aaaf6f034a 100644
--- a/pkg/repository/provider/unified_repo.go
+++ b/pkg/repository/provider/unified_repo.go
@@ -29,6 +29,7 @@ import (
 	"github.com/kopia/kopia/repo"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
+	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	"github.com/vmware-tanzu/velero/internal/credentials"
 	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
@@ -43,6 +44,7 @@ type unifiedRepoProvider struct {
 	workPath         string
 	repoService      udmrepo.BackupRepoService
 	repoBackend      string
+	cli              client.Client
 	log              logrus.FieldLogger
 }
 
@@ -73,11 +75,13 @@ const (
 func NewUnifiedRepoProvider(
 	credentialGetter credentials.CredentialGetter,
 	repoBackend string,
+	cli client.Client,
 	log logrus.FieldLogger,
 ) Provider {
 	repo := unifiedRepoProvider{
 		credentialGetter: credentialGetter,
 		repoBackend:      repoBackend,
+		cli:              cli,
 		log:              log,
 	}
diff --git a/pkg/util/velero/velero.go b/pkg/util/velero/velero.go
new file mode 100644
index 0000000000..bc52253365
--- /dev/null
+++ b/pkg/util/velero/velero.go
@@ -0,0 +1,80 @@
+/*
+Copyright the Velero contributors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package velero
+
+import (
+	appsv1 "k8s.io/api/apps/v1"
+	v1 "k8s.io/api/core/v1"
+)
+
+// GetNodeSelectorFromVeleroServer gets the node selector from the Velero server deployment
+func GetNodeSelectorFromVeleroServer(deployment *appsv1.Deployment) map[string]string {
+	return deployment.Spec.Template.Spec.NodeSelector
+}
+
+// GetTolerationsFromVeleroServer gets the tolerations from the Velero server deployment
+func GetTolerationsFromVeleroServer(deployment *appsv1.Deployment) []v1.Toleration {
+	return deployment.Spec.Template.Spec.Tolerations
+}
+
+// GetAffinityFromVeleroServer gets the affinity from the Velero server deployment
+func GetAffinityFromVeleroServer(deployment *appsv1.Deployment) *v1.Affinity {
+	return deployment.Spec.Template.Spec.Affinity
+}
+
+// GetEnvVarsFromVeleroServer gets the environment variables from the Velero server deployment
+func GetEnvVarsFromVeleroServer(deployment *appsv1.Deployment) []v1.EnvVar {
+	for _, container := range deployment.Spec.Template.Spec.Containers {
+		// We only have one container in the Velero server deployment
+		return container.Env
+	}
+	return nil
+}
+
+// GetVolumeMountsFromVeleroServer gets the volume mounts from the Velero server deployment
+func GetVolumeMountsFromVeleroServer(deployment *appsv1.Deployment) []v1.VolumeMount {
+	for _, container := range deployment.Spec.Template.Spec.Containers {
+		// We only have one container in the Velero server deployment
+		return container.VolumeMounts
+	}
+	return nil
+}
+
+// GetVolumesFromVeleroServer gets the volumes from the Velero server deployment
+func GetVolumesFromVeleroServer(deployment *appsv1.Deployment) []v1.Volume {
+	return deployment.Spec.Template.Spec.Volumes
+}
+
+// GetServiceAccountFromVeleroServer gets the service account from the Velero server deployment
+func GetServiceAccountFromVeleroServer(deployment *appsv1.Deployment) string {
+	return deployment.Spec.Template.Spec.ServiceAccountName
+}
+
+// GetVeleroServerImage gets the image of the Velero server deployment
+func GetVeleroServerImage(deployment *appsv1.Deployment) string {
+	return deployment.Spec.Template.Spec.Containers[0].Image
+}
+
+// GetVeleroServerLables gets the labels of the Velero server deployment
+func GetVeleroServerLables(deployment *appsv1.Deployment) map[string]string {
+	return deployment.Spec.Template.Labels
+}
+
+// GetVeleroServerAnnotations gets the annotations of the Velero server deployment
+func GetVeleroServerAnnotations(deployment *appsv1.Deployment) map[string]string {
+	return deployment.Spec.Template.Annotations
+}
diff --git a/pkg/util/velero/velero_test.go b/pkg/util/velero/velero_test.go
new file mode 100644
index 0000000000..b03a393f56
--- /dev/null
+++ b/pkg/util/velero/velero_test.go
@@ -0,0 +1,614 @@
+/*
+Copyright the Velero contributors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package velero + +import ( + "reflect" + "testing" + + "github.com/stretchr/testify/assert" + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestGetNodeSelectorFromVeleroServer(t *testing.T) { + tests := []struct { + name string + deploy *appsv1.Deployment + want map[string]string + }{ + { + name: "no node selector", + deploy: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + NodeSelector: map[string]string{}, + }, + }, + }, + }, + want: map[string]string{}, + }, + { + name: "node selector", + deploy: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + NodeSelector: map[string]string{ + "foo": "bar", + }, + }, + }, + }, + }, + want: map[string]string{ + "foo": "bar", + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := GetNodeSelectorFromVeleroServer(test.deploy) + if len(got) != len(test.want) { + t.Errorf("expected node selector to have %d elements, got %d", len(test.want), len(got)) + } + for k, v := range test.want { + if got[k] != v { + t.Errorf("expected node selector to have key %s with value %s, got %s", k, v, got[k]) + } + } + }) + } +} + +func TestGetTolerationsFromVeleroServer(t *testing.T) { + tests := []struct { + name string + deploy *appsv1.Deployment + want []v1.Toleration + }{ + { + name: "no tolerations", + deploy: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + Tolerations: []v1.Toleration{}, + }, + }, + }, + }, + want: []v1.Toleration{}, + }, + { + name: "tolerations", + deploy: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + Tolerations: []v1.Toleration{ + { + Key: "foo", + Operator: "Exists", + }, + }, + }, + }, + }, + }, + want: []v1.Toleration{ + { + Key: "foo", + Operator: "Exists", + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := GetTolerationsFromVeleroServer(test.deploy) + if len(got) != len(test.want) { + t.Errorf("expected tolerations to have %d elements, got %d", len(test.want), len(got)) + } + for i, want := range test.want { + if got[i] != want { + t.Errorf("expected toleration at index %d to be %v, got %v", i, want, got[i]) + } + } + }) + } +} + +func TestGetAffinityFromVeleroServer(t *testing.T) { + tests := []struct { + name string + deploy *appsv1.Deployment + want *v1.Affinity + }{ + { + name: "no affinity", + deploy: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + Affinity: nil, + }, + }, + }, + }, + want: nil, + }, + { + name: "affinity", + deploy: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + Affinity: &v1.Affinity{ + NodeAffinity: &v1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ + NodeSelectorTerms: []v1.NodeSelectorTerm{ + { + MatchExpressions: []v1.NodeSelectorRequirement{ + { + Key: "foo", + Operator: "In", + Values: []string{"bar"}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + want: &v1.Affinity{ + NodeAffinity: &v1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ + NodeSelectorTerms: []v1.NodeSelectorTerm{ + { + MatchExpressions: []v1.NodeSelectorRequirement{ + { + Key: "foo", + Operator: "In", + Values: []string{"bar"}, + }, + }, + }, + }, + }, + }, + }, + }, + } + 
+ for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := GetAffinityFromVeleroServer(test.deploy) + + if got == nil { + if test.want != nil { + t.Errorf("expected affinity to be %v, got nil", test.want) + } + } else { + if test.want == nil { + t.Errorf("expected affinity to be nil, got %v", got) + } else { + if got.NodeAffinity == nil { + if test.want.NodeAffinity != nil { + t.Errorf("expected node affinity to be %v, got nil", test.want.NodeAffinity) + } + } else { + if test.want.NodeAffinity == nil { + t.Errorf("expected node affinity to be nil, got %v", got.NodeAffinity) + } else { + if got.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil { + if test.want.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil { + t.Errorf("expected required during scheduling ignored during execution to be %v, got nil", test.want.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution) + } + } else { + if test.want.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil { + t.Errorf("expected required during scheduling ignored during execution to be nil, got %v", got.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution) + } else { + if !reflect.DeepEqual(got.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution, test.want.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution) { + t.Errorf("expected required during scheduling ignored during execution to be %v, got %v", test.want.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution, got.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution) + } + } + } + } + } + } + } + }) + } +} + +func TestGetEnvVarsFromVeleroServer(t *testing.T) { + tests := []struct { + name string + deploy *appsv1.Deployment + want []v1.EnvVar + }{ + { + name: "no env vars", + deploy: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Env: []v1.EnvVar{}, + }, + }, + }, + }, + }, + }, + want: []v1.EnvVar{}, + }, + { + name: "env vars", + deploy: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Env: []v1.EnvVar{ + { + Name: "foo", + Value: "bar", + }, + }, + }, + }, + }, + }, + }, + }, + want: []v1.EnvVar{ + { + Name: "foo", + Value: "bar", + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := GetEnvVarsFromVeleroServer(test.deploy) + if len(got) != len(test.want) { + t.Errorf("expected env vars to have %d elements, got %d", len(test.want), len(got)) + } + for i, want := range test.want { + if got[i] != want { + t.Errorf("expected env var at index %d to be %v, got %v", i, want, got[i]) + } + } + }) + } +} + +func TestGetVolumeMountsFromVeleroServer(t *testing.T) { + tests := []struct { + name string + deploy *appsv1.Deployment + want []v1.VolumeMount + }{ + { + name: "no volume mounts", + deploy: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + VolumeMounts: []v1.VolumeMount{}, + }, + }, + }, + }, + }, + }, + want: []v1.VolumeMount{}, + }, + { + name: "volume mounts", + deploy: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + VolumeMounts: []v1.VolumeMount{ + { + Name: "foo", + MountPath: "/bar", + }, + }, + }, + }, + }, + }, + }, + }, + want: []v1.VolumeMount{ + { + Name: "foo", + MountPath: 
"/bar", + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := GetVolumeMountsFromVeleroServer(test.deploy) + if len(got) != len(test.want) { + t.Errorf("expected volume mounts to have %d elements, got %d", len(test.want), len(got)) + } + for i, want := range test.want { + if got[i] != want { + t.Errorf("expected volume mount at index %d to be %v, got %v", i, want, got[i]) + } + } + }) + } +} + +func TestGetVolumesFromVeleroServer(t *testing.T) { + tests := []struct { + name string + deploy *appsv1.Deployment + want []v1.Volume + }{ + { + name: "no volumes", + deploy: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + Volumes: []v1.Volume{}, + }, + }, + }, + }, + want: []v1.Volume{}, + }, + { + name: "volumes", + deploy: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ + { + Name: "foo", + }, + }, + }, + }, + }, + }, + want: []v1.Volume{ + { + Name: "foo", + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := GetVolumesFromVeleroServer(test.deploy) + if len(got) != len(test.want) { + t.Errorf("expected volumes to have %d elements, got %d", len(test.want), len(got)) + } + for i, want := range test.want { + if got[i] != want { + t.Errorf("expected volume at index %d to be %v, got %v", i, want, got[i]) + } + } + }) + } +} + +func TestGetServiceAccountFromVeleroServer(t *testing.T) { + tests := []struct { + name string + deploy *appsv1.Deployment + want string + }{ + { + name: "no service account", + deploy: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + ServiceAccountName: "", + }, + }, + }, + }, + want: "", + }, + { + name: "service account", + deploy: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + ServiceAccountName: "foo", + }, + }, + }, + }, + want: "foo", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := GetServiceAccountFromVeleroServer(test.deploy) + if got != test.want { + t.Errorf("expected service account to be %s, got %s", test.want, got) + } + }) + } +} + +func TestGetVeleroServerImage(t *testing.T) { + tests := []struct { + name string + deploy *appsv1.Deployment + want string + }{ + { + name: "velero server image", + deploy: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Image: "velero/velero:latest", + }, + }, + }, + }, + }, + }, + want: "velero/velero:latest", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := GetVeleroServerImage(test.deploy) + if got != test.want { + t.Errorf("expected velero server image to be %s, got %s", test.want, got) + } + }) + } +} + +func TestGetVeleroServerLables(t *testing.T) { + tests := []struct { + name string + deployment *appsv1.Deployment + expected map[string]string + }{ + { + name: "Empty Labels", + deployment: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{}, + }, + }, + }, + }, + expected: map[string]string{}, + }, + { + name: "Non-empty Labels", + deployment: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "velero", + "component": 
"server", + }, + }, + }, + }, + }, + expected: map[string]string{ + "app": "velero", + "component": "server", + }, + }, + } + + // Run tests + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := GetVeleroServerLables(tt.deployment) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestGetVeleroServerAnnotations(t *testing.T) { + tests := []struct { + name string + deployment *appsv1.Deployment + expected map[string]string + }{ + { + name: "Empty Labels", + deployment: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{}, + }, + }, + }, + }, + expected: map[string]string{}, + }, + { + name: "Non-empty Labels", + deployment: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "app": "velero", + "component": "server", + }, + }, + }, + }, + }, + expected: map[string]string{ + "app": "velero", + "component": "server", + }, + }, + } + + // Run tests + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := GetVeleroServerAnnotations(tt.deployment) + assert.Equal(t, tt.expected, result) + }) + } +}