
use newGR instead of groupResource after apiversion convert #8199

Merged Sep 12, 2024 · 2 commits
Changes from 1 commit
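Summary of the change: when Velero's API group version conversion rewrites the resource being restored, the code previously kept consulting the original groupResource for later lookups and comparisons; this PR switches those to the converted newGR (except for the status include/exclude check, which deliberately stays on the original GR, per the comment added in the diff). Below is a minimal, self-contained sketch of the compare-and-log pattern the first hunk introduces — the GroupResource values are invented for illustration and are not taken from the Velero codebase:

package main

import (
	"fmt"
	"reflect"

	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	// Hypothetical original resource recorded in the backup.
	groupResource := schema.GroupResource{Group: "example.io", Resource: "widgets"}

	// Hypothetical result after an API group/version conversion step.
	newGR := groupResource
	newGR.Resource = "gadgets" // pretend conversion changed the resource name

	// The pattern the PR adds: log only when conversion actually changed the
	// GroupResource, then use newGR for every subsequent lookup.
	if !reflect.DeepEqual(newGR, groupResource) {
		fmt.Printf("Resource to be restored changed from %v to %v\n", groupResource, newGR)
	}
}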
21 changes: 13 additions & 8 deletions pkg/restore/restore.go
Codecov (codecov/patch) check annotations: added lines 1484, 1488, and 1526 in pkg/restore/restore.go are not covered by tests.
@@ -24,6 +24,7 @@
 	"os"
 	"os/signal"
 	"path/filepath"
+	"reflect"
 	"sort"
 	"strings"
 	"sync"
@@ -1479,9 +1480,12 @@
 		}
 		newGR.Resource = gvr.Resource
 	}
+	if !reflect.DeepEqual(newGR, groupResource) {
+		ctx.log.Infof("Resource to be restored changed from %v to %v", groupResource, newGR)
+	}
 	resourceClient, err := ctx.getResourceClient(newGR, obj, obj.GetNamespace())
 	if err != nil {
-		warnings.Add(namespace, fmt.Errorf("error getting updated resource client for namespace %q, resource %q: %v", namespace, &groupResource, err))
+		warnings.Add(namespace, fmt.Errorf("error getting updated resource client for namespace %q, resource %q: %v", namespace, &newGR, err))
 		return warnings, errs, itemExists
 	}

@@ -1496,7 +1500,7 @@
 	// new namespace
 	if !ctx.disableInformerCache {
 		ctx.log.Debugf("Checking for existence %s: %v", obj.GroupVersionKind().Kind, name)
-		fromCluster, err = ctx.getResource(groupResource, obj, namespace, name)
+		fromCluster, err = ctx.getResource(newGR, obj, namespace, name)
 	}
 	if err != nil || fromCluster == nil {
 		// couldn't find the resource, attempt to create
@@ -1519,7 +1523,7 @@
 	// and if err then itemExists remains false as we were not able to confirm the existence of the object via Get call or creation call.
 	// We return the get error as a warning to notify the user that the object could exist in cluster and we were not able to confirm it.
 	if !ctx.disableInformerCache {
-		fromCluster, err = ctx.getResource(groupResource, obj, namespace, name)
+		fromCluster, err = ctx.getResource(newGR, obj, namespace, name)
 	} else {
 		fromCluster, err = resourceClient.Get(name, metav1.GetOptions{})
 	}
@@ -1550,7 +1554,7 @@
 	fromClusterWithLabels := fromCluster.DeepCopy() // saving the in-cluster object so that we can create label patch if overall patch fails

 	if !equality.Semantic.DeepEqual(fromCluster, obj) {
-		switch groupResource {
+		switch newGR {
 		case kuberesource.ServiceAccounts:
 			desired, err := mergeServiceAccounts(fromCluster, obj)
 			if err != nil {
@@ -1644,14 +1648,15 @@
 		return warnings, errs, itemExists
 	}

+	// determine whether to restore status according to original GR
 	shouldRestoreStatus := ctx.resourceStatusIncludesExcludes != nil && ctx.resourceStatusIncludesExcludes.ShouldInclude(groupResource.String())
 	if shouldRestoreStatus && statusFieldErr != nil {
 		err := fmt.Errorf("could not get status to be restored %s: %v", kube.NamespaceAndName(obj), statusFieldErr)
 		ctx.log.Errorf(err.Error())
 		errs.Add(namespace, err)
 		return warnings, errs, itemExists
 	}
-	ctx.log.Debugf("status field for %s: exists: %v, should restore: %v", groupResource, statusFieldExists, shouldRestoreStatus)
+	ctx.log.Debugf("status field for %s: exists: %v, should restore: %v", newGR, statusFieldExists, shouldRestoreStatus)
 	// if it should restore status, run a UpdateStatus
 	if statusFieldExists && shouldRestoreStatus {
 		if err := unstructured.SetNestedField(obj.Object, objStatus, "status"); err != nil {
Expand Down Expand Up @@ -1690,7 +1695,7 @@
}
}

if groupResource == kuberesource.Pods {
if newGR == kuberesource.Pods {
pod := new(v1.Pod)
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), pod); err != nil {
errs.Add(namespace, err)
@@ -1707,7 +1712,7 @@

 	// Asynchronously executes restore exec hooks if any
 	// Velero will wait for all the asynchronous hook operations to finish in finalizing phase, using hook tracker to track the execution progress.
-	if groupResource == kuberesource.Pods {
+	if newGR == kuberesource.Pods {
 		pod := new(v1.Pod)
 		if err := runtime.DefaultUnstructuredConverter.FromUnstructured(createdObj.UnstructuredContent(), &pod); err != nil {
 			ctx.log.Errorf("error converting pod %s: %v", kube.NamespaceAndName(obj), err)
@@ -1727,7 +1732,7 @@

 	// Wait for a CRD to be available for instantiating resources
 	// before continuing.
-	if groupResource == kuberesource.CustomResourceDefinitions {
+	if newGR == kuberesource.CustomResourceDefinitions {
 		available, err := ctx.crdAvailable(name, resourceClient)
 		if err != nil {
 			errs.Add(namespace, errors.Wrapf(err, "error verifying the CRD %s is ready to use", name))
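A side note on the equality checks in the later hunks: schema.GroupResource is a struct of two string fields, so it is comparable with == and usable as a switch value, which is why the diff can write switch newGR and newGR == kuberesource.Pods directly; the reflect.DeepEqual in the first hunk is equivalent for this type. A small self-contained sketch of that property (the values below are hypothetical stand-ins, not the Velero kuberesource constants):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	// Stand-in for a well-known GroupResource constant such as kuberesource.Pods.
	pods := schema.GroupResource{Group: "", Resource: "pods"}

	// Stand-in for the GroupResource produced by API group version conversion.
	newGR := schema.GroupResource{Group: "", Resource: "pods"}

	// Comparable struct: == and switch work without reflect.DeepEqual.
	fmt.Println(newGR == pods) // true

	switch newGR {
	case pods:
		fmt.Println("run pod-specific restore steps")
	default:
		fmt.Println("generic restore path")
	}
}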