Kv lsp enabled flag #4794

Draft: wants to merge 3 commits into master
80 changes: 80 additions & 0 deletions go-controller/pkg/kubevirt/pod.go
@@ -3,6 +3,7 @@ package kubevirt
import (
"fmt"
"net"
"sort"

corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -349,3 +350,82 @@ func ZoneContainsPodSubnetOrUntracked(watchFactory *factory.WatchFactory, lsMana
func IsPodOwnedByVirtualMachine(pod *corev1.Pod) bool {
return ExtractVMNameFromPod(pod) != nil
}

func isTargetPodReady(targetPod *corev1.Pod) bool {
if targetPod == nil {
return false
}

// This annotation only appears in live migration scenarios, and it signals
// that the target VM pod is ready to receive traffic, so we can route
// traffic to it.
targetReadyTimestamp := targetPod.Annotations[kubevirtv1.MigrationTargetReadyTimestamp]

// VM is ready to receive traffic
return targetReadyTimestamp != ""
}

func filterNotComplete(vmPods []*corev1.Pod) []*corev1.Pod {
var notCompletePods []*corev1.Pod
for _, vmPod := range vmPods {
if !util.PodCompleted(vmPod) {
notCompletePods = append(notCompletePods, vmPod)
}
}

return notCompletePods
}

type LiveMigrationState string

const (
LiveMigrationInProgress LiveMigrationState = "InProgress"
LiveMigrationTargetDomainReady LiveMigrationState = "TargetDomainReady"
LiveMigrationCompleted LiveMigrationState = "Completed"
)

type LiveMigrationStatus struct {
SourcePod, TargetPod *corev1.Pod
State LiveMigrationState
}

func DiscoverLiveMigrationStatus(client *factory.WatchFactory, pod *corev1.Pod) (*LiveMigrationStatus, error) {
vmKey := ExtractVMNameFromPod(pod)
if vmKey == nil {
return nil, nil
}

vmPods, err := client.GetPodsBySelector(pod.Namespace, metav1.LabelSelector{MatchLabels: map[string]string{kubevirtv1.VirtualMachineNameLabel: vmKey.Name}})
if err != nil {
return nil, err
}

//TODO: cover failed migration
vmPods = filterNotComplete(vmPods)

// no migration
if len(vmPods) < 2 {
return nil, nil
}

if len(vmPods) > 2 {
return nil, fmt.Errorf("unexpected live migration state at pods: %+v", vmPods)
@maiqueb (Contributor) commented on Oct 24, 2024:
Too much info IMHO - while I understand having all potentially relevant information at hand is helpful, we'd drown in data...
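As a purely illustrative follow-up to the comment above (not part of this PR), the error could list only the namespaced pod names instead of dumping the full pod objects:

```go
// Hypothetical alternative: report only namespace/name of the offending pods
// rather than formatting the full pod objects into the error.
podNames := make([]string, 0, len(vmPods))
for _, vmPod := range vmPods {
	podNames = append(podNames, vmPod.Namespace+"/"+vmPod.Name)
}
return nil, fmt.Errorf("unexpected live migration state: expected at most 2 non-completed pods, got %d: %v", len(vmPods), podNames)
```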

}

// Sort vmPods by creation time
sort.Slice(vmPods, func(i, j int) bool {
// pod i sorts before pod j if it was created earlier
return vmPods[j].CreationTimestamp.After(vmPods[i].CreationTimestamp.Time)
})

status := LiveMigrationStatus{
SourcePod: vmPods[0],
TargetPod: vmPods[1],
Comment on lines +422 to +423
Contributor:
Could we trim down the objects? IIUC we just require the labels on the pods, right?

State: LiveMigrationInProgress,
}

if isTargetPodReady(status.TargetPod) {
status.State = LiveMigrationTargetDomainReady
}
return &status, nil
}
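Picking up the "trim down the objects" question above, a slimmed-down status carrying only what the controller reads might look roughly like the sketch below. This is hypothetical only: the PR keeps the full pod objects, and they are still passed to GetLogicalPortName.

```go
// Hypothetical slimmed-down variant of LiveMigrationStatus that drops the full
// pod objects and keeps only the fields consumed downstream.
type liveMigrationStatusLite struct {
	Namespace     string
	SourcePodName string
	TargetPodName string
	State         LiveMigrationState
}
```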
2 changes: 1 addition & 1 deletion go-controller/pkg/libovsdb/ops/model.go
@@ -493,7 +493,7 @@ func buildFailOnDuplicateOps(c client.Client, m model.Model) ([]ovsdb.Operation,
func getAllUpdatableFields(model model.Model) []interface{} {
switch t := model.(type) {
case *nbdb.LogicalSwitchPort:
return []interface{}{&t.Addresses, &t.Type, &t.TagRequest, &t.Options, &t.PortSecurity}
return []interface{}{&t.Addresses, &t.Type, &t.TagRequest, &t.Options, &t.PortSecurity, &t.Enabled}
case *nbdb.PortGroup:
return []interface{}{&t.ACLs, &t.Ports, &t.ExternalIDs}
default:
30 changes: 29 additions & 1 deletion go-controller/pkg/ovn/base_network_controller_pods.go
@@ -18,13 +18,15 @@ import (
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory"
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kubevirt"
logicalswitchmanager "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/logical_switch_manager"
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types"
ovntypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types"
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util"
kapi "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog/v2"
"k8s.io/utils/ptr"

libovsdbclient "github.com/ovn-org/libovsdb/client"
"github.com/ovn-org/libovsdb/ovsdb"
@@ -588,7 +590,29 @@ func (bnc *BaseNetworkController) addLogicalPortToNetwork(pod *kapi.Pod, nadName
}
}

ops, err = libovsdbops.CreateOrUpdateLogicalSwitchPortsOnSwitchOps(bnc.nbClient, nil, ls, lsp)
lsps := []*nbdb.LogicalSwitchPort{lsp}
if kubevirt.IsPodOwnedByVirtualMachine(pod) && bnc.isAllowedForMigration() {
kubevirtLiveMigrationStatus, err := kubevirt.DiscoverLiveMigrationStatus(bnc.watchFactory, pod)
if err != nil {
return nil, nil, nil, false, err
}
if kubevirtLiveMigrationStatus != nil && pod.Name == kubevirtLiveMigrationStatus.TargetPod.Name {
lsp.Enabled = ptr.To(kubevirtLiveMigrationStatus.State == kubevirt.LiveMigrationTargetDomainReady)
// kubevirt does not update the source pod, so the controller cannot wait for
// it to disable the LSP; let's just do it here in the same operation
if kubevirtLiveMigrationStatus.State == kubevirt.LiveMigrationTargetDomainReady {
sourcePodLSP := &nbdb.LogicalSwitchPort{
Name: bnc.GetLogicalPortName(kubevirtLiveMigrationStatus.SourcePod, nadName),
Enabled: ptr.To(false),
}
lsps = append(lsps, sourcePodLSP)
klog.Infof("DELETEME, source.pod: %s, source.enabled: %t", kubevirtLiveMigrationStatus.SourcePod.Name, *sourcePodLSP.Enabled)
}
klog.Infof("DELETEME, target.pod: %s, target.enabled: %t", pod.Name, *lsp.Enabled)
}
}

ops, err = libovsdbops.CreateOrUpdateLogicalSwitchPortsOnSwitchOps(bnc.nbClient, nil, ls, lsps...)
if err != nil {
return nil, nil, nil, false,
fmt.Errorf("error creating logical switch port %+v on switch %+v: %+v", *lsp, *ls, err)
@@ -1162,3 +1186,7 @@ func (bnc *BaseNetworkController) wasPodReleasedBeforeStartup(uid, nad string) b
}
return bnc.releasedPodsBeforeStartup[nad].Has(uid)
}

func (bnc *BaseNetworkController) isAllowedForMigration() bool {
Contributor:
I'd have this as a function instead. There's no need for the layer3 controller to have this method, for instance.

Also, shouldn't localnet also be a "customer" of this feature? The exact same mechanism should be reused there to improve the migration downtime, right?

Contributor Author:
Having a function that receives NetInfo would be much the same, and it would still be in the same context as layer3; also, I am totally against a free function receiving random booleans.

Contributor:
I don't understand what you mean, but FWIW, I also don't want a function that receives random booleans.
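For illustration only (not part of this PR), the free-function alternative being discussed could look roughly like the sketch below, assuming the util.NetInfo interface that the controller already embeds; either shape evaluates the same condition, the difference is only where the check lives:

```go
// Hypothetical free-function variant: the caller passes the network's NetInfo
// instead of the check living on BaseNetworkController.
func isNetworkAllowedForMigration(netInfo util.NetInfo) bool {
	return netInfo.IsSecondary() && netInfo.TopologyType() == types.Layer2Topology
}
```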

return bnc.IsSecondary() && bnc.TopologyType() == types.Layer2Topology
}