From 090b3a61c8dbc123595a7fdd7771b6525b7ebfcb Mon Sep 17 00:00:00 2001 From: wgrayson Date: Fri, 29 Jul 2022 13:39:47 -0700 Subject: [PATCH] Address comments Address comments and add more validation UT cases. Signed-off-by: wgrayson --- build/yamls/antrea.yml | 8 +- docs/antrea-network-policy.md | 31 ++-- .../networkpolicy_controller_test.go | 2 +- .../controller/networkpolicy/reconciler.go | 140 +++++++-------- pkg/agent/controller/networkpolicy/reject.go | 2 +- pkg/apis/controlplane/sets.go | 9 +- pkg/apis/controlplane/v1beta2/sets.go | 5 +- .../networkpolicy/networkpolicy_controller.go | 8 +- pkg/controller/networkpolicy/validate_test.go | 170 ++++++++++++++++++ 9 files changed, 277 insertions(+), 98 deletions(-) diff --git a/build/yamls/antrea.yml b/build/yamls/antrea.yml index 447275ec2b9..acf36182203 100644 --- a/build/yamls/antrea.yml +++ b/build/yamls/antrea.yml @@ -2767,7 +2767,7 @@ data: # Provide the address of Kubernetes apiserver, to override any value provided in kubeconfig or InClusterConfig. # Defaults to "". It must be a host string, a host:port pair, or a URL to the base of the apiserver. - kubeAPIServerOverride: "" + kubeAPIServerOverride: "https://192.168.77.100:6443" # Provide the address of DNS server, to override the kube-dns service. It's used to resolve hostname in FQDN policy. # Defaults to "". It must be a host string or a host:port pair of the DNS server (e.g. 10.96.0.10, 10.96.0.10:53, @@ -2817,7 +2817,7 @@ data: # feature to be enabled. # Note that this option is experimental. If kube-proxy is removed, option kubeAPIServerOverride must be used to access # apiserver directly. - proxyAll: false + proxyAll: true # A string array of values which specifies the host IPv4/IPv6 addresses for NodePort. Values can be valid IP blocks. # (e.g. 1.2.3.0/24, 1.2.3.4/32). An empty string slice is meant to select all host IPv4/IPv6 addresses. # Note that the option is only valid when proxyAll is true. 
@@ -3712,7 +3712,7 @@ spec: kubectl.kubernetes.io/default-container: antrea-agent # Automatically restart Pods with a RollingUpdate if the ConfigMap changes # See https://helm.sh/docs/howto/charts_tips_and_tricks/#automatically-roll-deployments - checksum/config: 0814cc9f3baa94e76e83a108b04d05200485610c7f5950c584503af7151a9e86 + checksum/config: 5204c8793a312441190994144e04cce286931362a4d92d3d692a945ad333fb65 labels: app: antrea component: antrea-agent @@ -3952,7 +3952,7 @@ spec: annotations: # Automatically restart Pod if the ConfigMap changes # See https://helm.sh/docs/howto/charts_tips_and_tricks/#automatically-roll-deployments - checksum/config: 0814cc9f3baa94e76e83a108b04d05200485610c7f5950c584503af7151a9e86 + checksum/config: 5204c8793a312441190994144e04cce286931362a4d92d3d692a945ad333fb65 labels: app: antrea component: antrea-controller diff --git a/docs/antrea-network-policy.md b/docs/antrea-network-policy.md index 555ee539b0a..b67e392a591 100644 --- a/docs/antrea-network-policy.md +++ b/docs/antrea-network-policy.md @@ -39,7 +39,7 @@ - [Node Selector](#node-selector) - [toServices egress rules](#toservices-egress-rules) - [ServiceAccount based selection](#serviceaccount-based-selection) - - [ACNP appliedTo NodePort Service](#acnp-appliedto-nodeport-service) + - [Apply to NodePort Service](#apply-to-nodeport-service) - [ClusterGroup](#clustergroup) - [ClusterGroup CRD](#clustergroup-crd) - [kubectl commands for ClusterGroup](#kubectl-commands-for-clustergroup) @@ -484,9 +484,9 @@ Specific Pods from specific Namespaces can be selected by providing both a The `appliedTo` field can also reference a ClusterGroup resource by setting the ClusterGroup's name in `group` field in place of the stand-alone selectors. The `appliedTo` field can also reference a Service by setting the Service's name -and namespace in `service` field in place of the stand-alone selectors. Only a +and Namespace in `service` field in place of the stand-alone selectors. Only a NodePort Service can be referred by this field. More details can be found in the -[ACNPAppliedToNodePortService](#acnp-appliedto-nodeport-service) section. +[ApplyToNodePortService](#apply-to-nodeport-service) section. IPBlock cannot be set in the `appliedTo` field. An IPBlock ClusterGroup referenced in an `appliedTo` field will be ignored, and the policy will have no effect. @@ -1293,7 +1293,7 @@ spec: In this example, the policy will be applied to all Pods whose ServiceAccount is `sa-1` of `ns-1`. Let's call those Pods "appliedToPods". -The egress `to` section will select all Pods whose ServiceAccount is in `ns-2` namespace and name as `sa-2`. +The egress `to` section will select all Pods whose ServiceAccount is `sa-2` in the `ns-2` Namespace. Let's call those Pods "egressPods". After this policy is applied, traffic from "appliedToPods" to "egressPods" will be dropped. @@ -1301,22 +1301,21 @@ Note: Antrea will use a reserved label key for internal processing `serviceAccou The reserved label looks like: `internal.antrea.io/service-account:[ServiceAccountName]`. Users should avoid using this label key in any entities no matter if a policy with `serviceAccount` is applied in the cluster. -### ACNP appliedTo NodePort Service +### Apply to NodePort Service -Antrea ClusterNetworkPolicy features a `service` field in `appliedTo` field to enable the ACNP could be enforced -on the traffic from external client to a NodePort Service. 
+Antrea ClusterNetworkPolicy features a `service` field in `appliedTo` field to enforce the ACNP rules on the +traffic from external clients to a NodePort Service. -`service` uses `namespace` and `name` to select the Service with a specific name under a specific namespace and +`service` uses `namespace` and `name` to select the Service with a specific name under a specific Namespace; only a NodePort Service can be referred by `service` field. -`service` field cannot be used with any other fields and a policy or a rule can't be applied to NodePort Service -and other peers at the same time. +There are a few **restrictions** on configuring a policy/rule that applies to NodePort Services: -Since `service` field is used to control the external access of a NodePort Service, then - -1. If a `appliedTo` with `service` is used at policy level, then this policy can only contain ingress rules. -2. If a `appliedTo` with `service` is used at rule level, then this rule can only be an ingress rule. -3. If an ingress rule is applied to a NodePort Service, then this ingress can only use `ipBlock` in its `from` field. +1. `service` field cannot be used with any other fields in `appliedTo`. +2. A policy or a rule can't be applied to both a NodePort Service and other entities at the same time. +3. If an `appliedTo` with `service` is used at policy level, then this policy can only contain ingress rules. +4. If an `appliedTo` with `service` is used at rule level, then this rule can only be an ingress rule. +5. If an ingress rule is applied to a NodePort Service, then this rule can only use `ipBlock` in its `from` field. An example policy using `service` in `appliedTo` could look like this: @@ -1339,7 +1338,7 @@ spec: cidr: 1.1.1.0/24 ``` -In this example, the policy will be applied to the NodePort Service `svc-1` in Namespace `ns-1` +In this example, the policy will be applied to the NodePort Service `svc-1` in Namespace `ns-1`, and drop all packets from CIDR `1.1.1.0/24`. ## ClusterGroup diff --git a/pkg/agent/controller/networkpolicy/networkpolicy_controller_test.go b/pkg/agent/controller/networkpolicy/networkpolicy_controller_test.go index 999c7787176..31549367173 100644 --- a/pkg/agent/controller/networkpolicy/networkpolicy_controller_test.go +++ b/pkg/agent/controller/networkpolicy/networkpolicy_controller_test.go @@ -674,7 +674,7 @@ func TestValidate(t *testing.T) { groups := v1beta2.GroupMemberSet{} groupAddress1, groupAddress2 := "225.1.2.3", "225.1.2.4" - groups["ns1/pod1"] = newAppliedToGroupMemberPod("pod1", "ns1") + groups["Pod:ns1/pod1"] = newAppliedToGroupMemberPod("pod1", "ns1") controller.ruleCache.appliedToSetByGroup["appliedToGroup01"] = groups controller.ruleCache.rules.Add(rule1) controller.ruleCache.rules.Add(rule2) diff --git a/pkg/agent/controller/networkpolicy/reconciler.go b/pkg/agent/controller/networkpolicy/reconciler.go index d86c41580e9..0a5b1fe4f95 100644 --- a/pkg/agent/controller/networkpolicy/reconciler.go +++ b/pkg/agent/controller/networkpolicy/reconciler.go @@ -162,24 +162,24 @@ type lastRealized struct { // the fqdn selector of this policy rule. It must be empty for policy rule // that is not egress and does not have toFQDN field. fqdnIPAddresses sets.String - // groupIDAddresses tracks the last realized set of groupIDs resolved for the + // serviceGroupIDs tracks the last realized set of groupIDs resolved for the // toServices of this policy rule or services of TargetMember of this policy rule. 
// It must be empty for policy rule that is neither an egress rule with toServices // field nor an ingress rule that is applied to Services. - groupIDAddresses sets.Int64 + serviceGroupIDs sets.Int64 // groupAddresses track the latest realized set of multicast groups for the multicast traffic groupAddresses sets.String } func newLastRealized(rule *CompletedRule) *lastRealized { return &lastRealized{ - ofIDs: map[servicesKey]uint32{}, - CompletedRule: rule, - podOFPorts: map[servicesKey]sets.Int32{}, - podIPs: nil, - fqdnIPAddresses: nil, - groupIDAddresses: nil, - groupAddresses: nil, + ofIDs: map[servicesKey]uint32{}, + CompletedRule: rule, + podOFPorts: map[servicesKey]sets.Int32{}, + podIPs: nil, + fqdnIPAddresses: nil, + serviceGroupIDs: nil, + groupAddresses: nil, } } @@ -504,7 +504,7 @@ func (r *reconciler) add(rule *CompletedRule, ofPriority *uint16, table uint8) e if r.fqdnController != nil { lastRealized.fqdnIPAddresses = nil } - lastRealized.groupIDAddresses = nil + lastRealized.serviceGroupIDs = nil return err } // Record ofID only if its Openflow is installed successfully. @@ -551,13 +551,9 @@ func (r *reconciler) computeOFRulesForAdd(rule *CompletedRule, ofPriority *uint1 for svcKey, members := range membersByServicesMap { toAddresses := make([]types.Address, 0) if isRuleAppliedToService { - addressSet := sets.NewInt64() - for _, member := range members.Items() { - curAddresses, curAddressSet := r.svcRefToOFAddresses(*member.Service) - toAddresses = append(toAddresses, curAddresses...) - addressSet = addressSet.Union(curAddressSet) - } - lastRealized.groupIDAddresses = addressSet + svcGroupIDs := r.getSvcGroupIDs(members) + toAddresses = svcGroupIDsToOFAddresses(svcGroupIDs) + lastRealized.serviceGroupIDs = svcGroupIDs } else { ofPorts := r.getOFPorts(members) toAddresses = ofPortsToOFAddresses(ofPorts) @@ -647,16 +643,11 @@ func (r *reconciler) computeOFRulesForAdd(rule *CompletedRule, ofPriority *uint1 lastRealized.fqdnIPAddresses = addressSet } if len(rule.To.ToServices) > 0 { - var addresses []types.Address - addressSet := sets.NewInt64() - for _, svcRef := range rule.To.ToServices { - curAddresses, curAddressSet := r.svcRefToOFAddresses(svcRef) - addresses = append(addresses, curAddresses...) - addressSet = addressSet.Union(curAddressSet) - } + svcGroupIDs := r.svcRefsToGroupIDs(rule.To.ToServices) + addresses := svcGroupIDsToOFAddresses(svcGroupIDs) ofRule.To = append(ofRule.To, addresses...) // If the rule installation fails, this will be reset. - lastRealized.groupIDAddresses = addressSet + lastRealized.serviceGroupIDs = svcGroupIDs } } } @@ -762,18 +753,14 @@ func (r *reconciler) update(lastRealized *lastRealized, newRule *CompletedRule, membersByServicesMap, servicesMap := groupMembersByServices(newRule.Services, newRule.TargetMembers) for svcKey, members := range membersByServicesMap { newOFPorts := r.getOFPorts(members) - newGroupIDAddressSet := sets.NewInt64() - ofID, exists := lastRealized.ofIDs[svcKey] - toAddresses := make([]types.Address, 0) + newGroupIDSet := r.getSvcGroupIDs(members) + var toAddresses []types.Address if isRuleAppliedToService { - for _, member := range members.Items() { - curAddresses, curAddressSet := r.svcRefToOFAddresses(*member.Service) - toAddresses = append(toAddresses, curAddresses...) 
- newGroupIDAddressSet = newGroupIDAddressSet.Union(curAddressSet) - } + toAddresses = svcGroupIDsToOFAddresses(newGroupIDSet) } else { toAddresses = ofPortsToOFAddresses(newOFPorts) } + ofID, exists := lastRealized.ofIDs[svcKey] // Install a new Openflow rule if this group doesn't exist, otherwise do incremental update. if !exists { ofRule := &types.PolicyRule{ @@ -797,22 +784,21 @@ func (r *reconciler) update(lastRealized *lastRealized, newRule *CompletedRule, } lastRealized.ofIDs[svcKey] = ofRule.FlowID } else { - addedTo := ofPortsToOFAddresses(newOFPorts.Difference(lastRealized.podOFPorts[svcKey])) - deletedTo := ofPortsToOFAddresses(lastRealized.podOFPorts[svcKey].Difference(newOFPorts)) + var addedTo, deletedTo []types.Address if isRuleAppliedToService { - originalGroupIDAddressSet := sets.NewInt64() - if lastRealized.groupIDAddresses != nil { - originalGroupIDAddressSet = lastRealized.groupIDAddresses - } - addedGroupIDAddress := newGroupIDAddressSet.Difference(originalGroupIDAddressSet) - removedGroupIDAddress := originalGroupIDAddressSet.Difference(newGroupIDAddressSet) - for a := range addedGroupIDAddress { - addedTo = append(addedTo, openflow.NewServiceGroupIDAddress(binding.GroupIDType(a))) + originalGroupIDSet := sets.NewInt64() + if lastRealized.serviceGroupIDs != nil { + originalGroupIDSet = lastRealized.serviceGroupIDs } - for r := range removedGroupIDAddress { - deletedTo = append(deletedTo, openflow.NewServiceGroupIDAddress(binding.GroupIDType(r))) + addedTo = svcGroupIDsToOFAddresses(newGroupIDSet.Difference(originalGroupIDSet)) + deletedTo = svcGroupIDsToOFAddresses(originalGroupIDSet.Difference(newGroupIDSet)) + } else { + originalOfPortsSet := sets.NewInt32() + if lastRealized.podOFPorts[svcKey] != nil { + originalOfPortsSet = lastRealized.podOFPorts[svcKey] } - lastRealized.groupIDAddresses = newGroupIDAddressSet + addedTo = ofPortsToOFAddresses(newOFPorts.Difference(originalOfPortsSet)) + deletedTo = ofPortsToOFAddresses(originalOfPortsSet.Difference(newOFPorts)) } if err := r.updateOFRule(ofID, addedFrom, addedTo, deletedFrom, deletedTo, ofPriority); err != nil { return err @@ -821,6 +807,7 @@ func (r *reconciler) update(lastRealized *lastRealized, newRule *CompletedRule, delete(staleOFIDs, svcKey) } lastRealized.podOFPorts[svcKey] = newOFPorts + lastRealized.serviceGroupIDs = newGroupIDSet } } else { if r.fqdnController != nil && len(newRule.To.FQDNs) > 0 { @@ -894,23 +881,14 @@ func (r *reconciler) update(lastRealized *lastRealized, newRule *CompletedRule, } } } - originalGroupIDAddressSet, newGroupIDAddressSet := sets.NewInt64(), sets.NewInt64() - if lastRealized.groupIDAddresses != nil { - originalGroupIDAddressSet = lastRealized.groupIDAddresses - } + originalGroupIDSet, newGroupIDSet := sets.NewInt64(), sets.NewInt64() if len(newRule.To.ToServices) > 0 { - for _, svcRef := range newRule.To.ToServices { - _, groupIDSets := r.svcRefToOFAddresses(svcRef) - newGroupIDAddressSet = newGroupIDAddressSet.Union(groupIDSets) - } - addedGroupIDAddress := newGroupIDAddressSet.Difference(originalGroupIDAddressSet) - removedGroupIDAddress := originalGroupIDAddressSet.Difference(newGroupIDAddressSet) - for a := range addedGroupIDAddress { - addedTo = append(addedTo, openflow.NewServiceGroupIDAddress(binding.GroupIDType(a))) - } - for r := range removedGroupIDAddress { - deletedTo = append(deletedTo, openflow.NewServiceGroupIDAddress(binding.GroupIDType(r))) + if lastRealized.serviceGroupIDs != nil { + originalGroupIDSet = lastRealized.serviceGroupIDs } + newGroupIDSet = 
r.svcRefsToGroupIDs(newRule.To.ToServices) + addedTo = svcGroupIDsToOFAddresses(newGroupIDSet.Difference(originalGroupIDSet)) + deletedTo = svcGroupIDsToOFAddresses(originalGroupIDSet.Difference(newGroupIDSet)) } if err := r.updateOFRule(ofID, addedFrom, addedTo, deletedFrom, deletedTo, ofPriority); err != nil { return err @@ -920,7 +898,7 @@ func (r *reconciler) update(lastRealized *lastRealized, newRule *CompletedRule, lastRealized.fqdnIPAddresses = newFQDNAddressSet } // Update the groupID address set if rule installation succeeds. - lastRealized.groupIDAddresses = newGroupIDAddressSet + lastRealized.serviceGroupIDs = newGroupIDSet // Delete valid servicesKey from staleOFIDs. delete(staleOFIDs, svcKey) } @@ -1094,6 +1072,19 @@ func (r *reconciler) getIPs(members v1beta2.GroupMemberSet) sets.String { return ips } +func (r *reconciler) getSvcGroupIDs(members v1beta2.GroupMemberSet) sets.Int64 { + var svcRefs []v1beta2.ServiceReference + for _, m := range members { + if m.Service != nil { + svcRefs = append(svcRefs, v1beta2.ServiceReference{ + Name: m.Service.Name, + Namespace: m.Service.Namespace, + }) + } + } + return r.svcRefsToGroupIDs(svcRefs) +} + // groupMembersByServices groups the provided groupMembers based on their services resolving result. // A map of servicesHash to the grouped members and a map of servicesHash to the services resolving result will be returned. func groupMembersByServices(services []v1beta2.Service, memberSet v1beta2.GroupMemberSet) (map[servicesKey]v1beta2.GroupMemberSet, map[servicesKey][]v1beta2.Service) { @@ -1143,16 +1134,25 @@ func ofPortsToOFAddresses(ofPorts sets.Int32) []types.Address { return addresses } -func (r *reconciler) svcRefToOFAddresses(svcRef v1beta2.ServiceReference) ([]types.Address, sets.Int64) { - var addresses []types.Address - addressSet := sets.NewInt64() - for _, groupCounter := range r.groupCounters { - for _, groupID := range groupCounter.GetAllGroupIDs(k8s.NamespacedName(svcRef.Namespace, svcRef.Name)) { - addresses = append(addresses, openflow.NewServiceGroupIDAddress(groupID)) - addressSet.Insert(int64(groupID)) +func (r *reconciler) svcRefsToGroupIDs(svcRefs []v1beta2.ServiceReference) sets.Int64 { + groupIDs := sets.NewInt64() + for _, svcRef := range svcRefs { + for _, groupCounter := range r.groupCounters { + for _, groupID := range groupCounter.GetAllGroupIDs(k8s.NamespacedName(svcRef.Namespace, svcRef.Name)) { + groupIDs.Insert(int64(groupID)) + } } } - return addresses, addressSet + return groupIDs +} + +func svcGroupIDsToOFAddresses(groupIDs sets.Int64) []types.Address { + // Must not return nil as it means not restricted by addresses in Openflow implementation. 
+ addresses := make([]types.Address, 0, len(groupIDs)) + for _, groupID := range groupIDs.List() { + addresses = append(addresses, openflow.NewServiceGroupIDAddress(binding.GroupIDType(groupID))) + } + return addresses } func groupMembersToOFAddresses(groupMemberSet v1beta2.GroupMemberSet) []types.Address { diff --git a/pkg/agent/controller/networkpolicy/reject.go b/pkg/agent/controller/networkpolicy/reject.go index 3c354142c0f..e79a4093e7f 100644 --- a/pkg/agent/controller/networkpolicy/reject.go +++ b/pkg/agent/controller/networkpolicy/reject.go @@ -145,7 +145,7 @@ func (c *Controller) rejectRequest(pktIn *ofctrl.PacketIn) error { return fmt.Errorf("error when generating reject response for the packet from: %s to %s: neither source nor destination are on this Node", dstIP, srcIP) } if packetOutType == RejectServiceRemoteToExternal { - dstMAC = "aa:bb:cc:dd:ee:ff" + dstMAC = openflow.GlobalVirtualMAC.String() } // When in AntreaIPAM mode, even though srcPod and dstPod are on the same Node, MAC // will still be re-written in L3ForwardingTable. During rejection, the reject diff --git a/pkg/apis/controlplane/sets.go b/pkg/apis/controlplane/sets.go index 9dd9d1d60c4..7bc7bbe5799 100644 --- a/pkg/apis/controlplane/sets.go +++ b/pkg/apis/controlplane/sets.go @@ -30,7 +30,7 @@ type groupMemberKey string type GroupMemberSet map[groupMemberKey]*GroupMember // normalizeGroupMember calculates the groupMemberKey of the provided -// GroupMember based on the Pod/ExternalEntity's namespaced name and IPs. +// GroupMember based on the Pod/ExternalEntity/Service's namespaced name and IPs. // For GroupMembers in appliedToGroups, the IPs are not set, so the // generated key does not contain IP information. func normalizeGroupMember(member *GroupMember) groupMemberKey { @@ -38,13 +38,20 @@ func normalizeGroupMember(member *GroupMember) groupMemberKey { const delimiter = "/" var b strings.Builder if member.Pod != nil { + b.WriteString("Pod:") b.WriteString(member.Pod.Namespace) b.WriteString(delimiter) b.WriteString(member.Pod.Name) } else if member.ExternalEntity != nil { + b.WriteString("ExternalEntity:") b.WriteString(member.ExternalEntity.Namespace) b.WriteString(delimiter) b.WriteString(member.ExternalEntity.Name) + } else if member.Service != nil { + b.WriteString("Service:") + b.WriteString(member.Service.Namespace) + b.WriteString(delimiter) + b.WriteString(member.Service.Name) } for _, ip := range member.IPs { b.Write(ip) diff --git a/pkg/apis/controlplane/v1beta2/sets.go b/pkg/apis/controlplane/v1beta2/sets.go index 0a35f1ec525..005c322a9b7 100644 --- a/pkg/apis/controlplane/v1beta2/sets.go +++ b/pkg/apis/controlplane/v1beta2/sets.go @@ -30,7 +30,7 @@ type groupMemberKey string type GroupMemberSet map[groupMemberKey]*GroupMember // normalizeGroupMember calculates the groupMemberKey of the provided -// GroupMember based on the Pod/ExternalEntity's namespaced name and IPs. +// GroupMember based on the Pod/ExternalEntity/Service's namespaced name and IPs. // For GroupMembers in appliedToGroups, the IPs are not set, so the // generated key does not contain IP information. 
func normalizeGroupMember(member *GroupMember) groupMemberKey { @@ -38,14 +38,17 @@ func normalizeGroupMember(member *GroupMember) groupMemberKey { const delimiter = "/" var b strings.Builder if member.Pod != nil { + b.WriteString("Pod:") b.WriteString(member.Pod.Namespace) b.WriteString(delimiter) b.WriteString(member.Pod.Name) } else if member.ExternalEntity != nil { + b.WriteString("ExternalEntity:") b.WriteString(member.ExternalEntity.Namespace) b.WriteString(delimiter) b.WriteString(member.ExternalEntity.Name) } else if member.Service != nil { + b.WriteString("Service:") b.WriteString(member.Service.Namespace) b.WriteString(delimiter) b.WriteString(member.Service.Name) diff --git a/pkg/controller/networkpolicy/networkpolicy_controller.go b/pkg/controller/networkpolicy/networkpolicy_controller.go index 6e31d859a7b..02d7856f809 100644 --- a/pkg/controller/networkpolicy/networkpolicy_controller.go +++ b/pkg/controller/networkpolicy/networkpolicy_controller.go @@ -18,7 +18,6 @@ package networkpolicy import ( - "context" "fmt" "net" "reflect" @@ -1250,13 +1249,14 @@ func (n *NetworkPolicyController) syncAppliedToGroup(key string) error { var updatedAppliedToGroup *antreatypes.AppliedToGroup if appliedToGroup.Service != nil { // AppliedToGroup for NodePort Service span to all Nodes. - nodeList, err := n.kubeClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) + nodeList, err := n.nodeLister.List(labels.Everything()) if err != nil { return fmt.Errorf("unable to list Nodes") } - for _, node := range nodeList.Items { + serviceGroupMemberSet := controlplane.NewGroupMemberSet(serviceToGroupMember(appliedToGroup.Service)) + for _, node := range nodeList { appGroupNodeNames.Insert(node.Name) - memberSetByNode[node.Name] = controlplane.NewGroupMemberSet(serviceToGroupMember(appliedToGroup.Service)) + memberSetByNode[node.Name] = serviceGroupMemberSet } updatedAppliedToGroup = &antreatypes.AppliedToGroup{ UID: appliedToGroup.UID, diff --git a/pkg/controller/networkpolicy/validate_test.go b/pkg/controller/networkpolicy/validate_test.go index 059b754899a..2ac1dc93737 100644 --- a/pkg/controller/networkpolicy/validate_test.go +++ b/pkg/controller/networkpolicy/validate_test.go @@ -1053,6 +1053,176 @@ func TestValidateAntreaPolicy(t *testing.T) { }, expectedReason: "Invalid label key: foo=: name part must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 
'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]')", }, + { + name: "acnp-appliedto-service-set-with-psel", + policy: &crdv1alpha1.ClusterNetworkPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "acnp-appliedto-service-set-with-psel", + }, + Spec: crdv1alpha1.ClusterNetworkPolicySpec{ + AppliedTo: []crdv1alpha1.NetworkPolicyPeer{ + { + PodSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"foo1": "bar1"}, + }, + Service: &crdv1alpha1.NamespacedName{ + Namespace: "foo2", + Name: "bar2", + }, + }, + }, + }, + }, + expectedReason: "service cannot be set with other peers in appliedTo", + }, + { + name: "acnp-appliedto-service-and-psel", + policy: &crdv1alpha1.ClusterNetworkPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "acnp-appliedto-service-and-psel", + }, + Spec: crdv1alpha1.ClusterNetworkPolicySpec{ + AppliedTo: []crdv1alpha1.NetworkPolicyPeer{ + { + PodSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"foo1": "bar1"}, + }, + }, + { + Service: &crdv1alpha1.NamespacedName{ + Namespace: "foo2", + Name: "bar2", + }, + }, + }, + }, + }, + expectedReason: "a rule/policy cannot be applied to Services and other peers at the same time", + }, + { + name: "acnp-appliedto-service-with-egress-rule", + policy: &crdv1alpha1.ClusterNetworkPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "acnp-appliedto-service-with-egress-rule", + }, + Spec: crdv1alpha1.ClusterNetworkPolicySpec{ + AppliedTo: []crdv1alpha1.NetworkPolicyPeer{ + { + Service: &crdv1alpha1.NamespacedName{ + Namespace: "foo1", + Name: "bar1", + }, + }, + }, + Egress: []crdv1alpha1.Rule{ + { + Action: &allowAction, + To: []crdv1alpha1.NetworkPolicyPeer{ + { + IPBlock: &crdv1alpha1.IPBlock{ + CIDR: "10.0.0.10/32", + }, + }, + }, + }, + }, + }, + }, + expectedReason: "egress rule cannot be applied to Services", + }, + { + name: "egress-rule-appliedto-service", + policy: &crdv1alpha1.ClusterNetworkPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "egress-rule-appliedto-service", + }, + Spec: crdv1alpha1.ClusterNetworkPolicySpec{ + Egress: []crdv1alpha1.Rule{ + { + Action: &allowAction, + To: []crdv1alpha1.NetworkPolicyPeer{ + { + IPBlock: &crdv1alpha1.IPBlock{ + CIDR: "10.0.0.10/32", + }, + }, + }, + AppliedTo: []crdv1alpha1.NetworkPolicyPeer{ + { + Service: &crdv1alpha1.NamespacedName{ + Namespace: "foo1", + Name: "bar1", + }, + }, + }, + }, + }, + }, + }, + expectedReason: "egress rule cannot be applied to Services", + }, + { + name: "acnp-appliedto-service-from-psel", + policy: &crdv1alpha1.ClusterNetworkPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "egress-rule-appliedto-service", + }, + Spec: crdv1alpha1.ClusterNetworkPolicySpec{ + AppliedTo: []crdv1alpha1.NetworkPolicyPeer{ + { + Service: &crdv1alpha1.NamespacedName{ + Namespace: "foo1", + Name: "bar1", + }, + }, + }, + Ingress: []crdv1alpha1.Rule{ + { + Action: &allowAction, + From: []crdv1alpha1.NetworkPolicyPeer{ + { + PodSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"foo1": "bar1"}, + }, + }, + }, + }, + }, + }, + }, + expectedReason: "a rule/policy that is applied to Services can only use ipBlock to select workloads", + }, + { + name: "acnp-appliedto-service-valid", + policy: &crdv1alpha1.ClusterNetworkPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "egress-rule-appliedto-service", + }, + Spec: crdv1alpha1.ClusterNetworkPolicySpec{ + AppliedTo: []crdv1alpha1.NetworkPolicyPeer{ + { + Service: &crdv1alpha1.NamespacedName{ + Namespace: "foo1", + Name: "bar1", + }, + }, + }, + 
Ingress: []crdv1alpha1.Rule{ + { + Action: &allowAction, + From: []crdv1alpha1.NetworkPolicyPeer{ + { + IPBlock: &crdv1alpha1.IPBlock{ + CIDR: "10.0.0.10/32", + }, + }, + }, + }, + }, + }, + }, + expectedReason: "", + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) {