diff --git a/.github/workflows/kind.yml b/.github/workflows/kind.yml
index 55c20c395a3..8d4230d7c3d 100755
--- a/.github/workflows/kind.yml
+++ b/.github/workflows/kind.yml
@@ -10,7 +10,7 @@ on:
       - release-*
 
 env:
-  KIND_VERSION: v0.9.0
+  KIND_VERSION: v0.11.0
 
 jobs:
   check-changes:
diff --git a/.github/workflows/kind_upgrade.yml b/.github/workflows/kind_upgrade.yml
index c3190d12df0..10bc4d72085 100644
--- a/.github/workflows/kind_upgrade.yml
+++ b/.github/workflows/kind_upgrade.yml
@@ -10,7 +10,7 @@ on:
       - release-*
 
 env:
-  KIND_VERSION: v0.9.0
+  KIND_VERSION: v0.11.0
 
 jobs:
   check-changes:
diff --git a/.github/workflows/netpol_cyclonus.yml b/.github/workflows/netpol_cyclonus.yml
index 630574ac193..010f5979539 100644
--- a/.github/workflows/netpol_cyclonus.yml
+++ b/.github/workflows/netpol_cyclonus.yml
@@ -5,7 +5,7 @@ on:
     - cron: '0 0 * * *'
 
 env:
-  KIND_VERSION: v0.9.0
+  KIND_VERSION: v0.11.0
 
 jobs:
 
diff --git a/ci/kind/kind-setup.sh b/ci/kind/kind-setup.sh
index 0c49b47dabb..61afe8c9051 100755
--- a/ci/kind/kind-setup.sh
+++ b/ci/kind/kind-setup.sh
@@ -233,6 +233,8 @@ function create {
   cat <<EOF > $config_file
 kind: Cluster
 apiVersion: kind.x-k8s.io/v1alpha4
+featureGates:
+  "NetworkPolicyEndPort": true
 networking:
   disableDefaultCNI: true
   podSubnet: $POD_CIDR
diff --git a/pkg/controller/networkpolicy/networkpolicy_controller.go b/pkg/controller/networkpolicy/networkpolicy_controller.go
index 0bb29fc3c12..5965fc1c360 100644
--- a/pkg/controller/networkpolicy/networkpolicy_controller.go
+++ b/pkg/controller/networkpolicy/networkpolicy_controller.go
@@ -529,11 +529,11 @@ func toAntreaServices(npPorts []networkingv1.NetworkPolicyPort) ([]controlplane.Service, bool) {
 		if npPort.Port != nil && npPort.Port.Type == intstr.String {
 			namedPortExists = true
 		}
-		antreaService := controlplane.Service{
+		antreaServices = append(antreaServices, controlplane.Service{
 			Protocol: toAntreaProtocol(npPort.Protocol),
 			Port:     npPort.Port,
-		}
-		antreaServices = append(antreaServices, antreaService)
+			EndPort:  npPort.EndPort,
+		})
 	}
 	return antreaServices, namedPortExists
 }
diff --git a/pkg/controller/networkpolicy/networkpolicy_controller_test.go b/pkg/controller/networkpolicy/networkpolicy_controller_test.go
index 7237691d04f..0d2bdeb4afa 100644
--- a/pkg/controller/networkpolicy/networkpolicy_controller_test.go
+++ b/pkg/controller/networkpolicy/networkpolicy_controller_test.go
@@ -532,6 +532,61 @@ func TestAddNetworkPolicy(t *testing.T) {
 			expAppliedToGroups: 1,
 			expAddressGroups:   2,
 		},
+		{
+			name: "rule-with-end-port",
+			inputPolicy: &networkingv1.NetworkPolicy{
+				ObjectMeta: metav1.ObjectMeta{Namespace: "nsA", Name: "npG", UID: "uidG"},
+				Spec: networkingv1.NetworkPolicySpec{
+					PodSelector: selectorA,
+					Ingress: []networkingv1.NetworkPolicyIngressRule{
+						{
+							Ports: []networkingv1.NetworkPolicyPort{
+								{
+									Protocol: &k8sProtocolTCP,
+									Port:     &int1000,
+									EndPort:  &int32For1999,
+								},
+							},
+							From: []networkingv1.NetworkPolicyPeer{
+								{
+									PodSelector: &selectorB,
+								},
+							},
+						},
+					},
+				},
+			},
+			expPolicy: &antreatypes.NetworkPolicy{
+				UID:  "uidG",
+				Name: "uidG",
+				SourceRef: &controlplane.NetworkPolicyReference{
+					Type:      controlplane.K8sNetworkPolicy,
+					Namespace: "nsA",
+					Name:      "npG",
+					UID:       "uidG",
+				},
+				Rules: []controlplane.NetworkPolicyRule{
+					{
+						Direction: controlplane.DirectionIn,
+						From: controlplane.NetworkPolicyPeer{
+							AddressGroups: []string{getNormalizedUID(toGroupSelector("nsA", &selectorB, nil, nil).NormalizedName)},
+						},
+						Services: []controlplane.Service{
+							{
+								Protocol: &protocolTCP,
+								Port:     &int1000,
+								EndPort:  &int32For1999,
+							},
+						},
+						Priority: defaultRulePriority,
+						Action:   &defaultAction,
+					},
+				},
+				AppliedToGroups: []string{getNormalizedUID(toGroupSelector("nsA", &selectorA, nil, nil).NormalizedName)},
+			},
+			expAppliedToGroups: 1,
+			expAddressGroups:   1,
+		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
@@ -549,7 +604,7 @@ func TestAddNetworkPolicy(t *testing.T) {
 	for _, tt := range tests {
 		npc.addNetworkPolicy(tt.inputPolicy)
 	}
-	assert.Equal(t, 6, npc.GetNetworkPolicyNum(), "expected networkPolicy number is 6")
+	assert.Equal(t, 7, npc.GetNetworkPolicyNum(), "expected networkPolicy number is 7")
 	assert.Equal(t, 4, npc.GetAddressGroupNum(), "expected addressGroup number is 4")
 	assert.Equal(t, 2, npc.GetAppliedToGroupNum(), "appliedToGroup number is 2")
 }
diff --git a/test/e2e/infra/vagrant/playbook/roles/control-plane/templates/kubeadm.conf.j2 b/test/e2e/infra/vagrant/playbook/roles/control-plane/templates/kubeadm.conf.j2
index ccfe7e42e72..04b9aee6181 100644
--- a/test/e2e/infra/vagrant/playbook/roles/control-plane/templates/kubeadm.conf.j2
+++ b/test/e2e/infra/vagrant/playbook/roles/control-plane/templates/kubeadm.conf.j2
@@ -13,3 +13,5 @@ networking:
 apiServer:
   certSANs:
   - "{{ k8s_api_server_ip }}"
+  extraArgs:
+    feature-gates: "NetworkPolicyEndPort=true"
diff --git a/test/e2e/networkpolicy_test.go b/test/e2e/networkpolicy_test.go
index dadee9e6a6d..c075536bea1 100644
--- a/test/e2e/networkpolicy_test.go
+++ b/test/e2e/networkpolicy_test.go
@@ -806,6 +806,163 @@ func TestIngressPolicyWithoutPortNumber(t *testing.T) {
 	}
 }
 
+func TestIngressPolicyWithEndPort(t *testing.T) {
+	skipIfHasWindowsNodes(t)
+
+	data, err := setupTest(t)
+	if err != nil {
+		t.Fatalf("Error when setting up test: %v", err)
+	}
+	defer teardownTest(t, data)
+
+	serverPort := int32(80)
+	serverEndPort := int32(84)
+	policyPort := int32(81)
+	policyEndPort := int32(83)
+
+	var serverPorts []int32
+	for i := serverPort; i <= serverEndPort; i++ {
+		serverPorts = append(serverPorts, i)
+	}
+
+	// makeContainerSpec creates a Container listening on a specific port.
+	makeContainerSpec := func(port int32) corev1.Container {
+		return corev1.Container{
+			Name:            fmt.Sprintf("c%d", port),
+			ImagePullPolicy: corev1.PullIfNotPresent,
+			Image:           agnhostImage,
+			Command:         []string{"/bin/bash", "-c"},
+			Args:            []string{fmt.Sprintf("/agnhost serve-hostname --tcp --http=false --port=%d", port)},
+			Ports: []corev1.ContainerPort{
+				{
+					ContainerPort: port,
+					Name:          fmt.Sprintf("serve-%d", port),
+					Protocol:      corev1.ProtocolTCP,
+				},
+			},
+		}
+	}
+
+	// createAgnhostPodOnNodeWithMultiPort creates a Pod in the test namespace with
+	// multiple agnhost containers listening on multiple ports.
+	// The Pod will be scheduled on the specified Node (if nodeName is not empty).
+	createAgnhostPodOnNodeWithMultiPort := func(name string, nodeName string) error {
+		var containers []corev1.Container
+		for _, port := range serverPorts {
+			containers = append(containers, makeContainerSpec(port))
+		}
+		podSpec := corev1.PodSpec{
+			Containers:    containers,
+			RestartPolicy: corev1.RestartPolicyNever,
+			HostNetwork:   false,
+		}
+		if nodeName != "" {
+			podSpec.NodeSelector = map[string]string{
+				"kubernetes.io/hostname": nodeName,
+			}
+		}
+		if nodeName == controlPlaneNodeName() {
+			// Tolerate the NoSchedule taint so that the Pod can run on the control-plane Node.
+			noScheduleToleration := controlPlaneNoScheduleToleration()
+			podSpec.Tolerations = []corev1.Toleration{noScheduleToleration}
+		}
+		pod := &corev1.Pod{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: name,
+				Labels: map[string]string{
+					"antrea-e2e": name,
+					"app":        getImageName(agnhostImage),
+				},
+			},
+			Spec: podSpec,
+		}
+		if _, err := data.clientset.CoreV1().Pods(testNamespace).Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil {
+			return err
+		}
+		return nil
+	}
+
+	serverName, serverIPs, cleanupFunc := createAndWaitForPod(t, data, createAgnhostPodOnNodeWithMultiPort, "test-server-", "")
+	defer cleanupFunc()
+
+	clientName, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-client-", "")
+	defer cleanupFunc()
+
+	preCheck := func(serverIP string) {
+		// Before the policy is applied, the client can connect to the server on all ports.
+		for _, port := range serverPorts {
+			if err = data.runNetcatCommandFromTestPod(clientName, serverIP, port); err != nil {
+				t.Fatalf("Pod %s should be able to connect to %s, but was not able to connect", clientName, net.JoinHostPort(serverIP, fmt.Sprint(port)))
+			}
+		}
+	}
+
+	if clusterInfo.podV4NetworkCIDR != "" {
+		preCheck(serverIPs.ipv4.String())
+	}
+	if clusterInfo.podV6NetworkCIDR != "" {
+		preCheck(serverIPs.ipv6.String())
+	}
+
+	protocol := corev1.ProtocolTCP
+	spec := &networkingv1.NetworkPolicySpec{
+		PodSelector: metav1.LabelSelector{
+			MatchLabels: map[string]string{
+				"antrea-e2e": serverName,
+			},
+		},
+		PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress},
+		Ingress: []networkingv1.NetworkPolicyIngressRule{
+			{
+				Ports: []networkingv1.NetworkPolicyPort{
+					{
+						Protocol: &protocol,
+						Port:     &intstr.IntOrString{Type: intstr.Int, IntVal: policyPort},
+						EndPort:  &policyEndPort,
+					},
+				},
+				From: []networkingv1.NetworkPolicyPeer{{
+					PodSelector: &metav1.LabelSelector{
+						MatchLabels: map[string]string{
+							"antrea-e2e": clientName,
+						},
+					}},
+				},
+			},
+		},
+	}
+	np, err := data.createNetworkPolicy("test-networkpolicy-ingress-with-endport", spec)
+	if err != nil {
+		t.Fatalf("Error when creating network policy: %v", err)
+	}
+	defer func() {
+		if err = data.deleteNetworkpolicy(np); err != nil {
+			t.Fatalf("Error when deleting network policy: %v", err)
+		}
+	}()
+
+	npCheck := func(serverIP string) {
+		// With the policy applied, only ports within [policyPort, policyEndPort] remain reachable.
+		for _, port := range serverPorts {
+			err = data.runNetcatCommandFromTestPod(clientName, serverIP, port)
+			if port >= policyPort && port <= policyEndPort {
+				if err != nil {
+					t.Errorf("Pod %s should be able to connect to %s, but was not able to connect", clientName, net.JoinHostPort(serverIP, fmt.Sprint(port)))
+				}
+			} else if err == nil {
+				t.Errorf("Pod %s should not be able to connect to %s, but was able to connect", clientName, net.JoinHostPort(serverIP, fmt.Sprint(port)))
+			}
+		}
+	}
+
+	if clusterInfo.podV4NetworkCIDR != "" {
+		npCheck(serverIPs.ipv4.String())
+	}
+	if clusterInfo.podV6NetworkCIDR != "" {
+		npCheck(serverIPs.ipv6.String())
+	}
+}
+
 func createAndWaitForPod(t *testing.T, data *TestData, createFunc func(name string, nodeName string) error, namePrefix string, nodeName string) (string, *PodIPs, func()) {
 	name := randName(namePrefix)
 	if err := createFunc(name, nodeName); err != nil {
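
For reference, the endPort rule that this patch wires through toAntreaServices and exercises in TestIngressPolicyWithEndPort can be written as a plain manifest. The sketch below is illustrative and not part of the patch: the policy name, namespace, and label values are hypothetical, and applying it assumes a cluster (Kubernetes 1.21 or later) with the NetworkPolicyEndPort feature gate enabled, as configured above for Kind and kubeadm. The port numbers mirror the e2e test, which serves on ports 80-84 and allows only 81-83.

apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-endport-range     # hypothetical name
  namespace: default            # hypothetical namespace
spec:
  podSelector:
    matchLabels:
      antrea-e2e: test-server   # illustrative label, mirroring the e2e test server Pod
  policyTypes:
    - Ingress
  ingress:
    - from:
        - podSelector:
            matchLabels:
              antrea-e2e: test-client
      ports:
        - protocol: TCP
          port: 81              # first port of the allowed range
          endPort: 83           # inclusive end of the range; requires NetworkPolicyEndPort=true

Traffic from the client Pod to ports 81-83 is admitted, while ports 80 and 84 on the same server remain blocked, which is exactly what the npCheck closure in the e2e test asserts.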