Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add K8sNP endPort support #2190

Merged
merged 3 commits into from
May 26, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .github/workflows/kind.yml
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ on:
- release-*

env:
KIND_VERSION: v0.9.0
KIND_VERSION: v0.11.0

jobs:
check-changes:
Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/kind_upgrade.yml
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ on:
- release-*

env:
KIND_VERSION: v0.9.0
KIND_VERSION: v0.11.0

jobs:
check-changes:
Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/netpol_cyclonus.yml
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ on:
- cron: '0 0 * * *'

env:
KIND_VERSION: v0.9.0
KIND_VERSION: v0.11.0

jobs:

Expand Down
2 changes: 2 additions & 0 deletions ci/kind/kind-setup.sh
Original file line number Diff line number Diff line change
Expand Up @@ -233,6 +233,8 @@ function create {
cat <<EOF > $config_file
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
featureGates:
NetworkPolicyEndPort: true
networking:
disableDefaultCNI: true
podSubnet: $POD_CIDR
Expand Down
6 changes: 3 additions & 3 deletions pkg/controller/networkpolicy/networkpolicy_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -529,11 +529,11 @@ func toAntreaServices(npPorts []networkingv1.NetworkPolicyPort) ([]controlplane.
if npPort.Port != nil && npPort.Port.Type == intstr.String {
namedPortExists = true
}
antreaService := controlplane.Service{
antreaServices = append(antreaServices, controlplane.Service{
Protocol: toAntreaProtocol(npPort.Protocol),
Port: npPort.Port,
}
antreaServices = append(antreaServices, antreaService)
EndPort: npPort.EndPort,
})
}
return antreaServices, namedPortExists
}
Expand Down
57 changes: 56 additions & 1 deletion pkg/controller/networkpolicy/networkpolicy_controller_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -532,6 +532,61 @@ func TestAddNetworkPolicy(t *testing.T) {
expAppliedToGroups: 1,
expAddressGroups: 2,
},
{
name: "rule-with-end-port",
inputPolicy: &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{Namespace: "nsA", Name: "npG", UID: "uidG"},
Spec: networkingv1.NetworkPolicySpec{
PodSelector: selectorA,
Ingress: []networkingv1.NetworkPolicyIngressRule{
{
Ports: []networkingv1.NetworkPolicyPort{
{
Protocol: &k8sProtocolTCP,
Port: &int1000,
EndPort: &int32For1999,
},
},
From: []networkingv1.NetworkPolicyPeer{
{
PodSelector: &selectorB,
},
},
},
},
},
},
expPolicy: &antreatypes.NetworkPolicy{
UID: "uidG",
Name: "uidG",
SourceRef: &controlplane.NetworkPolicyReference{
Type: controlplane.K8sNetworkPolicy,
Namespace: "nsA",
Name: "npG",
UID: "uidG",
},
Rules: []controlplane.NetworkPolicyRule{
{
Direction: controlplane.DirectionIn,
From: controlplane.NetworkPolicyPeer{
AddressGroups: []string{getNormalizedUID(toGroupSelector("nsA", &selectorB, nil, nil).NormalizedName)},
},
Services: []controlplane.Service{
{
Protocol: &protocolTCP,
Port: &int1000,
EndPort: &int32For1999,
},
},
Priority: defaultRulePriority,
Action: &defaultAction,
},
},
AppliedToGroups: []string{getNormalizedUID(toGroupSelector("nsA", &selectorA, nil, nil).NormalizedName)},
},
expAppliedToGroups: 1,
expAddressGroups: 1,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
Expand All @@ -549,7 +604,7 @@ func TestAddNetworkPolicy(t *testing.T) {
for _, tt := range tests {
npc.addNetworkPolicy(tt.inputPolicy)
}
assert.Equal(t, 6, npc.GetNetworkPolicyNum(), "expected networkPolicy number is 6")
assert.Equal(t, 7, npc.GetNetworkPolicyNum(), "expected networkPolicy number is 7")
assert.Equal(t, 4, npc.GetAddressGroupNum(), "expected addressGroup number is 4")
assert.Equal(t, 2, npc.GetAppliedToGroupNum(), "appliedToGroup number is 2")
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -13,3 +13,5 @@ networking:
apiServer:
certSANs:
- "{{ k8s_api_server_ip }}"
extraArgs:
feature-gates: "NetworkPolicyEndPort=true"
Comment on lines +16 to +17
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

you are enabling EndPort in the Vagrant testbed, which is fine by me, but there is no e2e test for EndPort yet?

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I believe @GraysonWu mentioned that he is looking into adding the feature gate in antrea/ci/jenkins/ and antrea/ci/kind/, maybe you could share a bit more info on that front? Also, some amount of documentation on how the feature gate can be enabled for the k8s-apiserver will be very useful.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Just added e2e test and enabled endport feature gate in Kind.
Regarding Jenkins, I asked zhecheng for more information; let's see from his reply whether it supports 1.21.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

As @lzhecheng said, the testbed hasn't been upgraded to 1.21 yet. He will add that feature gate while upgrading.

161 changes: 161 additions & 0 deletions test/e2e/networkpolicy_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -806,6 +806,167 @@ func TestIngressPolicyWithoutPortNumber(t *testing.T) {
}
}

func TestIngressPolicyWithEndPort(t *testing.T) {
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

How the test works if the testbed doesn't support 1.21 yet?

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Same question, maybe we can add some mechanism to check if the feature gate is enabled? Didn't find a clean way but a simple look into the api server process seems to do the trick https://stackoverflow.com/questions/50441452/check-if-the-feature-gate-is-enabled-disabled-in-kubernetes

ps aux | grep apiserver | grep feature-gates
root       15458  7.8 19.7 1449248 402504 ?      Ssl  May23 185:47 kube-apiserver --advertise-address=192.168.77.100  .............. --etcd-servers=https://127.0.0.1:2379 --feature-gates=NetworkPolicyEndPort=true...........

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Good point. Addressed.

skipIfHasWindowsNodes(t)

data, err := setupTest(t)
if err != nil {
t.Fatalf("Error when setting up test: %v", err)
}
defer teardownTest(t, data)

serverPort := int32(80)
serverEndPort := int32(84)
policyPort := int32(81)
policyEndPort := int32(83)

var serverPorts []int32
for i := serverPort; i <= serverEndPort; i++ {
serverPorts = append(serverPorts, i)
}

// makeContainerSpec creates a Container listening on a specific port.
makeContainerSpec := func(port int32) corev1.Container {
return corev1.Container{
Name: fmt.Sprintf("c%d", port),
ImagePullPolicy: corev1.PullIfNotPresent,
Image: agnhostImage,
Command: []string{"/bin/bash", "-c"},
Args: []string{fmt.Sprintf("/agnhost serve-hostname --tcp --http=false --port=%d", port)},
Ports: []corev1.ContainerPort{
{
ContainerPort: port,
Name: fmt.Sprintf("serve-%d", port),
Protocol: corev1.ProtocolTCP,
},
},
}
}

// createAgnhostPodOnNodeWithMultiPort creates a Pod in the test namespace with
// multiple agnhost containers listening on multiple ports.
// The Pod will be scheduled on the specified Node (if nodeName is not empty).
createAgnhostPodOnNodeWithMultiPort := func(name string, nodeName string) error {
var containers []corev1.Container
for _, port := range serverPorts {
containers = append(containers, makeContainerSpec(port))
}
podSpec := corev1.PodSpec{
Containers: containers,
RestartPolicy: corev1.RestartPolicyNever,
HostNetwork: false,
}
if nodeName != "" {
podSpec.NodeSelector = map[string]string{
"kubernetes.io/hostname": nodeName,
}
}
if nodeName == controlPlaneNodeName() {
// tolerate NoSchedule taint if we want Pod to run on control-plane Node
noScheduleToleration := controlPlaneNoScheduleToleration()
podSpec.Tolerations = []corev1.Toleration{noScheduleToleration}
}
pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: map[string]string{
"antrea-e2e": name,
"app": getImageName(agnhostImage),
},
},
Spec: podSpec,
}
if _, err := data.clientset.CoreV1().Pods(testNamespace).Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil {
return err
}
return nil
}

serverName, serverIPs, cleanupFunc := createAndWaitForPod(t, data, createAgnhostPodOnNodeWithMultiPort, "test-server-", "")
defer cleanupFunc()

clientName, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-client-", "")
defer cleanupFunc()

preCheck := func(serverIP string) {
// The client can connect to server on all ports.
for _, port := range serverPorts {
if err = data.runNetcatCommandFromTestPod(clientName, serverIP, port); err != nil {
t.Fatalf("Pod %s should be able to connect %s, but was not able to connect", clientName, net.JoinHostPort(serverIP, fmt.Sprint(port)))
}
}
}

if clusterInfo.podV4NetworkCIDR != "" {
preCheck(serverIPs.ipv4.String())
}
if clusterInfo.podV6NetworkCIDR != "" {
preCheck(serverIPs.ipv6.String())
}

protocol := corev1.ProtocolTCP
spec := &networkingv1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{
MatchLabels: map[string]string{
"antrea-e2e": serverName,
},
},
PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress},
Ingress: []networkingv1.NetworkPolicyIngressRule{
{
Ports: []networkingv1.NetworkPolicyPort{
{
Protocol: &protocol,
Port: &intstr.IntOrString{Type: intstr.Int, IntVal: policyPort},
EndPort: &policyEndPort,
},
},
From: []networkingv1.NetworkPolicyPeer{{
PodSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"antrea-e2e": clientName,
},
}},
},
},
},
}
np, err := data.createNetworkPolicy("test-networkpolicy-ingress-with-endport", spec)
if err != nil {
t.Fatalf("Error when creating NetworkPolicy: %v", err)
}
defer func() {
if err = data.deleteNetworkpolicy(np); err != nil {
t.Errorf("Error when deleting NetworkPolicy: %v", err)
}
}()

if np.Spec.Ingress[0].Ports[0].EndPort == nil {
t.Skipf("Skipping test as the kube-apiserver doesn't support `endPort` " +
"or `NetworkPolicyEndPort` feature-gate is not enabled.")
}

npCheck := func(serverIP string) {
GraysonWu marked this conversation as resolved.
Show resolved Hide resolved
GraysonWu marked this conversation as resolved.
Show resolved Hide resolved
for _, port := range serverPorts {
err = data.runNetcatCommandFromTestPod(clientName, serverIP, port)
if port >= policyPort && port <= policyEndPort {
if err != nil {
t.Errorf("Pod %s should be able to connect %s, but was not able to connect", clientName, net.JoinHostPort(serverIP, fmt.Sprint(port)))
}
} else if err == nil {
t.Errorf("Pod %s should be not able to connect %s, but was able to connect", clientName, net.JoinHostPort(serverIP, fmt.Sprint(port)))
}
}
}

if clusterInfo.podV4NetworkCIDR != "" {
npCheck(serverIPs.ipv4.String())
}
if clusterInfo.podV6NetworkCIDR != "" {
npCheck(serverIPs.ipv6.String())
}
}

func createAndWaitForPod(t *testing.T, data *TestData, createFunc func(name string, nodeName string) error, namePrefix string, nodeName string) (string, *PodIPs, func()) {
name := randName(namePrefix)
if err := createFunc(name, nodeName); err != nil {
Expand Down