Update server Pod template
Signed-off-by: Wenqi Qiu <[email protected]>
wenqiq committed Mar 22, 2024
1 parent 14946d3 commit 7f3206c
Showing 16 changed files with 155 additions and 294 deletions.
2 changes: 1 addition & 1 deletion test/performance/assets/service/server_pod.yaml
@@ -9,7 +9,7 @@ spec:
- -f
image: busybox
imagePullPolicy: IfNotPresent
name: antrea-scale-test-client
name: antrea-scale-test-server
resources:
limits:
cpu: 20m
@@ -124,7 +124,7 @@ func RestartController(ctx context.Context, ch chan time.Duration, data *ScaleDa
prober := fmt.Sprintf("%s:%d", "", antreaapis.AntreaControllerAPIPort)

var clientPod *corev1.Pod
clientPod, err = client_pod.CreatePod(ctx, data.kubernetesClientSet, []string{prober}, client_pod.ScaleClientPodControllerProbeContainer)
clientPod, err = client_pod.CreatePod(ctx, data.kubernetesClientSet, []string{prober}, client_pod.ScaleClientPodControllerProbeContainer, client_pod.ClientPodsNamespace)
if err != nil {
klog.ErrorS(err, "Create client test Pod failed")
return
79 changes: 77 additions & 2 deletions test/performance/framework/client_pod/create.go
@@ -33,12 +33,12 @@ const (
ScaleClientPodServerContainer = "client-pod-server"
ScaleClientPodProbeContainer = "networkpolicy-client-probe"
ScaleClientPodControllerProbeContainer = "controller-client-probe"
ScaleTestPodProbeContainerName = "antrea-scale-client-pod-probe"
)

func CreatePod(ctx context.Context, kClient kubernetes.Interface, probes []string, containerName string) (*corev1.Pod, error) {
func CreatePod(ctx context.Context, kClient kubernetes.Interface, probes []string, containerName, namespace string) (*corev1.Pod, error) {
var err error
var newPod *corev1.Pod
namespace := ClientPodsNamespace
podName := ScaleTestClientPodNamePrefix + uuid.New().String()[:6]
err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
newPod = &corev1.Pod{
@@ -110,3 +110,78 @@ func CreatePod(ctx context.Context, kClient kubernetes.Interface, probes []strin
klog.InfoS("Create Client Pod successfully!")
return newPod, nil
}

//
// func CreateClientPod(ctx context.Context, kClient kubernetes.Interface, namespace, podName string, probes []string, containerName string) (*corev1.Pod, error) {
// var err error
// expectContainerNum := 0
// var newPod *corev1.Pod
// err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
// pod, err := kClient.CoreV1().Pods(namespace).Get(ctx, podName, metav1.GetOptions{})
// if err != nil {
// return err
// }
// var containers []corev1.Container
// for _, probe := range probes {
// l := strings.Split(probe, ":")
// server, port := l[0], l[1]
// if server == "" {
// server = "$NODE_IP"
// }
//
// containers = append(containers, corev1.Container{
// Name: containerName,
// Image: "busybox",
// // read up rest </proc/uptime; t1="${up%.*}${up#*.}"
// Command: []string{"/bin/sh", "-c", fmt.Sprintf("server=%s; output_file=\"ping_log.txt\"; if [ ! -e \"$output_file\" ]; then touch \"$output_file\"; fi; last_status=\"unknown\"; last_change_time=$(adjtimex | awk '/(time.tv_sec|time.tv_usec):/ { printf(\"%%06d\", $2) }' && printf \"\\n\"); while true; do current_time=$(adjtimex | awk '/(time.tv_sec|time.tv_usec):/ { printf(\"%%06d\", $2) }' && printf \"\\n\"); status=$(nc -vz -w 1 \"$server\" %s > /dev/null && echo \"up\" || echo \"down\"); time_diff=$((current_time - last_change_time)); if [ \"$status\" != \"$last_status\" ]; then echo \"$current_time Status changed from $last_status to $status after ${time_diff} nanoseconds\"; echo \"$current_time Status changed from $last_status to $status after ${time_diff} nanoseconds\" >> \"$output_file\"; last_change_time=$current_time; last_status=$status; fi; sleep 0.1; done\n", server, port)},
// ImagePullPolicy: corev1.PullIfNotPresent,
// Env: []corev1.EnvVar{
// {
// Name: "NODE_IP",
// ValueFrom: &corev1.EnvVarSource{
// FieldRef: &corev1.ObjectFieldSelector{
// FieldPath: "status.hostIP",
// },
// },
// },
// },
// })
// }
//
// pod.Spec.Containers = append(pod.Spec.Containers, containers...)
// expectContainerNum = len(pod.Spec.Containers)
//
// newPod = &corev1.Pod{
// ObjectMeta: metav1.ObjectMeta{
// Name: strings.Replace(pod.Name, "server", "client", 1),
// Namespace: pod.Namespace,
// Labels: pod.Labels,
// },
// Spec: pod.Spec,
// }
//
// _, err = kClient.CoreV1().Pods(namespace).Create(ctx, newPod, metav1.CreateOptions{})
// return err
// })
// if err != nil {
// return nil, err
// }
//
// err = wait.PollWithContext(ctx, 3*time.Second, 60*time.Second, func(ctx context.Context) (bool, error) {
// pod, err := kClient.CoreV1().Pods(namespace).Get(ctx, newPod.Name, metav1.GetOptions{})
// if err != nil {
// return false, err
// }
//
// if expectContainerNum == len(pod.Spec.Containers) && pod.Status.Phase == corev1.PodRunning {
// return true, nil
// }
// return false, nil
// })
//
// if err != nil {
// return nil, err
// }
// klog.InfoS("Create Client Pod successfully!")
// return newPod, nil
// }
15 changes: 0 additions & 15 deletions test/performance/framework/component/agent/scale_down.go

This file was deleted.

15 changes: 0 additions & 15 deletions test/performance/framework/component/agent/scale_up.go

This file was deleted.

15 changes: 0 additions & 15 deletions test/performance/framework/component/controller/scale_down.go

This file was deleted.

15 changes: 0 additions & 15 deletions test/performance/framework/component/controller/scale_up.go

This file was deleted.

2 changes: 1 addition & 1 deletion test/performance/framework/networkpolicy/scale_up.go
@@ -112,7 +112,7 @@ func ScaleUp(ctx context.Context, cs kubernetes.Interface, nss []string, numPerN
klog.ErrorS(err, "selectServerPod")
return 0, fmt.Errorf("select server Pod error: %+v", err)
}
clientPod, err = client_pod.CreatePod(ctx, cs, []string{fmt.Sprintf("%s:%d", serverIP, 80)}, client_pod.ScaleClientPodProbeContainer)
clientPod, err = client_pod.CreatePod(ctx, cs, []string{fmt.Sprintf("%s:%d", serverIP, 80)}, client_pod.ScaleClientPodProbeContainer, client_pod.ClientPodsNamespace)
if err != nil {
klog.ErrorS(err, "Create client test Pod failed")
return 0, fmt.Errorf("create client test Pod failed: %+v", err)
test/performance/framework/server_pod.go
@@ -15,14 +15,17 @@
package framework

import (
"bytes"
"context"
"fmt"
yamlutil "k8s.io/apimachinery/pkg/util/yaml"
"os"
"path"
"time"

"github.com/google/uuid"
"golang.org/x/sync/errgroup"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog/v2"
@@ -41,62 +44,48 @@ const (
workloadPodLabelValue = ""
)

var (
workloadPodContainer = corev1.Container{
Name: client_pod.ScaleClientContainerName,
Image: "busybox",
ImagePullPolicy: corev1.PullIfNotPresent,
Command: []string{"httpd", "-f"},
Resources: corev1.ResourceRequirements{
Limits: corev1.ResourceList{
corev1.ResourceMemory: resource.MustParse("64Mi"),
corev1.ResourceCPU: resource.MustParse("20m"),
},
Requests: corev1.ResourceList{
corev1.ResourceMemory: resource.MustParse("32Mi"),
corev1.ResourceCPU: resource.MustParse("10m"),
},
},
func unmarshallServerPod(yamlFile string) (*corev1.Pod, error) {
klog.InfoS("ReadYamlFile", "yamlFile", yamlFile)
podBytes, err := os.ReadFile(yamlFile)
if err != nil {
return nil, fmt.Errorf("error reading YAML file: %+v", err)
}
)
pod := &corev1.Pod{}

func workloadPodTemplate(podName, ns string, labels map[string]string, onRealNode bool) *corev1.Pod {
var affinity *corev1.Affinity
var tolerations []corev1.Toleration
if onRealNode {
affinity = &client_pod.RealNodeAffinity
tolerations = append(tolerations, client_pod.MasterToleration)
} else {
affinity = &client_pod.SimulateAffinity
tolerations = append(tolerations, client_pod.SimulateToleration)
}
labels[workloadPodLabelKey] = workloadPodLabelValue
labels["name"] = podName
return &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Namespace: ns,
Labels: labels,
},
Spec: corev1.PodSpec{
Affinity: affinity,
Containers: []corev1.Container{workloadPodContainer},
RestartPolicy: corev1.RestartPolicyNever,
Tolerations: tolerations,
},
decoder := yamlutil.NewYAMLOrJSONDecoder(bytes.NewReader(podBytes), 100)

if err := decoder.Decode(pod); err != nil {
return nil, fmt.Errorf("error decoding YAML file: %+v", err)
}
return pod, nil
}

func newWorkloadPod(podName, ns string, onRealNode bool, labelNum int) *corev1.Pod {
labels := map[string]string{
client_pod.AppLabelKey: client_pod.AppLabelValue,
"namespace": ns,
fmt.Sprintf("%s%d", utils.SelectorLabelKeySuffix, labelNum): fmt.Sprintf("%s%d", utils.SelectorLabelValueSuffix, labelNum),
func renderServerPods(templatePath string, ns string, num, serviceNum int) (serverPods []*corev1.Pod, err error) {
yamlFile := path.Join(templatePath, "service/server_pod.yaml")
podTemplate, err := unmarshallServerPod(yamlFile)
if err != nil {
err = fmt.Errorf("error reading Service template: %+v", err)
return
}
if onRealNode {
labels[utils.PodOnRealNodeLabelKey] = ""

for i := 0; i < num; i++ {
labelNum := i % serviceNum
podName := fmt.Sprintf("antrea-scale-test-pod-server-%s", uuid.New().String()[:8])
serverPod := &corev1.Pod{Spec: podTemplate.Spec}
serverPod.Name = podName
serverPod.Namespace = ns
serverPod.Labels = map[string]string{
"name": podName,
utils.PodOnRealNodeLabelKey: "",
client_pod.AppLabelKey: client_pod.AppLabelValue,
workloadPodLabelKey: workloadPodLabelValue,
fmt.Sprintf("%s%d", utils.SelectorLabelKeySuffix, labelNum): fmt.Sprintf("%s%d", utils.SelectorLabelValueSuffix, labelNum),
}
serverPod.Spec.Affinity = &client_pod.RealNodeAffinity
serverPods = append(serverPods, serverPod)
}
return workloadPodTemplate(podName, ns, labels, onRealNode)

return
}

func ScaleUpWorkloadPods(ctx context.Context, ch chan time.Duration, data *ScaleData) (res ScaleResult) {
@@ -112,27 +101,24 @@ func ScaleUpWorkloadPods(ctx context.Context, ch chan time.Duration, data *Scale
start := time.Now()
podNum := data.Specification.PodsNumPerNs
res.scaleNum = len(data.namespaces) * podNum
serviceNumPerNs := data.Specification.SvcNumPerNs
count := 0
for _, ns := range data.namespaces {
gErr, _ := errgroup.WithContext(context.Background())
for i := 0; i < podNum; i++ {
// index := i
time.Sleep(time.Duration(utils.GenRandInt()%100) * time.Millisecond)
labelNum := i/2 + 1
var pods []*corev1.Pod
pods, err = renderServerPods(data.templateFilesPath, ns, podNum, serviceNumPerNs)
if err != nil {
return
}
for _, pod := range pods {
gErr.Go(func() error {
podName := fmt.Sprintf("antrea-scale-test-pod-server-%s", uuid.New().String()[:8])
pod := newWorkloadPod(podName, ns, true, labelNum)
// if !data.Specification.RealNode {
// onRealNode := (index % data.nodesNum) >= data.simulateNodesNum
// pod = newWorkloadPod(podName, ns, onRealNode, labelNum)
// }
if _, err := data.kubernetesClientSet.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{}); err != nil {

Check failure on line 115 in test/performance/framework/server_pod.go (GitHub Actions / Golangci-lint, ubuntu-latest and macos-latest): loopclosure: loop variable pod captured by func literal (govet)
return err
}
return nil
})
}
klog.V(2).InfoS("Create workload Pods", "PodNum", podNum, "Namespace", ns)
klog.V(2).InfoS("Create workload Pods", "PodNum", podNum, "Namespace", ns, "Pods", pods)
if err = gErr.Wait(); err != nil {
return
}
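Note on the golangci-lint failure above: it is the classic Go loop-variable capture problem. Before Go 1.22, the range variable is reused across iterations, so every closure launched via gErr.Go may end up observing the last pod in the slice rather than the one for its own iteration. Below is a minimal, self-contained sketch of the conventional fix, re-declaring the variable inside the loop body so each closure captures its own copy; the pods slice and the print statement are illustrative placeholders, not code from this commit.

package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	pods := []string{"server-a", "server-b", "server-c"}
	var g errgroup.Group
	for _, pod := range pods {
		pod := pod // shadow the range variable so each closure captures its own copy
		g.Go(func() error {
			// Stand-in for the Pods(ns).Create(ctx, pod, ...) call in the diff above.
			fmt.Println("creating", pod)
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		fmt.Println("error:", err)
	}
}

With Go 1.22 or newer, each iteration gets a fresh loop variable and the shadowing line becomes unnecessary; govet flags it here presumably because the module still builds with an older toolchain.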
