From eea019c40d914a4464f72d5bfad95f94711a966a Mon Sep 17 00:00:00 2001 From: Abner-1 Date: Wed, 4 Sep 2024 21:02:03 +0800 Subject: [PATCH] merge into default handler and add uts/e2e Signed-off-by: Abner-1 --- .github/workflows/e2e-1.18.yaml | 2 +- .github/workflows/e2e-1.24.yaml | 2 +- .github/workflows/e2e-1.28.yaml | 101 +- Makefile | 6 + apis/apps/pub/inplace_update.go | 14 +- apis/apps/pub/zz_generated.deepcopy.go | 2 +- apis/apps/v1alpha1/cloneset_types.go | 6 +- .../crd/bases/apps.kruise.io_clonesets.yaml | 6 + config/manager/manager.yaml | 4 +- .../cloneset/cloneset_event_handler.go | 7 + pkg/controller/cloneset/core/cloneset_core.go | 11 + .../cloneset/sync/cloneset_sync_utils.go | 7 + .../container_meta_controller.go | 5 +- .../kuberuntime/kuberuntime_container.go | 15 +- pkg/daemon/kuberuntime/labels.go | 5 + pkg/util/inplaceupdate/inplace_update.go | 5 +- .../inplaceupdate/inplace_update_defaults.go | 580 +++------ .../inplace_update_defaults_test.go | 1139 +++++++++++++++++ pkg/util/inplaceupdate/inplace_update_test.go | 5 + .../inplaceupdate/inplace_update_vertical.go | 191 ++- .../inplace_update_vertical_test.go | 445 +++++++ .../pod/validating/pod_unavailable_budget.go | 5 +- pkg/webhook/pod/validating/workloadspread.go | 3 +- test/e2e/apps/daemonset.go | 25 +- test/e2e/apps/inplace_vpa.go | 1106 ++++++++++++++++ test/e2e/framework/cloneset_util.go | 18 +- test/e2e/framework/framework.go | 8 +- test/e2e/framework/statefulset_utils.go | 10 + ...f-none-fg.yaml => kind-conf-with-vpa.yaml} | 4 +- tools/hack/create-cluster.sh | 2 +- tools/hack/run-kruise-e2e-test.sh | 5 +- 31 files changed, 3247 insertions(+), 497 deletions(-) create mode 100644 pkg/util/inplaceupdate/inplace_update_vertical_test.go create mode 100644 test/e2e/apps/inplace_vpa.go rename test/{kind-conf-none-fg.yaml => kind-conf-with-vpa.yaml} (64%) diff --git a/.github/workflows/e2e-1.18.yaml b/.github/workflows/e2e-1.18.yaml index 10f22b8581..e515f5d35e 100644 --- a/.github/workflows/e2e-1.18.yaml +++ b/.github/workflows/e2e-1.18.yaml @@ -34,7 +34,7 @@ jobs: with: node_image: ${{ env.KIND_IMAGE }} cluster_name: ${{ env.KIND_CLUSTER_NAME }} - config: ./test/kind-conf-none-fg.yaml + config: ./test/kind-conf.yaml version: ${{ env.KIND_VERSION }} - name: Install-CSI run: | diff --git a/.github/workflows/e2e-1.24.yaml b/.github/workflows/e2e-1.24.yaml index c86adbdee6..9b468553bc 100644 --- a/.github/workflows/e2e-1.24.yaml +++ b/.github/workflows/e2e-1.24.yaml @@ -35,7 +35,7 @@ jobs: with: node_image: ${{ env.KIND_IMAGE }} cluster_name: ${{ env.KIND_CLUSTER_NAME }} - config: ./test/kind-conf-none-fg.yaml + config: ./test/kind-conf.yaml version: ${{ env.KIND_VERSION }} - name: Install-CSI run: | diff --git a/.github/workflows/e2e-1.28.yaml b/.github/workflows/e2e-1.28.yaml index aafa4dddbf..84bb91b08b 100644 --- a/.github/workflows/e2e-1.28.yaml +++ b/.github/workflows/e2e-1.28.yaml @@ -34,7 +34,7 @@ jobs: with: node_image: ${{ env.KIND_IMAGE }} cluster_name: ${{ env.KIND_CLUSTER_NAME }} - config: ./test/kind-conf-none-fg.yaml + config: ./test/kind-conf-with-vpa.yaml version: ${{ env.KIND_VERSION }} - name: Install-CSI run: | @@ -117,7 +117,7 @@ jobs: with: node_image: ${{ env.KIND_IMAGE }} cluster_name: ${{ env.KIND_CLUSTER_NAME }} - config: ./test/kind-conf-none-fg.yaml + config: ./test/kind-conf-with-vpa.yaml version: ${{ env.KIND_VERSION }} - name: Build image run: | @@ -196,7 +196,7 @@ jobs: with: node_image: ${{ env.KIND_IMAGE }} cluster_name: ${{ env.KIND_CLUSTER_NAME }} - config: ./test/kind-conf-none-fg.yaml 
+ config: ./test/kind-conf-with-vpa.yaml version: ${{ env.KIND_VERSION }} - name: Build image run: | @@ -288,7 +288,7 @@ jobs: with: node_image: ${{ env.KIND_IMAGE }} cluster_name: ${{ env.KIND_CLUSTER_NAME }} - config: ./test/kind-conf-none-fg.yaml + config: ./test/kind-conf-with-vpa.yaml version: ${{ env.KIND_VERSION }} - name: Build image run: | @@ -380,7 +380,7 @@ jobs: with: node_image: ${{ env.KIND_IMAGE }} cluster_name: ${{ env.KIND_CLUSTER_NAME }} - config: ./test/kind-conf-none-fg.yaml + config: ./test/kind-conf-with-vpa.yaml version: ${{ env.KIND_VERSION }} - name: Build image run: | @@ -472,7 +472,7 @@ jobs: with: node_image: ${{ env.KIND_IMAGE }} cluster_name: ${{ env.KIND_CLUSTER_NAME }} - config: ./test/kind-conf-none-fg.yaml + config: ./test/kind-conf-with-vpa.yaml version: ${{ env.KIND_VERSION }} - name: Build image run: | @@ -542,7 +542,7 @@ jobs: with: node_image: ${{ env.KIND_IMAGE }} cluster_name: ${{ env.KIND_CLUSTER_NAME }} - config: ./test/kind-conf-none-fg.yaml + config: ./test/kind-conf-with-vpa.yaml version: ${{ env.KIND_VERSION }} - name: Build image run: | @@ -592,6 +592,89 @@ jobs: done < <(kubectl get pods -n kruise-system -l control-plane=controller-manager --no-headers | awk '{print $1}') fi exit $retVal + clonesetAndInplace: + runs-on: ubuntu-20.04 + steps: + - uses: actions/checkout@v4 + with: + submodules: true + - name: Setup Go + uses: actions/setup-go@v5 + with: + go-version: ${{ env.GO_VERSION }} + - name: Setup Kind Cluster + uses: helm/kind-action@v1.10.0 + with: + node_image: ${{ env.KIND_IMAGE }} + cluster_name: ${{ env.KIND_CLUSTER_NAME }} + config: ./test/kind-conf-with-vpa.yaml + version: ${{ env.KIND_VERSION }} + - name: Install-CSI + run: | + make install-csi + + - name: Build image + run: | + export IMAGE="openkruise/kruise-manager:e2e-${GITHUB_RUN_ID}" + docker build --pull --no-cache . -t $IMAGE + kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; } + - name: Install Kruise + run: | + set -ex + kubectl cluster-info + IMG=openkruise/kruise-manager:e2e-${GITHUB_RUN_ID} ./scripts/deploy_kind.sh + NODES=$(kubectl get node | wc -l) + for ((i=1;i<10;i++)); + do + set +e + PODS=$(kubectl get pod -n kruise-system | grep '1/1' | wc -l) + set -e + if [ "$PODS" -eq "$NODES" ]; then + break + fi + sleep 3 + done + set +e + PODS=$(kubectl get pod -n kruise-system | grep '1/1' | wc -l) + kubectl get node -o yaml + kubectl get all -n kruise-system -o yaml + kubectl get pod -n kruise-system --no-headers | grep daemon | awk '{print $1}' | xargs kubectl logs -n kruise-system + kubectl get pod -n kruise-system --no-headers | grep daemon | awk '{print $1}' | xargs kubectl logs -n kruise-system --previous=true + set -e + if [ "$PODS" -eq "$NODES" ]; then + echo "Wait for kruise-manager and kruise-daemon ready successfully" + else + echo "Timeout to wait for kruise-manager and kruise-daemon ready" + exit 1 + fi + - name: Run E2E Tests + run: | + export KUBECONFIG=/home/runner/.kube/config + make ginkgo + set +e + ./bin/ginkgo -p -timeout 120m -v --focus='\[apps\] (InplaceVPA)' test/e2e + retVal=$? + restartCount=$(kubectl get pod -n kruise-system -l control-plane=controller-manager --no-headers | awk '{print $4}') + if [ "${restartCount}" -eq "0" ];then + echo "Kruise-manager has not restarted" + else + kubectl get pod -n kruise-system -l control-plane=controller-manager --no-headers + echo "Kruise-manager has restarted, abort!!!" 
+ kubectl get pod -n kruise-system --no-headers -l control-plane=controller-manager | awk '{print $1}' | xargs kubectl logs -p -n kruise-system + exit 1 + fi + if [ "$retVal" -ne 0 ];then + echo "test fail, dump kruise-manager logs" + while read pod; do + kubectl logs -n kruise-system $pod + done < <(kubectl get pods -n kruise-system -l control-plane=controller-manager --no-headers | awk '{print $1}') + echo "test fail, dump kruise-daemon logs" + while read pod; do + kubectl logs -n kruise-system $pod + done < <(kubectl get pods -n kruise-system -l control-plane=daemon --no-headers | awk '{print $1}') + fi + exit $retVal + other: runs-on: ubuntu-20.04 steps: @@ -607,7 +690,7 @@ jobs: with: node_image: ${{ env.KIND_IMAGE }} cluster_name: ${{ env.KIND_CLUSTER_NAME }} - config: ./test/kind-conf-none-fg.yaml + config: ./test/kind-conf-with-vpa.yaml version: ${{ env.KIND_VERSION }} - name: Build image run: | @@ -648,7 +731,7 @@ jobs: export KUBECONFIG=/home/runner/.kube/config make ginkgo set +e - ./bin/ginkgo -timeout 90m -v --skip='\[apps\] (AppStatefulSetStorage|StatefulSet|PullImage|PullImages|ContainerRecreateRequest|DaemonSet|SidecarSet|EphemeralJob)' --skip='\[policy\] PodUnavailableBudget' test/e2e + ./bin/ginkgo -timeout 90m -v --skip='\[apps\] (InplaceVPA|AppStatefulSetStorage|StatefulSet|PullImage|PullImages|ContainerRecreateRequest|DaemonSet|SidecarSet|EphemeralJob)' --skip='\[policy\] PodUnavailableBudget' test/e2e retVal=$? restartCount=$(kubectl get pod -n kruise-system -l control-plane=controller-manager --no-headers | awk '{print $4}') if [ "${restartCount}" -eq "0" ];then diff --git a/Makefile b/Makefile index f52adf2803..9074fd90b9 100644 --- a/Makefile +++ b/Makefile @@ -152,9 +152,15 @@ endif create-cluster: $(tools/kind) tools/hack/create-cluster.sh +DISABLE_CSI ?= false + .PHONY: install-csi install-csi: +ifeq ($(DISABLE_CSI), true) + @echo "CSI is disabled, skip" +else cd tools/hack/csi-driver-host-path; ./install-snapshot.sh +endif # delete-cluster deletes a kube cluster. .PHONY: delete-cluster diff --git a/apis/apps/pub/inplace_update.go b/apis/apps/pub/inplace_update.go index a26cf1a886..591e6c7ee2 100644 --- a/apis/apps/pub/inplace_update.go +++ b/apis/apps/pub/inplace_update.go @@ -62,6 +62,12 @@ type InPlaceUpdateState struct { // UpdateEnvFromMetadata indicates there are envs from annotations/labels that should be in-place update. UpdateEnvFromMetadata bool `json:"updateEnvFromMetadata,omitempty"` + // UpdateResources indicates there are resources that should be in-place update. + UpdateResources bool `json:"updateResources,omitempty"` + + // VerticalUpdateOnly indicates there are only vertical update in this revision. + VerticalUpdateOnly bool `json:"verticalUpdateOnly"` + // NextContainerImages is the containers with lower priority that waiting for in-place update images in next batch. NextContainerImages map[string]string `json:"nextContainerImages,omitempty"` @@ -94,8 +100,8 @@ type InPlaceUpdateContainerBatch struct { // InPlaceUpdateContainerStatus records the statuses of the container that are mainly used // to determine whether the InPlaceUpdate is completed. type InPlaceUpdateContainerStatus struct { - ImageID string `json:"imageID,omitempty"` - Resource v1.ResourceRequirements `json:"resource,omitempty"` + ImageID string `json:"imageID,omitempty"` + Resources v1.ResourceRequirements `json:"resources,omitempty"` } // InPlaceUpdateStrategy defines the strategies for in-place update. 
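For illustration, a minimal sketch of how the new InPlaceUpdateState fields fit together for a resources-only in-place update; the container name "nginx", the revision name, and the resource values here are hypothetical, while the field names are taken from the type definitions above:

package main

import (
	"encoding/json"
	"fmt"

	appspub "github.com/openkruise/kruise/apis/apps/pub"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// A revision that only resizes container resources: UpdateResources and
	// VerticalUpdateOnly are both set, and the pre-resize requirements are
	// recorded per container so that completion can later be checked against
	// the pod status.
	state := appspub.InPlaceUpdateState{
		Revision:           "new-revision",
		UpdateResources:    true,
		VerticalUpdateOnly: true,
		LastContainerStatuses: map[string]appspub.InPlaceUpdateContainerStatus{
			"nginx": {
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")},
				},
			},
		},
	}
	b, _ := json.Marshal(state)
	fmt.Println(string(b)) // value stored in the pod's in-place-update state annotation
}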
@@ -144,6 +150,10 @@ type RuntimeContainerHashes struct { // PlainHash is the hash that directly calculated from pod.spec.container[x]. // Usually it is calculated by Kubelet and will be in annotation of each runtime container. PlainHash uint64 `json:"plainHash"` + // PlainHashWithoutResources is the hash that directly calculated from pod.spec.container[x] + // over fields with Resources field zero'd out. + // Usually it is calculated by Kubelet and will be in annotation of each runtime container. + PlainHashWithoutResources uint64 `json:"plainHashWithoutResources"` // ExtractedEnvFromMetadataHash is the hash that calculated from pod.spec.container[x], // whose envs from annotations/labels have already been extracted to the real values. ExtractedEnvFromMetadataHash uint64 `json:"extractedEnvFromMetadataHash,omitempty"` diff --git a/apis/apps/pub/zz_generated.deepcopy.go b/apis/apps/pub/zz_generated.deepcopy.go index 6565bf4f7d..4f68f626db 100644 --- a/apis/apps/pub/zz_generated.deepcopy.go +++ b/apis/apps/pub/zz_generated.deepcopy.go @@ -49,7 +49,7 @@ func (in *InPlaceUpdateContainerBatch) DeepCopy() *InPlaceUpdateContainerBatch { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *InPlaceUpdateContainerStatus) DeepCopyInto(out *InPlaceUpdateContainerStatus) { *out = *in - in.Resource.DeepCopyInto(&out.Resource) + in.Resources.DeepCopyInto(&out.Resources) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InPlaceUpdateContainerStatus. diff --git a/apis/apps/v1alpha1/cloneset_types.go b/apis/apps/v1alpha1/cloneset_types.go index 860f6ae9c9..0b09ea7e69 100644 --- a/apis/apps/v1alpha1/cloneset_types.go +++ b/apis/apps/v1alpha1/cloneset_types.go @@ -17,10 +17,11 @@ limitations under the License. package v1alpha1 import ( - appspub "github.com/openkruise/kruise/apis/apps/pub" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" + + appspub "github.com/openkruise/kruise/apis/apps/pub" ) const ( @@ -178,6 +179,8 @@ type CloneSetStatus struct { // UpdatedAvailableReplicas is the number of Pods created by the CloneSet controller from the CloneSet version // indicated by updateRevision and have a Ready Condition for at least minReadySeconds. + // Notice: when enable InPlaceWorkloadVerticalScaling, only resource resize updating pod will also be unavailable. + // This means these pod will be counted in maxUnavailable. UpdatedAvailableReplicas int32 `json:"updatedAvailableReplicas,omitempty"` // ExpectedUpdatedReplicas is the number of Pods that should be updated by CloneSet controller. @@ -237,6 +240,7 @@ type CloneSetCondition struct { // +kubebuilder:printcolumn:name="DESIRED",type="integer",JSONPath=".spec.replicas",description="The desired number of pods." // +kubebuilder:printcolumn:name="UPDATED",type="integer",JSONPath=".status.updatedReplicas",description="The number of pods updated." // +kubebuilder:printcolumn:name="UPDATED_READY",type="integer",JSONPath=".status.updatedReadyReplicas",description="The number of pods updated and ready." +// +kubebuilder:printcolumn:name="UPDATED_AVAILABLE",type="integer",JSONPath=".status.updatedAvailableReplicas",description="The number of pods updated and available." // +kubebuilder:printcolumn:name="READY",type="integer",JSONPath=".status.readyReplicas",description="The number of pods ready." 
// +kubebuilder:printcolumn:name="TOTAL",type="integer",JSONPath=".status.replicas",description="The number of currently all pods." // +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp",description="CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC." diff --git a/config/crd/bases/apps.kruise.io_clonesets.yaml b/config/crd/bases/apps.kruise.io_clonesets.yaml index 0be49002ff..d901d37d5d 100644 --- a/config/crd/bases/apps.kruise.io_clonesets.yaml +++ b/config/crd/bases/apps.kruise.io_clonesets.yaml @@ -29,6 +29,10 @@ spec: jsonPath: .status.updatedReadyReplicas name: UPDATED_READY type: integer + - description: The number of pods updated and available. + jsonPath: .status.updatedAvailableReplicas + name: UPDATED_AVAILABLE + type: integer - description: The number of pods ready. jsonPath: .status.readyReplicas name: READY @@ -512,6 +516,8 @@ spec: description: |- UpdatedAvailableReplicas is the number of Pods created by the CloneSet controller from the CloneSet version indicated by updateRevision and have a Ready Condition for at least minReadySeconds. + Notice: when enable InPlaceWorkloadVerticalScaling, only resource resize updating pod will also be unavailable. + This means these pod will be counted in maxUnavailable. format: int32 type: integer updatedReadyReplicas: diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index 6aacbf55f8..4544cf9f8b 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -60,8 +60,8 @@ spec: port: 8000 resources: limits: - cpu: 100m - memory: 200Mi + cpu: 2 + memory: 2Gi requests: cpu: 100m memory: 200Mi diff --git a/pkg/controller/cloneset/cloneset_event_handler.go b/pkg/controller/cloneset/cloneset_event_handler.go index 1c2ad58fad..d618f13d2b 100644 --- a/pkg/controller/cloneset/cloneset_event_handler.go +++ b/pkg/controller/cloneset/cloneset_event_handler.go @@ -138,6 +138,13 @@ func (e *podEventHandler) Update(ctx context.Context, evt event.UpdateEvent, q w // If it has a ControllerRef, that's all that matters. 
if curControllerRef != nil { + // TODO(Abner-1): delete it when fixes only resize resource + //old, _ := json.Marshal(oldPod) + //cur, _ := json.Marshal(curPod) + //patches, _ := jsonpatch.CreatePatch(old, cur) + //pjson, _ := json.Marshal(patches) + //klog.V(4).InfoS("Pod updated json", "pod", klog.KObj(curPod), "patch", pjson) + req := resolveControllerRef(curPod.Namespace, curControllerRef) if req == nil { return diff --git a/pkg/controller/cloneset/core/cloneset_core.go b/pkg/controller/cloneset/core/cloneset_core.go index 5c26b22861..deeb88e061 100644 --- a/pkg/controller/cloneset/core/cloneset_core.go +++ b/pkg/controller/cloneset/core/cloneset_core.go @@ -32,6 +32,8 @@ import ( appspub "github.com/openkruise/kruise/apis/apps/pub" appsv1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1" clonesetutils "github.com/openkruise/kruise/pkg/controller/cloneset/utils" + "github.com/openkruise/kruise/pkg/features" + utilfeature "github.com/openkruise/kruise/pkg/util/feature" "github.com/openkruise/kruise/pkg/util/inplaceupdate" ) @@ -211,6 +213,15 @@ func (c *commonControl) IgnorePodUpdateEvent(oldPod, curPod *v1.Pod) bool { } } } + // only inplace resource resize + if utilfeature.DefaultFeatureGate.Enabled(features.InPlaceWorkloadVerticalScaling) && + len(curPod.Labels) > 0 && appspub.LifecycleStateType(curPod.Labels[appspub.LifecycleStateKey]) != appspub.LifecycleStateNormal { + opts := c.GetUpdateOptions() + opts = inplaceupdate.SetOptionsDefaults(opts) + if err := containersUpdateCompleted(curPod, opts.CheckContainersUpdateCompleted); err == nil { + return false + } + } return true } diff --git a/pkg/controller/cloneset/sync/cloneset_sync_utils.go b/pkg/controller/cloneset/sync/cloneset_sync_utils.go index b26aebb5ce..ee7014735e 100644 --- a/pkg/controller/cloneset/sync/cloneset_sync_utils.go +++ b/pkg/controller/cloneset/sync/cloneset_sync_utils.go @@ -17,6 +17,7 @@ limitations under the License. package sync import ( + "encoding/json" "flag" "math" "reflect" @@ -87,6 +88,12 @@ func (e expectationDiffs) isEmpty() bool { return reflect.DeepEqual(e, expectationDiffs{}) } +// String implement this to print information in klog +func (e expectationDiffs) String() string { + b, _ := json.Marshal(e) + return string(b) +} + type IsPodUpdateFunc func(pod *v1.Pod, updateRevision string) bool // This is the most important algorithm in cloneset-controller. 
diff --git a/pkg/daemon/containermeta/container_meta_controller.go b/pkg/daemon/containermeta/container_meta_controller.go index 7ba912166d..dbaf5e748e 100644 --- a/pkg/daemon/containermeta/container_meta_controller.go +++ b/pkg/daemon/containermeta/container_meta_controller.go @@ -344,7 +344,10 @@ func (c *Controller) manageContainerMetaSet(pod *v1.Pod, kubePodStatus *kubeletc Name: status.Name, ContainerID: status.ID.String(), RestartCount: int32(status.RestartCount), - Hashes: appspub.RuntimeContainerHashes{PlainHash: status.Hash}, + Hashes: appspub.RuntimeContainerHashes{ + PlainHash: status.Hash, + PlainHashWithoutResources: status.HashWithoutResources, + }, } } if utilfeature.DefaultFeatureGate.Enabled(features.InPlaceUpdateEnvFromMetadata) { diff --git a/pkg/daemon/kuberuntime/kuberuntime_container.go b/pkg/daemon/kuberuntime/kuberuntime_container.go index 7e15b1772a..45a4082aff 100644 --- a/pkg/daemon/kuberuntime/kuberuntime_container.go +++ b/pkg/daemon/kuberuntime/kuberuntime_container.go @@ -92,13 +92,14 @@ func toKubeContainerStatus(status *runtimeapi.ContainerStatus, runtimeName strin Type: runtimeName, ID: status.Id, }, - Name: labeledInfo.ContainerName, - Image: status.Image.Image, - ImageID: status.ImageRef, - Hash: annotatedInfo.Hash, - RestartCount: annotatedInfo.RestartCount, - State: toKubeContainerState(status.State), - CreatedAt: time.Unix(0, status.CreatedAt), + Name: labeledInfo.ContainerName, + Image: status.Image.Image, + ImageID: status.ImageRef, + Hash: annotatedInfo.Hash, + HashWithoutResources: annotatedInfo.HashWithoutResources, + RestartCount: annotatedInfo.RestartCount, + State: toKubeContainerState(status.State), + CreatedAt: time.Unix(0, status.CreatedAt), } if status.State != runtimeapi.ContainerState_CONTAINER_CREATED { diff --git a/pkg/daemon/kuberuntime/labels.go b/pkg/daemon/kuberuntime/labels.go index 732af841e8..aec5f165a7 100644 --- a/pkg/daemon/kuberuntime/labels.go +++ b/pkg/daemon/kuberuntime/labels.go @@ -33,6 +33,7 @@ const ( podTerminationGracePeriodLabel = "io.kubernetes.pod.terminationGracePeriod" containerHashLabel = "io.kubernetes.container.hash" + containerHashWithoutResourcesLabel = "io.kubernetes.container.hashWithoutResources" containerRestartCountLabel = "io.kubernetes.container.restartCount" containerTerminationMessagePathLabel = "io.kubernetes.container.terminationMessagePath" containerTerminationMessagePolicyLabel = "io.kubernetes.container.terminationMessagePolicy" @@ -49,6 +50,7 @@ type labeledContainerInfo struct { type annotatedContainerInfo struct { Hash uint64 + HashWithoutResources uint64 RestartCount int PodDeletionGracePeriod *int64 PodTerminationGracePeriod *int64 @@ -79,6 +81,9 @@ func getContainerInfoFromAnnotations(annotations map[string]string) *annotatedCo if containerInfo.Hash, err = getUint64ValueFromLabel(annotations, containerHashLabel); err != nil { klog.ErrorS(err, "Unable to get label from annotations", "label", containerHashLabel, "annotations", annotations) } + if containerInfo.HashWithoutResources, err = getUint64ValueFromLabel(annotations, containerHashWithoutResourcesLabel); err != nil { + klog.ErrorS(err, "Unable to get label from annotations", "label", containerHashWithoutResourcesLabel, "annotations", annotations) + } if containerInfo.RestartCount, err = getIntValueFromLabel(annotations, containerRestartCountLabel); err != nil { klog.ErrorS(err, "Unable to get label from annotations", "label", containerRestartCountLabel, "annotations", annotations) } diff --git 
a/pkg/util/inplaceupdate/inplace_update.go b/pkg/util/inplaceupdate/inplace_update.go index 1dde061000..1eb0ee427f 100644 --- a/pkg/util/inplaceupdate/inplace_update.go +++ b/pkg/util/inplaceupdate/inplace_update.go @@ -69,6 +69,7 @@ type UpdateOptions struct { PatchSpecToPod func(pod *v1.Pod, spec *UpdateSpec, state *appspub.InPlaceUpdateState) (*v1.Pod, error) CheckPodUpdateCompleted func(pod *v1.Pod) error CheckContainersUpdateCompleted func(pod *v1.Pod, state *appspub.InPlaceUpdateState) error + CheckPodNeedsBeUnready func(pod *v1.Pod, spec *UpdateSpec) bool GetRevision func(rev *apps.ControllerRevision) string } @@ -291,7 +292,7 @@ func (c *realControl) Update(pod *v1.Pod, oldRevision, newRevision *apps.Control // 2. update condition for pod with readiness-gate // When only workload resources are updated, they are marked as not needing to remove traffic - if !spec.VerticalUpdateOnly && containsReadinessGate(pod) { + if opts.CheckPodNeedsBeUnready(pod, spec) { newCondition := v1.PodCondition{ Type: appspub.InPlaceUpdateReady, LastTransitionTime: metav1.NewTime(Clock.Now()), @@ -337,6 +338,8 @@ func (c *realControl) updatePodInPlace(pod *v1.Pod, spec *UpdateSpec, opts *Upda Revision: spec.Revision, UpdateTimestamp: metav1.NewTime(Clock.Now()), UpdateEnvFromMetadata: spec.UpdateEnvFromMetadata, + VerticalUpdateOnly: spec.VerticalUpdateOnly, + UpdateResources: len(spec.ContainerResources) > 0, } inPlaceUpdateStateJSON, _ := json.Marshal(inPlaceUpdateState) clone.Annotations[appspub.InPlaceUpdateStateKey] = string(inPlaceUpdateStateJSON) diff --git a/pkg/util/inplaceupdate/inplace_update_defaults.go b/pkg/util/inplaceupdate/inplace_update_defaults.go index fe4c5b2d85..c96863fc5b 100644 --- a/pkg/util/inplaceupdate/inplace_update_defaults.go +++ b/pkg/util/inplaceupdate/inplace_update_defaults.go @@ -34,7 +34,6 @@ import ( apps "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/strategicpatch" @@ -47,40 +46,24 @@ func SetOptionsDefaults(opts *UpdateOptions) *UpdateOptions { opts = &UpdateOptions{} } - if utilfeature.DefaultFeatureGate.Enabled(features.InPlaceWorkloadVerticalScaling) { - registerVerticalUpdate() - - if opts.CalculateSpec == nil { - opts.CalculateSpec = defaultCalculateInPlaceUpdateSpecWithVerticalUpdate - } - - if opts.PatchSpecToPod == nil { - opts.PatchSpecToPod = defaultPatchUpdateSpecToPodWithVerticalUpdate - } - - if opts.CheckPodUpdateCompleted == nil { - opts.CheckPodUpdateCompleted = DefaultCheckInPlaceUpdateCompletedWithVerticalUpdate - } + if opts.CalculateSpec == nil { + opts.CalculateSpec = defaultCalculateInPlaceUpdateSpec + } - if opts.CheckContainersUpdateCompleted == nil { - opts.CheckContainersUpdateCompleted = defaultCheckContainersInPlaceUpdateCompletedWithVerticalUpdate - } - } else { - if opts.CalculateSpec == nil { - opts.CalculateSpec = defaultCalculateInPlaceUpdateSpec - } + if opts.PatchSpecToPod == nil { + opts.PatchSpecToPod = defaultPatchUpdateSpecToPod + } - if opts.PatchSpecToPod == nil { - opts.PatchSpecToPod = defaultPatchUpdateSpecToPod - } + if opts.CheckPodUpdateCompleted == nil { + opts.CheckPodUpdateCompleted = DefaultCheckInPlaceUpdateCompleted + } - if opts.CheckPodUpdateCompleted == nil { - opts.CheckPodUpdateCompleted = DefaultCheckInPlaceUpdateCompleted - } + if opts.CheckContainersUpdateCompleted == nil { + opts.CheckContainersUpdateCompleted = defaultCheckContainersInPlaceUpdateCompleted 
+ } - if opts.CheckContainersUpdateCompleted == nil { - opts.CheckContainersUpdateCompleted = defaultCheckContainersInPlaceUpdateCompleted - } + if opts.CheckPodNeedsBeUnready == nil { + opts.CheckPodNeedsBeUnready = defaultCheckPodNeedsBeUnready } return opts @@ -88,7 +71,6 @@ func SetOptionsDefaults(opts *UpdateOptions) *UpdateOptions { // defaultPatchUpdateSpecToPod returns new pod that merges spec into old pod func defaultPatchUpdateSpecToPod(pod *v1.Pod, spec *UpdateSpec, state *appspub.InPlaceUpdateState) (*v1.Pod, error) { - klog.V(5).InfoS("Begin to in-place update pod", "namespace", pod.Namespace, "name", pod.Name, "spec", util.DumpJSON(spec), "state", util.DumpJSON(state)) state.NextContainerImages = make(map[string]string) @@ -122,7 +104,8 @@ func defaultPatchUpdateSpecToPod(pod *v1.Pod, spec *UpdateSpec, state *appspub.I c := &pod.Spec.Containers[i] _, existImage := spec.ContainerImages[c.Name] _, existMetadata := spec.ContainerRefMetadata[c.Name] - if !existImage && !existMetadata { + _, existResource := spec.ContainerResources[c.Name] + if !existImage && !existMetadata && !existResource { continue } priority := utilcontainerlaunchpriority.GetContainerPriority(c) @@ -144,28 +127,61 @@ func defaultPatchUpdateSpecToPod(pod *v1.Pod, spec *UpdateSpec, state *appspub.I // update images and record current imageIDs for the containers to update containersImageChanged := sets.NewString() + containersResourceChanged := sets.NewString() for i := range pod.Spec.Containers { c := &pod.Spec.Containers[i] - newImage, exists := spec.ContainerImages[c.Name] - if !exists { + newImage, imageExists := spec.ContainerImages[c.Name] + newResource, resourceExists := spec.ContainerResources[c.Name] + if !imageExists && !resourceExists { continue } if containersToUpdate.Has(c.Name) { - pod.Spec.Containers[i].Image = newImage - containersImageChanged.Insert(c.Name) + if imageExists { + pod.Spec.Containers[i].Image = newImage + containersImageChanged.Insert(c.Name) + } + + if resourceExists && utilfeature.DefaultFeatureGate.Enabled(features.InPlaceWorkloadVerticalScaling) { + verticalUpdateOperator.UpdateContainerResource(c, &newResource) + containersResourceChanged.Insert(c.Name) + } } else { - state.NextContainerImages[c.Name] = newImage + if imageExists { + state.NextContainerImages[c.Name] = newImage + } + if resourceExists && utilfeature.DefaultFeatureGate.Enabled(features.InPlaceWorkloadVerticalScaling) { + state.NextContainerResources[c.Name] = newResource + } } } + for _, c := range pod.Status.ContainerStatuses { if containersImageChanged.Has(c.Name) { if state.LastContainerStatuses == nil { state.LastContainerStatuses = map[string]appspub.InPlaceUpdateContainerStatus{} } - state.LastContainerStatuses[c.Name] = appspub.InPlaceUpdateContainerStatus{ImageID: c.ImageID} + if cs, ok := state.LastContainerStatuses[c.Name]; !ok { + state.LastContainerStatuses[c.Name] = appspub.InPlaceUpdateContainerStatus{ImageID: c.ImageID} + } else { + // now just update imageID + cs.ImageID = c.ImageID + } + } + if containersResourceChanged.Has(c.Name) && utilfeature.DefaultFeatureGate.Enabled(features.InPlaceWorkloadVerticalScaling) { + verticalUpdateOperator.SyncContainerResource(&c, state) } } + // TODO: Is there a case in which we should update resource together when only resource changed? 
+ // assume a case: container1 1G container2 2G => container1 2G container2 1G + + // This provides a hook for vertical updates + // so that internal enterprise implementations can update+sync pod resources here at once + if utilfeature.DefaultFeatureGate.Enabled(features.InPlaceWorkloadVerticalScaling) { + verticalUpdateOperator.UpdatePodResource(pod) + verticalUpdateOperator.SyncPodResource(pod, state) + } + // update annotations and labels for the containers to update for cName, objMeta := range spec.ContainerRefMetadata { if containersToUpdate.Has(cName) { @@ -183,7 +199,7 @@ func defaultPatchUpdateSpecToPod(pod *v1.Pod, spec *UpdateSpec, state *appspub.I // add the containers that update this time into PreCheckBeforeNext, so that next containers can only // start to update when these containers have updated ready // TODO: currently we only support ContainersRequiredReady, not sure if we have to add ContainersPreferredReady in future - if len(state.NextContainerImages) > 0 || len(state.NextContainerRefMetadata) > 0 { + if len(state.NextContainerImages) > 0 || len(state.NextContainerRefMetadata) > 0 || len(state.NextContainerResources) > 0 { state.PreCheckBeforeNext = &appspub.InPlaceUpdatePreCheckBeforeNext{ContainersRequiredReady: containersToUpdate.List()} } else { state.PreCheckBeforeNext = nil @@ -302,16 +318,31 @@ func defaultCalculateInPlaceUpdateSpec(oldRevision, newRevision *apps.Controller } return nil } - if op.Operation != "replace" || !containerImagePatchRexp.MatchString(op.Path) { + + if op.Operation != "replace" { return nil } - // for example: /spec/containers/0/image - words := strings.Split(op.Path, "/") - idx, _ := strconv.Atoi(words[3]) - if len(oldTemp.Spec.Containers) <= idx { - return nil + if containerImagePatchRexp.MatchString(op.Path) { + // for example: /spec/containers/0/image + words := strings.Split(op.Path, "/") + idx, _ := strconv.Atoi(words[3]) + if len(oldTemp.Spec.Containers) <= idx { + return nil + } + updateSpec.ContainerImages[oldTemp.Spec.Containers[idx].Name] = op.Value.(string) + continue + } + + if utilfeature.DefaultFeatureGate.Enabled(features.InPlaceWorkloadVerticalScaling) && + containerResourcesPatchRexp.MatchString(op.Path) { + err = verticalUpdateOperator.ValidateResourcePatch(&op, oldTemp, updateSpec) + if err != nil { + klog.InfoS("ValidateResourcePatch error", "err", err) + return nil + } + continue } - updateSpec.ContainerImages[oldTemp.Spec.Containers[idx].Name] = op.Value.(string) + return nil } if len(metadataPatches) > 0 { @@ -368,6 +399,11 @@ func defaultCalculateInPlaceUpdateSpec(oldRevision, newRevision *apps.Controller } updateSpec.MetaDataPatch = patchBytes } + // Need to distinguish whether only resources have been updated + if len(updateSpec.ContainerResources) > 0 && len(updateSpec.ContainerImages) == 0 && !updateSpec.UpdateEnvFromMetadata { + updateSpec.VerticalUpdateOnly = true + } + return updateSpec } @@ -385,10 +421,16 @@ func DefaultCheckInPlaceUpdateCompleted(pod *v1.Pod) error { } else if err := json.Unmarshal([]byte(stateStr), &inPlaceUpdateState); err != nil { return err } - if len(inPlaceUpdateState.NextContainerImages) > 0 || len(inPlaceUpdateState.NextContainerRefMetadata) > 0 { + if len(inPlaceUpdateState.NextContainerImages) > 0 || len(inPlaceUpdateState.NextContainerRefMetadata) > 0 || len(inPlaceUpdateState.NextContainerResources) > 0 { return fmt.Errorf("existing containers to in-place update in next batches") } + if utilfeature.DefaultFeatureGate.Enabled(features.InPlaceWorkloadVerticalScaling) { + if ok 
:= verticalUpdateOperator.IsPodUpdateCompleted(pod); !ok { + return fmt.Errorf("waiting for pod vertical update") + } + } + return defaultCheckContainersInPlaceUpdateCompleted(pod, &inPlaceUpdateState) } @@ -407,8 +449,29 @@ func defaultCheckContainersInPlaceUpdateCompleted(pod *v1.Pod, inPlaceUpdateStat } } + // only UpdateResources, we check resources in status updated + if utilfeature.DefaultFeatureGate.Enabled(features.InPlaceWorkloadVerticalScaling) && inPlaceUpdateState.UpdateResources { + containers := make(map[string]*v1.Container, len(pod.Spec.Containers)) + for i := range pod.Spec.Containers { + c := &pod.Spec.Containers[i] + containers[c.Name] = c + } + for _, cs := range pod.Status.ContainerStatuses { + if oldStatus, ok := inPlaceUpdateState.LastContainerStatuses[cs.Name]; ok { + if !verticalUpdateOperator.IsContainerUpdateCompleted(pod, containers[cs.Name], &cs, oldStatus) { + return fmt.Errorf("container %s resources not changed", cs.Name) + } + } + } + } + if runtimeContainerMetaSet != nil { - if checkAllContainersHashConsistent(pod, runtimeContainerMetaSet, plainHash) { + metaHashType := plainHash + if utilfeature.DefaultFeatureGate.Enabled(features.InPlaceWorkloadVerticalScaling) && inPlaceUpdateState.UpdateResources { + // if vertical scaling is enabled and update resources, we should compare plainHashWithoutResources + metaHashType = plainHashWithoutResources + } + if checkAllContainersHashConsistent(pod, runtimeContainerMetaSet, metaHashType) { klog.V(5).InfoS("Check Pod in-place update completed for all container hash consistent", "namespace", pod.Namespace, "name", pod.Name) return nil } @@ -448,6 +511,7 @@ type hashType string const ( plainHash hashType = "PlainHash" + plainHashWithoutResources hashType = "PlainHashWithoutResources" extractedEnvFromMetadataHash hashType = "ExtractedEnvFromMetadataHash" ) @@ -498,6 +562,15 @@ func checkAllContainersHashConsistent(pod *v1.Pod, runtimeContainerMetaSet *apps "metaHash", containerMeta.Hashes.PlainHash, "expectedHash", expectedHash) return false } + case plainHashWithoutResources: + containerSpecCopy := containerSpec.DeepCopy() + containerSpecCopy.Resources = v1.ResourceRequirements{} + if expectedHash := kubeletcontainer.HashContainer(containerSpecCopy); containerMeta.Hashes.PlainHashWithoutResources != expectedHash { + klog.InfoS("Find container in runtime-container-meta for Pod has different plain hash with spec(except resources)", + "containerName", containerSpecCopy.Name, "namespace", pod.Namespace, "podName", pod.Name, + "metaHash", containerMeta.Hashes.PlainHashWithoutResources, "expectedHash", expectedHash) + return false + } case extractedEnvFromMetadataHash: hasher := utilcontainermeta.NewEnvFromMetadataHasher() if expectedHash := hasher.GetExpectHash(containerSpec, pod); containerMeta.Hashes.ExtractedEnvFromMetadataHash != expectedHash { @@ -512,390 +585,45 @@ func checkAllContainersHashConsistent(pod *v1.Pod, runtimeContainerMetaSet *apps return true } -// defaultPatchUpdateSpecToPod returns new pod that merges spec into old pod -func defaultPatchUpdateSpecToPodWithVerticalUpdate(pod *v1.Pod, spec *UpdateSpec, state *appspub.InPlaceUpdateState) (*v1.Pod, error) { - klog.V(5).Infof("Begin to in-place update pod %s/%s with update spec %v, state %v", pod.Namespace, pod.Name, util.DumpJSON(spec), util.DumpJSON(state)) - - registerVerticalUpdate() - - state.NextContainerImages = make(map[string]string) - state.NextContainerRefMetadata = make(map[string]metav1.ObjectMeta) - state.NextContainerResources = 
make(map[string]v1.ResourceRequirements) - - if spec.MetaDataPatch != nil { - cloneBytes, _ := json.Marshal(pod) - modified, err := strategicpatch.StrategicMergePatch(cloneBytes, spec.MetaDataPatch, &v1.Pod{}) - if err != nil { - return nil, err - } - pod = &v1.Pod{} - if err = json.Unmarshal(modified, pod); err != nil { - return nil, err - } - } - - if pod.Labels == nil { - pod.Labels = make(map[string]string) - } - if pod.Annotations == nil { - pod.Annotations = make(map[string]string) - } - - // prepare containers that should update this time and next time, according to their priorities - containersToUpdate := sets.NewString() - var highestPriority *int - var containersWithHighestPriority []string - for i := range pod.Spec.Containers { - c := &pod.Spec.Containers[i] - _, existImage := spec.ContainerImages[c.Name] - _, existMetadata := spec.ContainerRefMetadata[c.Name] - _, existResource := spec.ContainerResources[c.Name] - if !existImage && !existMetadata && !existResource { - continue - } - priority := utilcontainerlaunchpriority.GetContainerPriority(c) - if priority == nil { - containersToUpdate.Insert(c.Name) - } else if highestPriority == nil || *highestPriority < *priority { - highestPriority = priority - containersWithHighestPriority = []string{c.Name} - } else if *highestPriority == *priority { - containersWithHighestPriority = append(containersWithHighestPriority, c.Name) - } - } - for _, cName := range containersWithHighestPriority { - containersToUpdate.Insert(cName) - } - addMetadataSharedContainersToUpdate(pod, containersToUpdate, spec.ContainerRefMetadata) - - // DO NOT modify the fields in spec for it may have to retry on conflict in updatePodInPlace - - // update images and record current imageIDs for the containers to update - containersImageChanged := sets.NewString() - containersResourceChanged := sets.NewString() - for i := range pod.Spec.Containers { - c := &pod.Spec.Containers[i] - newImage, imageExists := spec.ContainerImages[c.Name] - newResource, resourceExists := spec.ContainerResources[c.Name] - if !imageExists && !resourceExists { - continue - } - if containersToUpdate.Has(c.Name) { - if imageExists { - pod.Spec.Containers[i].Image = newImage - containersImageChanged.Insert(c.Name) - } - if resourceExists { - verticalUpdateOperator.UpdateContainerResource(c, &newResource) - containersResourceChanged.Insert(c.Name) - } - } else { - state.NextContainerImages[c.Name] = newImage - state.NextContainerResources[c.Name] = newResource - } - } - - // This provides a hook for vertical updates, - // so that internal enterprise implementations can update pod resources here at once - verticalUpdateOperator.UpdatePodResource(pod) - - for _, c := range pod.Status.ContainerStatuses { - if containersImageChanged.Has(c.Name) { - if state.LastContainerStatuses == nil { - state.LastContainerStatuses = map[string]appspub.InPlaceUpdateContainerStatus{} - } - if cs, ok := state.LastContainerStatuses[c.Name]; !ok { - state.LastContainerStatuses[c.Name] = appspub.InPlaceUpdateContainerStatus{ImageID: c.ImageID} - } else { - cs.ImageID = c.ImageID - } - } - if containersResourceChanged.Has(c.Name) { - verticalUpdateOperator.SyncContainerResource(&c, state) - } - } - - // This provides a hook for vertical updates, - // so that internal enterprise implementations can sync pod resources here at once - verticalUpdateOperator.SyncPodResource(pod, state) - - // update annotations and labels for the containers to update - for cName, objMeta := range spec.ContainerRefMetadata { - if 
containersToUpdate.Has(cName) { - for k, v := range objMeta.Labels { - pod.Labels[k] = v - } - for k, v := range objMeta.Annotations { - pod.Annotations[k] = v - } - } else { - state.NextContainerRefMetadata[cName] = objMeta - } - } - - // add the containers that update this time into PreCheckBeforeNext, so that next containers can only - // start to update when these containers have updated ready - // TODO: currently we only support ContainersRequiredReady, not sure if we have to add ContainersPreferredReady in future - if len(state.NextContainerImages) > 0 || len(state.NextContainerRefMetadata) > 0 || len(state.NextContainerResources) > 0 { - state.PreCheckBeforeNext = &appspub.InPlaceUpdatePreCheckBeforeNext{ContainersRequiredReady: containersToUpdate.List()} - } else { - state.PreCheckBeforeNext = nil - } - - state.ContainerBatchesRecord = append(state.ContainerBatchesRecord, appspub.InPlaceUpdateContainerBatch{ - Timestamp: metav1.NewTime(Clock.Now()), - Containers: containersToUpdate.List(), - }) - - klog.V(5).Infof("Decide to in-place update pod %s/%s with state %v", pod.Namespace, pod.Name, util.DumpJSON(state)) - - inPlaceUpdateStateJSON, _ := json.Marshal(state) - pod.Annotations[appspub.InPlaceUpdateStateKey] = string(inPlaceUpdateStateJSON) - return pod, nil -} - -// defaultCalculateInPlaceUpdateSpec calculates diff between old and update revisions. -// If the diff just contains replace operation of spec.containers[x].image, it will returns an UpdateSpec. -// Otherwise, it returns nil which means can not use in-place update. -func defaultCalculateInPlaceUpdateSpecWithVerticalUpdate(oldRevision, newRevision *apps.ControllerRevision, opts *UpdateOptions) *UpdateSpec { - if oldRevision == nil || newRevision == nil { - return nil - } - opts = SetOptionsDefaults(opts) - - patches, err := jsonpatch.CreatePatch(oldRevision.Data.Raw, newRevision.Data.Raw) - if err != nil { - return nil - } - - oldTemp, err := GetTemplateFromRevision(oldRevision) - if err != nil { - return nil - } - newTemp, err := GetTemplateFromRevision(newRevision) - if err != nil { - return nil - } - - updateSpec := &UpdateSpec{ - Revision: newRevision.Name, - ContainerImages: make(map[string]string), - ContainerRefMetadata: make(map[string]metav1.ObjectMeta), - ContainerResources: make(map[string]v1.ResourceRequirements), - GraceSeconds: opts.GracePeriodSeconds, - VerticalUpdateOnly: false, - } - if opts.GetRevision != nil { - updateSpec.Revision = opts.GetRevision(newRevision) - } - - // all patches for podSpec can just update images in pod spec - var metadataPatches []jsonpatch.Operation - for _, op := range patches { - op.Path = strings.Replace(op.Path, "/spec/template", "", 1) - - if !strings.HasPrefix(op.Path, "/spec/") { - if strings.HasPrefix(op.Path, "/metadata/") { - metadataPatches = append(metadataPatches, op) - continue - } - return nil - } - - if op.Operation != "replace" { - return nil - } - - isImageUpdate := containerImagePatchRexp.MatchString(op.Path) - isResourceUpdate := containerResourcesPatchRexp.MatchString(op.Path) - - if isImageUpdate { - // for example: /spec/containers/0/image - words := strings.Split(op.Path, "/") - idx, _ := strconv.Atoi(words[3]) - if len(oldTemp.Spec.Containers) <= idx { - return nil - } - updateSpec.ContainerImages[oldTemp.Spec.Containers[idx].Name] = op.Value.(string) - } else if isResourceUpdate { - // for example: /spec/containers/0/resources/limits/cpu - words := strings.Split(op.Path, "/") - if len(words) != 7 { - return nil - } - idx, err := 
strconv.Atoi(words[3]) - if err != nil || len(oldTemp.Spec.Containers) <= idx { - return nil - } - quantity, err := resource.ParseQuantity(op.Value.(string)) - if err != nil { - klog.Errorf("parse quantity error: %v", err) - return nil - } +const ( + cpuMask = 1 + memMask = 2 +) - if _, ok := updateSpec.ContainerResources[oldTemp.Spec.Containers[idx].Name]; !ok { - updateSpec.ContainerResources[oldTemp.Spec.Containers[idx].Name] = v1.ResourceRequirements{ - Limits: make(v1.ResourceList), - Requests: make(v1.ResourceList), - } +func defaultCheckPodNeedsBeUnready(pod *v1.Pod, spec *UpdateSpec) bool { + if utilfeature.DefaultFeatureGate.Enabled(features.InPlaceWorkloadVerticalScaling) && spec.VerticalUpdateOnly { + resourceFlag := make(map[string]int) + for c, resizeResources := range spec.ContainerResources { + flag := 0 + _, limitExist := resizeResources.Limits[v1.ResourceCPU] + _, reqExist := resizeResources.Requests[v1.ResourceCPU] + if limitExist || reqExist { + flag |= cpuMask } - switch words[5] { - case "limits": - updateSpec.ContainerResources[oldTemp.Spec.Containers[idx].Name].Limits[v1.ResourceName(words[6])] = quantity - case "requests": - updateSpec.ContainerResources[oldTemp.Spec.Containers[idx].Name].Requests[v1.ResourceName(words[6])] = quantity + _, limitExist = resizeResources.Limits[v1.ResourceMemory] + _, reqExist = resizeResources.Requests[v1.ResourceMemory] + if limitExist || reqExist { + flag |= memMask } - } else { - return nil - } - } - - // Need to distinguish whether only resources have been updated - if len(updateSpec.ContainerResources) > 0 && len(updateSpec.ContainerImages) == 0 { - updateSpec.VerticalUpdateOnly = true - } - - if len(metadataPatches) > 0 { - if utilfeature.DefaultFeatureGate.Enabled(features.InPlaceUpdateEnvFromMetadata) { - // for example: /metadata/labels/my-label-key - for _, op := range metadataPatches { - if op.Operation != "replace" && op.Operation != "add" { - continue - } - words := strings.SplitN(op.Path, "/", 4) - if len(words) != 4 || (words[2] != "labels" && words[2] != "annotations") { - continue - } - key := rfc6901Decoder.Replace(words[3]) - - for i := range newTemp.Spec.Containers { - c := &newTemp.Spec.Containers[i] - objMeta := updateSpec.ContainerRefMetadata[c.Name] - switch words[2] { - case "labels": - if !utilcontainermeta.IsContainerReferenceToMeta(c, "metadata.labels", key) { - continue - } - if objMeta.Labels == nil { - objMeta.Labels = make(map[string]string) - } - objMeta.Labels[key] = op.Value.(string) - delete(oldTemp.ObjectMeta.Labels, key) - delete(newTemp.ObjectMeta.Labels, key) - - case "annotations": - if !utilcontainermeta.IsContainerReferenceToMeta(c, "metadata.annotations", key) { - continue - } - if objMeta.Annotations == nil { - objMeta.Annotations = make(map[string]string) - } - objMeta.Annotations[key] = op.Value.(string) - delete(oldTemp.ObjectMeta.Annotations, key) - delete(newTemp.ObjectMeta.Annotations, key) + resourceFlag[c] = flag + } + needRestart := false + for _, container := range pod.Spec.Containers { + if flag, exist := resourceFlag[container.Name]; exist { + for _, resizePolicy := range container.ResizePolicy { + if resizePolicy.RestartPolicy != v1.RestartContainer { + continue + } + if (resizePolicy.ResourceName == v1.ResourceCPU && (flag&cpuMask) != 0) || + (resizePolicy.ResourceName == v1.ResourceMemory && (flag&memMask) != 0) { + needRestart = true } - - updateSpec.ContainerRefMetadata[c.Name] = objMeta - updateSpec.UpdateEnvFromMetadata = true } } } - - oldBytes, _ := 
json.Marshal(v1.Pod{ObjectMeta: oldTemp.ObjectMeta}) - newBytes, _ := json.Marshal(v1.Pod{ObjectMeta: newTemp.ObjectMeta}) - patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldBytes, newBytes, &v1.Pod{}) - if err != nil { - return nil - } - updateSpec.MetaDataPatch = patchBytes - } - return updateSpec -} - -// DefaultCheckInPlaceUpdateCompleted checks whether imageID in pod status has been changed since in-place update. -// If the imageID in containerStatuses has not been changed, we assume that kubelet has not updated -// containers in Pod. -func DefaultCheckInPlaceUpdateCompletedWithVerticalUpdate(pod *v1.Pod) error { - registerVerticalUpdate() - - if _, isInGraceState := appspub.GetInPlaceUpdateGrace(pod); isInGraceState { - return fmt.Errorf("still in grace period of in-place update") - } - - inPlaceUpdateState := appspub.InPlaceUpdateState{} - if stateStr, ok := appspub.GetInPlaceUpdateState(pod); !ok { - return nil - } else if err := json.Unmarshal([]byte(stateStr), &inPlaceUpdateState); err != nil { - return err - } - if len(inPlaceUpdateState.NextContainerImages) > 0 || len(inPlaceUpdateState.NextContainerRefMetadata) > 0 || len(inPlaceUpdateState.NextContainerResources) > 0 { - return fmt.Errorf("existing containers to in-place update in next batches") - } - - if ok := verticalUpdateOperator.IsPodUpdateCompleted(pod); !ok { - return fmt.Errorf("waiting for pod vertical update") - } - - return defaultCheckContainersInPlaceUpdateCompletedWithVerticalUpdate(pod, &inPlaceUpdateState) -} - -func defaultCheckContainersInPlaceUpdateCompletedWithVerticalUpdate(pod *v1.Pod, inPlaceUpdateState *appspub.InPlaceUpdateState) error { - registerVerticalUpdate() - - runtimeContainerMetaSet, err := appspub.GetRuntimeContainerMetaSet(pod) - if err != nil { - return err - } - - if inPlaceUpdateState.UpdateEnvFromMetadata { - if runtimeContainerMetaSet == nil { - return fmt.Errorf("waiting for all containers hash consistent, but runtime-container-meta not found") - } - if !checkAllContainersHashConsistent(pod, runtimeContainerMetaSet, extractedEnvFromMetadataHash) { - return fmt.Errorf("waiting for all containers hash consistent") - } - } - - if runtimeContainerMetaSet != nil { - if checkAllContainersHashConsistent(pod, runtimeContainerMetaSet, plainHash) { - klog.V(5).Infof("Check Pod %s/%s in-place update completed for all container hash consistent", pod.Namespace, pod.Name) - return nil - } - // If it needs not to update envs from metadata, we don't have to return error here, - // in case kruise-daemon has broken for some reason and runtime-container-meta is still in an old version. 
- } - - containerImages := make(map[string]string, len(pod.Spec.Containers)) - containers := make(map[string]*v1.Container, len(pod.Spec.Containers)) - for i := range pod.Spec.Containers { - c := &pod.Spec.Containers[i] - containerImages[c.Name] = c.Image - containers[c.Name] = c - if len(strings.Split(c.Image, ":")) <= 1 { - containerImages[c.Name] = fmt.Sprintf("%s:latest", c.Image) - } - } - - for _, cs := range pod.Status.ContainerStatuses { - if oldStatus, ok := inPlaceUpdateState.LastContainerStatuses[cs.Name]; ok { - // TODO: we assume that users should not update workload template with new image which actually has the same imageID as the old image - if oldStatus.ImageID == cs.ImageID { - if containerImages[cs.Name] != cs.Image { - return fmt.Errorf("container %s imageID not changed", cs.Name) - } - } - // Determine whether the vertical update was successful by the resource values in the pod's spec and status - // TODO(LavenderQAQ): The third parameter here should be passed to the resources value in the status field of all containers and will need to be modified after the k8s api upgrade. - if !verticalUpdateOperator.IsContainerUpdateCompleted(pod, containers[cs.Name], &cs, inPlaceUpdateState.LastContainerStatuses[cs.Name]) { - return fmt.Errorf("container %s resources not changed", cs.Name) - } - delete(inPlaceUpdateState.LastContainerStatuses, cs.Name) + if !needRestart { + return false } } - - if len(inPlaceUpdateState.LastContainerStatuses) > 0 { - return fmt.Errorf("not found statuses of containers %v", inPlaceUpdateState.LastContainerStatuses) - } - - return nil + return containsReadinessGate(pod) } diff --git a/pkg/util/inplaceupdate/inplace_update_defaults_test.go b/pkg/util/inplaceupdate/inplace_update_defaults_test.go index dc1b12f84d..0a94916849 100644 --- a/pkg/util/inplaceupdate/inplace_update_defaults_test.go +++ b/pkg/util/inplaceupdate/inplace_update_defaults_test.go @@ -20,9 +20,13 @@ import ( "encoding/json" "fmt" "reflect" + "strings" "testing" "time" + "github.com/stretchr/testify/assert" + "k8s.io/apimachinery/pkg/api/resource" + appspub "github.com/openkruise/kruise/apis/apps/pub" "github.com/openkruise/kruise/pkg/features" "github.com/openkruise/kruise/pkg/util" @@ -920,3 +924,1138 @@ func Test_defaultCalculateInPlaceUpdateSpec_VCTHash(t *testing.T) { testWhenEnable(false) } + +func getFakeControllerRevisionData() string { + oldData := `{ + "spec": { + "template": { + "$patch": "replace", + "metadata": { + "creationTimestamp": null, + "labels": { + "app": "nginx" + } + }, + "spec": { + "containers": [ + { + "env": [ + { + "name": "version", + "value": "v1" + } + ], + "image": "nginx:stable-alpine22", + "imagePullPolicy": "Always", + "name": "nginx", + "resources": { + "limits": { + "cpu": "2", + "memory": "4Gi", + "sigma/eni": "2" + }, + "requests": { + "cpu": "1", + "memory": "2Gi", + "sigma/eni": "2" + } + }, + "terminationMessagePath": "/dev/termination-log", + "terminationMessagePolicy": "File", + "volumeMounts": [ + { + "mountPath": "/usr/share/nginx/html", + "name": "www-data" + } + ] + }, + { + "env": [ + { + "name": "version", + "value": "v1" + } + ], + "image": "nginx:stable-alpine22", + "imagePullPolicy": "Always", + "name": "nginx2", + "resources": { + "limits": { + "cpu": "2", + "memory": "4Gi", + "sigma/eni": "2" + }, + "requests": { + "cpu": "1", + "memory": "2Gi", + "sigma/eni": "2" + } + }, + "terminationMessagePath": "/dev/termination-log", + "terminationMessagePolicy": "File", + "volumeMounts": [ + { + "mountPath": "/usr/share/nginx/html", + 
"name": "www-data" + } + ] + } + ], + "dnsPolicy": "ClusterFirst", + "restartPolicy": "Always", + "schedulerName": "default-scheduler", + "securityContext": {}, + "terminationGracePeriodSeconds": 30 + } + } + } +}` + return oldData +} + +func TestDefaultCalculateInPlaceUpdateSpec(t *testing.T) { + baseRevision := &apps.ControllerRevision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "old-revision", + Annotations: map[string]string{}, + }, + Data: runtime.RawExtension{ + Raw: []byte(getFakeControllerRevisionData()), + }, + } + revisionGetter := func(imageChanged, resourceChanged, otherChanged bool, updateContainerNum int) *apps.ControllerRevision { + base := getFakeControllerRevisionData() + if imageChanged { + base = strings.Replace(base, `"image": "nginx:stable-alpine22"`, `"image": "nginx:stable-alpine23"`, updateContainerNum) + } + if resourceChanged { + base = strings.Replace(base, `"cpu": "1",`, `"cpu": "2",`, updateContainerNum) + } + if otherChanged { + base = strings.Replace(base, `"imagePullPolicy": "Always",`, `"imagePullPolicy": "222",`, updateContainerNum) + } + return &apps.ControllerRevision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "new-revision", + Annotations: map[string]string{}, + }, + Data: runtime.RawExtension{ + Raw: []byte(base), + }, + } + } + // Define your test cases + tests := []struct { + name string + oldRevision *apps.ControllerRevision + newRevision *apps.ControllerRevision + opts *UpdateOptions + expectedResult *UpdateSpec + vpaEnabled bool + }{ + { + vpaEnabled: true, + name: "only change resource", + oldRevision: baseRevision, + newRevision: revisionGetter(false, true, false, 1), + opts: &UpdateOptions{}, + expectedResult: &UpdateSpec{ + Revision: "new-revision", + ContainerResources: map[string]v1.ResourceRequirements{ + "nginx": { + Requests: v1.ResourceList{ + "cpu": resource.MustParse("2"), + }, + }, + }, + VerticalUpdateOnly: true, + }, + }, + { + vpaEnabled: true, + name: "change image and resource", + oldRevision: baseRevision, + newRevision: revisionGetter(true, true, false, 1), + opts: &UpdateOptions{}, + expectedResult: &UpdateSpec{ + Revision: "new-revision", + ContainerImages: map[string]string{ + "nginx": "nginx:stable-alpine23", + }, + ContainerResources: map[string]v1.ResourceRequirements{ + "nginx": { + Requests: v1.ResourceList{ + "cpu": resource.MustParse("2"), + }, + }, + }, + VerticalUpdateOnly: false, + }, + }, + { + vpaEnabled: true, + name: "change other and resource", + oldRevision: baseRevision, + newRevision: revisionGetter(false, true, true, 1), + opts: &UpdateOptions{}, + expectedResult: nil, + }, + { + vpaEnabled: true, + name: "change all", + oldRevision: baseRevision, + newRevision: revisionGetter(true, true, true, 1), + opts: &UpdateOptions{}, + expectedResult: nil, + }, + // Add more test cases as needed + { + vpaEnabled: true, + name: "only change resource of two containers", + oldRevision: baseRevision, + newRevision: revisionGetter(false, true, false, 2), + opts: &UpdateOptions{}, + expectedResult: &UpdateSpec{ + Revision: "new-revision", + ContainerResources: map[string]v1.ResourceRequirements{ + "nginx": { + Requests: v1.ResourceList{ + "cpu": resource.MustParse("2"), + }, + }, + "nginx2": { + Requests: v1.ResourceList{ + "cpu": resource.MustParse("2"), + }, + }, + }, + VerticalUpdateOnly: true, + }, + }, + { + vpaEnabled: true, + name: "change image and resource of two containers", + oldRevision: baseRevision, + newRevision: revisionGetter(true, true, false, 2), + opts: &UpdateOptions{}, + expectedResult: &UpdateSpec{ + 
Revision: "new-revision", + ContainerImages: map[string]string{ + "nginx": "nginx:stable-alpine23", + "nginx2": "nginx:stable-alpine23", + }, + ContainerResources: map[string]v1.ResourceRequirements{ + "nginx": { + Requests: v1.ResourceList{ + "cpu": resource.MustParse("2"), + }, + }, + "nginx2": { + Requests: v1.ResourceList{ + "cpu": resource.MustParse("2"), + }, + }, + }, + VerticalUpdateOnly: false, + }, + }, + { + vpaEnabled: true, + name: "change other and resource of two containers", + oldRevision: baseRevision, + newRevision: revisionGetter(false, true, true, 2), + opts: &UpdateOptions{}, + expectedResult: nil, + }, + { + vpaEnabled: true, + name: "change all of two containers", + oldRevision: baseRevision, + newRevision: revisionGetter(true, true, true, 2), + opts: &UpdateOptions{}, + expectedResult: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + defer utilfeature.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlaceWorkloadVerticalScaling, tt.vpaEnabled)() + result := defaultCalculateInPlaceUpdateSpec(tt.oldRevision, tt.newRevision, tt.opts) + + if !apiequality.Semantic.DeepEqual(tt.expectedResult, result) { + t.Fatalf("expected updateSpec \n%v\n but got \n%v", util.DumpJSON(tt.expectedResult), util.DumpJSON(result)) + } + }) + } +} + +func getTestPodWithResource() *v1.Pod { + return &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"label-k1": "foo", "label-k2": "foo"}, + Annotations: map[string]string{"annotation-k1": "foo"}, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "c1", + Image: "c1-img", + Env: []v1.EnvVar{ + {Name: appspub.ContainerLaunchBarrierEnvName, ValueFrom: &v1.EnvVarSource{ConfigMapKeyRef: &v1.ConfigMapKeySelector{Key: "p_20"}}}, + {Name: "config", ValueFrom: &v1.EnvVarSource{FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.labels['label-k1']"}}}, + }, + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("1Gi"), + v1.ResourceCPU: resource.MustParse("1"), + }, + Limits: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("2Gi"), + v1.ResourceCPU: resource.MustParse("2"), + }, + }, + ResizePolicy: []v1.ContainerResizePolicy{ + { + ResourceName: v1.ResourceCPU, + RestartPolicy: v1.NotRequired, + }, + }, + }, + { + Name: "c2", + Image: "c2-img", + Env: []v1.EnvVar{ + {Name: appspub.ContainerLaunchBarrierEnvName, ValueFrom: &v1.EnvVarSource{ConfigMapKeyRef: &v1.ConfigMapKeySelector{Key: "p_10"}}}, + {Name: "config", ValueFrom: &v1.EnvVarSource{FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.labels['label-k2']"}}}, + }, + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("1Gi"), + v1.ResourceCPU: resource.MustParse("1"), + }, + Limits: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("2Gi"), + v1.ResourceCPU: resource.MustParse("2"), + }, + }, + ResizePolicy: []v1.ContainerResizePolicy{ + { + ResourceName: v1.ResourceMemory, + RestartPolicy: v1.RestartContainer, + }, + }, + }, + }, + }, + Status: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ + { + Name: "c1", + ImageID: "containerd://c1-img", + Resources: &v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("1Gi"), + v1.ResourceCPU: resource.MustParse("1"), + }, + Limits: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("2Gi"), + v1.ResourceCPU: resource.MustParse("2"), + }, + }, + }, + { + Name: "c2", + ImageID: 
"containerd://c2-img", + Resources: &v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("1Gi"), + v1.ResourceCPU: resource.MustParse("1"), + }, + Limits: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("2Gi"), + v1.ResourceCPU: resource.MustParse("2"), + }, + }, + }, + }, + }, + } +} + +func TestDefaultPatchUpdateSpecToPod_Resource(t *testing.T) { + // disableVPA cases already be tested in TestDefaultPatchUpdateSpecToPod + now := time.Now() + Clock = testingclock.NewFakeClock(now) + pod := getTestPodWithResource() + + // Define the test cases + tests := []struct { + name string + spec *UpdateSpec + state *appspub.InPlaceUpdateState + expectedState *appspub.InPlaceUpdateState + expectedPatch map[string]interface{} + vpaEnabled bool + }{ + { + name: "only change container 0 resource cpu", + spec: &UpdateSpec{ + ContainerResources: map[string]v1.ResourceRequirements{ + "c1": { + Requests: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("2Gi"), + v1.ResourceCPU: resource.MustParse("2"), + }, + }, + }, + }, + state: &appspub.InPlaceUpdateState{}, + expectedState: &appspub.InPlaceUpdateState{ + LastContainerStatuses: map[string]appspub.InPlaceUpdateContainerStatus{ + "c1": { + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("1Gi"), + v1.ResourceCPU: resource.MustParse("1"), + }, + Limits: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("2Gi"), + v1.ResourceCPU: resource.MustParse("2"), + }, + }, + }, + }, + ContainerBatchesRecord: []appspub.InPlaceUpdateContainerBatch{{Timestamp: metav1.NewTime(now), Containers: []string{"c1"}}}, + }, + expectedPatch: map[string]interface{}{ + "spec": map[string]interface{}{ + "containers": []map[string]interface{}{ + { + "name": "c1", + "resources": map[string]interface{}{ + "requests": map[string]interface{}{ + "memory": "2Gi", + "cpu": "2", + }, + }, + }, + }, + }, + }, + vpaEnabled: true, + }, + { + name: "change container 0 resource cpu and image", + spec: &UpdateSpec{ + ContainerImages: map[string]string{"c1": "c1-img-new"}, + ContainerResources: map[string]v1.ResourceRequirements{ + "c1": { + Requests: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("2Gi"), + v1.ResourceCPU: resource.MustParse("2"), + }, + }, + }, + }, + state: &appspub.InPlaceUpdateState{}, + expectedState: &appspub.InPlaceUpdateState{ + LastContainerStatuses: map[string]appspub.InPlaceUpdateContainerStatus{ + "c1": { + ImageID: "containerd://c1-img", + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("1Gi"), + v1.ResourceCPU: resource.MustParse("1"), + }, + Limits: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("2Gi"), + v1.ResourceCPU: resource.MustParse("2"), + }, + }, + }, + }, + ContainerBatchesRecord: []appspub.InPlaceUpdateContainerBatch{{Timestamp: metav1.NewTime(now), Containers: []string{"c1"}}}, + }, + expectedPatch: map[string]interface{}{ + "spec": map[string]interface{}{ + "containers": []map[string]interface{}{ + { + "name": "c1", + "image": "c1-img-new", + "resources": map[string]interface{}{ + "requests": map[string]interface{}{ + "memory": "2Gi", + "cpu": "2", + }, + }, + }, + }, + }, + }, + vpaEnabled: true, + }, + { + name: "change two containers resource cpu and image step1", + spec: &UpdateSpec{ + ContainerImages: map[string]string{"c1": "c1-img-new", "c2": "c1-img-new"}, + ContainerResources: map[string]v1.ResourceRequirements{ + "c1": { + Requests: v1.ResourceList{ 
+ v1.ResourceMemory: resource.MustParse("2Gi"), + v1.ResourceCPU: resource.MustParse("2"), + }, + }, + "c2": { + Requests: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("2Gi"), + v1.ResourceCPU: resource.MustParse("2"), + }, + }, + }, + }, + state: &appspub.InPlaceUpdateState{}, + expectedState: &appspub.InPlaceUpdateState{ + LastContainerStatuses: map[string]appspub.InPlaceUpdateContainerStatus{ + "c1": { + ImageID: "containerd://c1-img", + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("1Gi"), + v1.ResourceCPU: resource.MustParse("1"), + }, + Limits: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("2Gi"), + v1.ResourceCPU: resource.MustParse("2"), + }, + }, + }, + }, + NextContainerResources: map[string]v1.ResourceRequirements{ + "c2": { + Requests: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("2Gi"), + v1.ResourceCPU: resource.MustParse("2"), + }, + }, + }, + PreCheckBeforeNext: &appspub.InPlaceUpdatePreCheckBeforeNext{ + ContainersRequiredReady: []string{"c1"}, + }, + NextContainerImages: map[string]string{"c2": "c1-img-new"}, + ContainerBatchesRecord: []appspub.InPlaceUpdateContainerBatch{{Timestamp: metav1.NewTime(now), Containers: []string{"c1"}}}, + }, + expectedPatch: map[string]interface{}{ + "spec": map[string]interface{}{ + "containers": []map[string]interface{}{ + { + "name": "c1", + "image": "c1-img-new", + "resources": map[string]interface{}{ + "requests": map[string]interface{}{ + "memory": "2Gi", + "cpu": "2", + }, + }, + }, + }, + }, + }, + vpaEnabled: true, + }, + { + name: "change two containers resource cpu and image step2", + spec: &UpdateSpec{ + ContainerImages: map[string]string{"c2": "c1-img-new"}, + ContainerResources: map[string]v1.ResourceRequirements{ + "c2": { + Requests: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("2Gi"), + v1.ResourceCPU: resource.MustParse("2"), + }, + }, + }, + }, + state: &appspub.InPlaceUpdateState{ + LastContainerStatuses: map[string]appspub.InPlaceUpdateContainerStatus{ + "c1": { + ImageID: "containerd://c2-img", + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("1Gi"), + v1.ResourceCPU: resource.MustParse("1"), + }, + Limits: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("2Gi"), + v1.ResourceCPU: resource.MustParse("2"), + }, + }, + }, + }, + NextContainerResources: map[string]v1.ResourceRequirements{ + "c2": { + Requests: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("2Gi"), + v1.ResourceCPU: resource.MustParse("2"), + }, + }, + }, + PreCheckBeforeNext: &appspub.InPlaceUpdatePreCheckBeforeNext{ + ContainersRequiredReady: []string{"c1"}, + }, + NextContainerImages: map[string]string{"c2": "c1-img-new"}, + ContainerBatchesRecord: []appspub.InPlaceUpdateContainerBatch{{Timestamp: metav1.NewTime(now), Containers: []string{"c1"}}}, + }, + expectedState: &appspub.InPlaceUpdateState{ + LastContainerStatuses: map[string]appspub.InPlaceUpdateContainerStatus{ + "c1": { + ImageID: "containerd://c2-img", + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("1Gi"), + v1.ResourceCPU: resource.MustParse("1"), + }, + Limits: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("2Gi"), + v1.ResourceCPU: resource.MustParse("2"), + }, + }, + }, + "c2": { + ImageID: "containerd://c2-img", + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("1Gi"), + 
v1.ResourceCPU: resource.MustParse("1"), + }, + Limits: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("2Gi"), + v1.ResourceCPU: resource.MustParse("2"), + }, + }, + }, + }, + ContainerBatchesRecord: []appspub.InPlaceUpdateContainerBatch{{Timestamp: metav1.NewTime(now), Containers: []string{"c1"}}, {Timestamp: metav1.NewTime(now), Containers: []string{"c2"}}}, + }, + expectedPatch: map[string]interface{}{ + "spec": map[string]interface{}{ + "containers": []map[string]interface{}{ + { + "name": "c2", + "image": "c1-img-new", + "resources": map[string]interface{}{ + "requests": map[string]interface{}{ + "memory": "2Gi", + "cpu": "2", + }, + }, + }, + }, + }, + }, + vpaEnabled: true, + }, + } + + // Initialize the vertical update operator + verticalUpdateOperator = &VerticalUpdate{} + + // Run the test cases + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + defer utilfeature.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlaceWorkloadVerticalScaling, tc.vpaEnabled)() + gotPod, err := defaultPatchUpdateSpecToPod(pod.DeepCopy(), tc.spec, tc.state) + if err != nil { + t.Fatal(err) + } + + if !apiequality.Semantic.DeepEqual(tc.state, tc.expectedState) { + t.Fatalf("expected state \n%v\n but got \n%v", util.DumpJSON(tc.expectedState), util.DumpJSON(tc.state)) + } + + originPodJS, _ := json.Marshal(pod) + patchJS, _ := json.Marshal(tc.expectedPatch) + expectedPodJS, err := strategicpatch.StrategicMergePatch(originPodJS, patchJS, &v1.Pod{}) + if err != nil { + t.Fatal(err) + } + expectedPod := &v1.Pod{} + if err := json.Unmarshal(expectedPodJS, expectedPod); err != nil { + t.Fatal(err) + } + expectedPod.Annotations[appspub.InPlaceUpdateStateKey] = util.DumpJSON(tc.state) + if !apiequality.Semantic.DeepEqual(gotPod, expectedPod) { + t.Fatalf("expected pod \n%v\n but got \n%v", util.DumpJSON(expectedPod), util.DumpJSON(gotPod)) + } + }) + } +} + +func createFakePod(imageInject, resourceInject, stateInject bool, num, imageOKNum, resourceOKNumber int) *v1.Pod { + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + apps.StatefulSetRevisionLabel: "new-revision", + }, + Annotations: map[string]string{}, + Name: "test-pod", + }, + } + + for i := 0; i < num; i++ { + name := fmt.Sprintf("c%d", i) + pod.Spec.Containers = append(pod.Spec.Containers, v1.Container{ + Name: name, + }) + pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, v1.ContainerStatus{ + Name: name, + }) + } + state := appspub.InPlaceUpdateState{Revision: "new-revision", LastContainerStatuses: map[string]appspub.InPlaceUpdateContainerStatus{}} + for i := 0; i < num; i++ { + imageID := fmt.Sprintf("img0%d", i) + lastStatus := appspub.InPlaceUpdateContainerStatus{} + if imageInject { + pod.Spec.Containers[i].Image = fmt.Sprintf("busybox:test%d", i) + pod.Status.ContainerStatuses[i].ImageID = imageID + + lastImgId := "different-img01" + img := lastImgId + if i < imageOKNum { + // ok => imgId != lastImageId + img = fmt.Sprintf("img0%d", i) + } + pod.Status.ContainerStatuses[i].ImageID = img + lastStatus.ImageID = lastImgId + } + if resourceInject { + defaultCPU := resource.MustParse("200m") + defaultMem := resource.MustParse("200Mi") + lastCPU := resource.MustParse("100m") + lastMem := resource.MustParse("100Mi") + pod.Spec.Containers[i].Resources = v1.ResourceRequirements{ + Requests: map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: defaultCPU, + v1.ResourceMemory: defaultMem, + }, + } + pod.Status.ContainerStatuses[i].Resources = 
&v1.ResourceRequirements{
+ Requests: map[v1.ResourceName]resource.Quantity{
+ v1.ResourceCPU: defaultCPU,
+ v1.ResourceMemory: defaultMem,
+ },
+ }
+ if i >= resourceOKNumber {
+ pod.Status.ContainerStatuses[i].Resources = &v1.ResourceRequirements{
+ Requests: map[v1.ResourceName]resource.Quantity{
+ v1.ResourceCPU: lastCPU,
+ v1.ResourceMemory: lastMem,
+ },
+ }
+ }
+ lastStatus.Resources = v1.ResourceRequirements{
+ Requests: map[v1.ResourceName]resource.Quantity{
+ v1.ResourceCPU: lastCPU,
+ v1.ResourceMemory: lastMem,
+ },
+ }
+ }
+ state.LastContainerStatuses[pod.Spec.Containers[i].Name] = lastStatus
+ }
+
+ if stateInject {
+ if resourceInject {
+ state.UpdateResources = true
+ }
+ v, _ := json.Marshal(state)
+ pod.Annotations[appspub.InPlaceUpdateStateKey] = string(v)
+ }
+ return pod
+}
+
+func TestDefaultCheckInPlaceUpdateCompleted_Resource(t *testing.T) {
+ // Define the test cases
+ tests := []struct {
+ name string
+ pod *v1.Pod
+ expectError bool
+ vpaEnabled bool
+ }{
+ // normal case with feature gate disabled
+ {
+ name: "empty pod",
+ pod: createFakePod(false, false, false, 1, 0, 0),
+ expectError: false,
+ vpaEnabled: false,
+ },
+ {
+ name: "image ok",
+ pod: createFakePod(true, false, true, 1, 1, 0),
+ expectError: false,
+ vpaEnabled: false,
+ },
+ {
+ name: "image not ok",
+ pod: createFakePod(true, false, true, 1, 0, 0),
+ expectError: true,
+ vpaEnabled: false,
+ },
+ {
+ name: "all image ok",
+ pod: createFakePod(true, false, true, 2, 2, 0),
+ expectError: false,
+ vpaEnabled: false,
+ },
+ {
+ name: "remain image not ok",
+ pod: createFakePod(true, false, true, 2, 1, 0),
+ expectError: true,
+ vpaEnabled: false,
+ },
+ {
+ name: "all image ok with resource ok",
+ pod: createFakePod(true, true, true, 2, 2, 2),
+ expectError: false,
+ vpaEnabled: false,
+ },
+ {
+ name: "all image ok with resource not ok",
+ pod: createFakePod(true, true, true, 2, 2, 1),
+ expectError: false,
+ vpaEnabled: false,
+ },
+ {
+ name: "remain image not ok with resource not ok",
+ pod: createFakePod(true, true, true, 2, 1, 1),
+ expectError: true,
+ vpaEnabled: false,
+ },
+ // normal case with feature gate enabled
+ {
+ name: "empty pod",
+ pod: createFakePod(false, false, false, 1, 0, 0),
+ expectError: false,
+ vpaEnabled: true,
+ },
+ {
+ name: "image ok",
+ pod: createFakePod(true, false, true, 1, 1, 0),
+ expectError: false,
+ vpaEnabled: true,
+ },
+ {
+ name: "image not ok",
+ pod: createFakePod(true, false, true, 1, 0, 0),
+ expectError: true,
+ vpaEnabled: true,
+ },
+ {
+ name: "all image ok",
+ pod: createFakePod(true, false, true, 2, 2, 0),
+ expectError: false,
+ vpaEnabled: true,
+ },
+ {
+ name: "remain image not ok",
+ pod: createFakePod(true, false, true, 2, 1, 0),
+ expectError: true,
+ vpaEnabled: true,
+ },
+ {
+ name: "all image ok with resource ok",
+ pod: createFakePod(true, true, true, 2, 2, 2),
+ expectError: false,
+ vpaEnabled: true,
+ },
+ {
+ name: "all image ok with resource not ok",
+ pod: createFakePod(true, true, true, 2, 2, 1),
+ expectError: true,
+ vpaEnabled: true,
+ },
+ {
+ name: "remain image not ok with resource not ok",
+ pod: createFakePod(true, true, true, 2, 1, 1),
+ expectError: true,
+ vpaEnabled: true,
+ },
+ {
+ name: "only resource not ok",
+ pod: createFakePod(true, true, true, 3, 3, 1),
+ expectError: true,
+ vpaEnabled: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ t.Logf("case: %v vpa-enabled: %v", tt.name, tt.vpaEnabled)
+ defer utilfeature.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate,
features.InPlaceWorkloadVerticalScaling, tt.vpaEnabled)() + err := DefaultCheckInPlaceUpdateCompleted(tt.pod) + if tt.expectError { + //t.Logf("get error: %v", err) + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestDefaultCheckPodNeedsBeUnready(t *testing.T) { + // Setup test cases + tests := []struct { + name string + pod *v1.Pod + spec *UpdateSpec + expected bool + vpaEnabled bool + }{ + { + name: "contains ReadinessGates, vpa disabled", + pod: &v1.Pod{ + Spec: v1.PodSpec{ + Containers: []v1.Container{}, + ReadinessGates: []v1.PodReadinessGate{ + {ConditionType: appspub.InPlaceUpdateReady}, + }, + }, + }, + spec: &UpdateSpec{ + VerticalUpdateOnly: true, + ContainerResources: map[string]v1.ResourceRequirements{}, + }, + expected: true, + vpaEnabled: false, + }, + { + name: "contains no ReadinessGates1", + pod: &v1.Pod{ + Spec: v1.PodSpec{ + Containers: []v1.Container{}, + }, + }, + spec: &UpdateSpec{ + VerticalUpdateOnly: true, + ContainerResources: map[string]v1.ResourceRequirements{}, + }, + expected: false, + vpaEnabled: false, + }, + { + name: "contains ReadinessGates, vpa enabled and VerticalUpdateOnly", + pod: &v1.Pod{ + Spec: v1.PodSpec{ + Containers: []v1.Container{}, + ReadinessGates: []v1.PodReadinessGate{ + {ConditionType: appspub.InPlaceUpdateReady}, + }, + }, + }, + spec: &UpdateSpec{ + VerticalUpdateOnly: true, + ContainerResources: map[string]v1.ResourceRequirements{}, + }, + expected: false, + vpaEnabled: true, + }, + { + name: "contains ReadinessGates, vpa enabled but not VerticalUpdateOnly", + pod: &v1.Pod{ + Spec: v1.PodSpec{ + Containers: []v1.Container{}, + ReadinessGates: []v1.PodReadinessGate{ + {ConditionType: appspub.InPlaceUpdateReady}, + }, + }, + }, + spec: &UpdateSpec{ + VerticalUpdateOnly: false, + ContainerResources: map[string]v1.ResourceRequirements{}, + }, + expected: true, + vpaEnabled: true, + }, + { + name: "contains no ReadinessGates2", + pod: &v1.Pod{ + Spec: v1.PodSpec{ + Containers: []v1.Container{}, + }, + }, + spec: &UpdateSpec{ + VerticalUpdateOnly: true, + ContainerResources: map[string]v1.ResourceRequirements{}, + }, + expected: false, + vpaEnabled: true, + }, + { + name: "contains no ReadinessGates3", + pod: &v1.Pod{ + Spec: v1.PodSpec{ + Containers: []v1.Container{}, + }, + }, + spec: &UpdateSpec{ + VerticalUpdateOnly: false, + ContainerResources: map[string]v1.ResourceRequirements{}, + }, + expected: false, + vpaEnabled: true, + }, + { + name: "contains ReadinessGates, other restart container", + pod: &v1.Pod{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "11", + ResizePolicy: []v1.ContainerResizePolicy{ + {ResourceName: v1.ResourceCPU, RestartPolicy: v1.RestartContainer}, + }, + }, + }, + ReadinessGates: []v1.PodReadinessGate{ + {ConditionType: appspub.InPlaceUpdateReady}, + }, + }, + }, + spec: &UpdateSpec{ + VerticalUpdateOnly: true, + ContainerResources: map[string]v1.ResourceRequirements{}, + }, + expected: false, + vpaEnabled: true, + }, + { + name: "contains ReadinessGates, resize cpu and cpu restart policy", + pod: &v1.Pod{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "11", + ResizePolicy: []v1.ContainerResizePolicy{ + {ResourceName: v1.ResourceCPU, RestartPolicy: v1.RestartContainer}, + }, + }, + }, + ReadinessGates: []v1.PodReadinessGate{ + {ConditionType: appspub.InPlaceUpdateReady}, + }, + }, + }, + spec: &UpdateSpec{ + VerticalUpdateOnly: true, + ContainerResources: map[string]v1.ResourceRequirements{ + "11": { + Requests: 
map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: resource.MustParse("100m"), + }, + }, + }, + }, + expected: true, + vpaEnabled: true, + }, + { + name: "contains ReadinessGates, resize mem and cpu restart policy", + pod: &v1.Pod{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "11", + ResizePolicy: []v1.ContainerResizePolicy{ + {ResourceName: v1.ResourceCPU, RestartPolicy: v1.RestartContainer}, + }, + }, + }, + ReadinessGates: []v1.PodReadinessGate{ + {ConditionType: appspub.InPlaceUpdateReady}, + }, + }, + }, + spec: &UpdateSpec{ + VerticalUpdateOnly: true, + ContainerResources: map[string]v1.ResourceRequirements{ + "11": { + Requests: map[v1.ResourceName]resource.Quantity{ + v1.ResourceMemory: resource.MustParse("100Mi"), + }, + }, + }, + }, + expected: false, + vpaEnabled: true, + }, + { + name: "contains ReadinessGates, resize mem and cpu restart policy", + pod: &v1.Pod{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "11", + ResizePolicy: []v1.ContainerResizePolicy{ + {ResourceName: v1.ResourceCPU, RestartPolicy: v1.RestartContainer}, + {ResourceName: v1.ResourceMemory, RestartPolicy: v1.NotRequired}, + }, + }, + }, + ReadinessGates: []v1.PodReadinessGate{ + {ConditionType: appspub.InPlaceUpdateReady}, + }, + }, + }, + spec: &UpdateSpec{ + VerticalUpdateOnly: true, + ContainerResources: map[string]v1.ResourceRequirements{ + "11": { + Requests: map[v1.ResourceName]resource.Quantity{ + v1.ResourceMemory: resource.MustParse("100Mi"), + }, + }, + }, + }, + expected: false, + vpaEnabled: true, + }, + { + name: "contains ReadinessGates, vpa disabled and resize mem and cpu restart policy", + pod: &v1.Pod{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "11", + ResizePolicy: []v1.ContainerResizePolicy{ + {ResourceName: v1.ResourceCPU, RestartPolicy: v1.RestartContainer}, + {ResourceName: v1.ResourceMemory, RestartPolicy: v1.NotRequired}, + }, + }, + }, + ReadinessGates: []v1.PodReadinessGate{ + {ConditionType: appspub.InPlaceUpdateReady}, + }, + }, + }, + spec: &UpdateSpec{ + VerticalUpdateOnly: true, + ContainerResources: map[string]v1.ResourceRequirements{ + "11": { + Requests: map[v1.ResourceName]resource.Quantity{ + v1.ResourceMemory: resource.MustParse("100Mi"), + }, + }, + }, + }, + expected: true, + vpaEnabled: false, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + defer utilfeature.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlaceWorkloadVerticalScaling, tc.vpaEnabled)() + result := defaultCheckPodNeedsBeUnready(tc.pod, tc.spec) + assert.Equal(t, tc.expected, result) + }) + } +} diff --git a/pkg/util/inplaceupdate/inplace_update_test.go b/pkg/util/inplaceupdate/inplace_update_test.go index 7b19102fbe..af3295e7ec 100644 --- a/pkg/util/inplaceupdate/inplace_update_test.go +++ b/pkg/util/inplaceupdate/inplace_update_test.go @@ -64,6 +64,7 @@ func TestCalculateInPlaceUpdateSpec(t *testing.T) { expectedSpec: &UpdateSpec{ Revision: "new-revision", ContainerImages: map[string]string{"c1": "foo2"}, + ContainerResources: map[string]v1.ResourceRequirements{}, ContainerRefMetadata: make(map[string]metav1.ObjectMeta), }, }, @@ -79,6 +80,7 @@ func TestCalculateInPlaceUpdateSpec(t *testing.T) { expectedSpec: &UpdateSpec{ Revision: "new-revision", ContainerImages: map[string]string{"c1": "foo2"}, + ContainerResources: map[string]v1.ResourceRequirements{}, ContainerRefMetadata: make(map[string]metav1.ObjectMeta), }, }, @@ -94,6 +96,7 @@ func TestCalculateInPlaceUpdateSpec(t *testing.T) { 
expectedSpec: &UpdateSpec{ Revision: "new-revision", ContainerImages: map[string]string{"c1": "foo2"}, + ContainerResources: map[string]v1.ResourceRequirements{}, ContainerRefMetadata: make(map[string]metav1.ObjectMeta), MetaDataPatch: []byte(`{"metadata":{"labels":{"k1":"v1"}}}`), }, @@ -110,6 +113,7 @@ func TestCalculateInPlaceUpdateSpec(t *testing.T) { expectedSpec: &UpdateSpec{ Revision: "new-revision", ContainerImages: map[string]string{"c1": "foo2"}, + ContainerResources: map[string]v1.ResourceRequirements{}, ContainerRefMetadata: map[string]metav1.ObjectMeta{"c1": {Labels: map[string]string{"k": "v2"}}}, MetaDataPatch: []byte(`{"metadata":{"labels":{"k1":"v1"}}}`), UpdateEnvFromMetadata: true, @@ -127,6 +131,7 @@ func TestCalculateInPlaceUpdateSpec(t *testing.T) { expectedSpec: &UpdateSpec{ Revision: "new-revision", ContainerImages: map[string]string{"c1": "foo2"}, + ContainerResources: map[string]v1.ResourceRequirements{}, ContainerRefMetadata: make(map[string]metav1.ObjectMeta), MetaDataPatch: []byte(`{"metadata":{"$deleteFromPrimitiveList/finalizers":["fz1"],"$setElementOrder/finalizers":["fz2"],"labels":{"k1":"v1","k2":null}}}`), }, diff --git a/pkg/util/inplaceupdate/inplace_update_vertical.go b/pkg/util/inplaceupdate/inplace_update_vertical.go index b28acab88f..5b7ccf4d7f 100644 --- a/pkg/util/inplaceupdate/inplace_update_vertical.go +++ b/pkg/util/inplaceupdate/inplace_update_vertical.go @@ -17,23 +17,38 @@ limitations under the License. package inplaceupdate import ( - appspub "github.com/openkruise/kruise/apis/apps/pub" + "fmt" + "strconv" + "strings" + + "github.com/appscode/jsonpatch" v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + + appspub "github.com/openkruise/kruise/apis/apps/pub" ) -// For In-place workload vertical scaling +// interface for In-place workload vertical scaling type VerticalUpdateInterface interface { - // Get the expected resource values of the container and its current status + // ValidateResourcePatch validates and applies the resource patch to the UpdateSpec. + ValidateResourcePatch(op *jsonpatch.Operation, oldTemp *v1.PodTemplateSpec, updateSpec *UpdateSpec) error + + // interface for container level vertical scaling api, such as k8s 1.27+ + + // SyncContainerResource Get the expected resource values of the container and its current status SyncContainerResource(container *v1.ContainerStatus, state *appspub.InPlaceUpdateState) - // Pass in the container to be modified and the expected resource values. + // UpdateContainerResource Pass in the container to be modified and the expected resource values. UpdateContainerResource(container *v1.Container, resource *v1.ResourceRequirements) - // Get the expected resource values of all containers in the pod and their current status + // IsContainerUpdateCompleted To determine whether the container has been successfully vertical updated + IsContainerUpdateCompleted(pod *v1.Pod, container *v1.Container, containerStatus *v1.ContainerStatus, lastContainerStatus appspub.InPlaceUpdateContainerStatus) bool + + // interface for pod level vertical scaling api + + // SyncPodResource Get the expected resource values of all containers in the pod and their current status SyncPodResource(pod *v1.Pod, state *appspub.InPlaceUpdateState) - // All containers of a pod can be updated at once within this interface. + // UpdatePodResource All containers of a pod can be updated at once within this interface. 
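+ // NOTE: in this change only the container-level methods above are exercised; the default
+ // VerticalUpdate operator currently keeps the pod-level SyncPodResource/UpdatePodResource hooks as no-op placeholders.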
UpdatePodResource(pod *v1.Pod)
- // To determine whether the container has been successfully vertical updated
- IsContainerUpdateCompleted(pod *v1.Pod, container *v1.Container, containerStatus *v1.ContainerStatus, lastContainerStatus appspub.InPlaceUpdateContainerStatus) bool
- // To determine whether the pod has been successfully vertical updated
+ // IsPodUpdateCompleted To determine whether the pod has been successfully vertically updated
IsPodUpdateCompleted(pod *v1.Pod) bool
}
@@ -41,7 +56,10 @@ var verticalUpdateOperator VerticalUpdateInterface = nil
// To register vertical update operations,
// you can register different vertical update implementations here
-func registerVerticalUpdate() {
+func init() {
+ // Now, we assume that there is a single standard per cluster, so register in init()
+ // TODO(Abner-1): Perhaps we should dynamically select the verticalUpdateOperator based on the pod metadata being processed.
+ // Please give us more suggestions if needed.
if verticalUpdateOperator == nil {
verticalUpdateOperator = &VerticalUpdate{}
}
@@ -52,25 +70,157 @@ type VerticalUpdate struct{}
var _ VerticalUpdateInterface = &VerticalUpdate{}
+func (v *VerticalUpdate) ValidateResourcePatch(op *jsonpatch.Operation, oldTemp *v1.PodTemplateSpec, updateSpec *UpdateSpec) error {
+ // for example: /spec/containers/0/resources/limits/cpu
+ words := strings.Split(op.Path, "/")
+ if len(words) != 7 {
+ return fmt.Errorf("invalid resource path: %s", op.Path)
+ }
+ idx, err := strconv.Atoi(words[3])
+ if err != nil || len(oldTemp.Spec.Containers) <= idx {
+ return fmt.Errorf("invalid container index: %s", op.Path)
+ }
+ quantity, err := resource.ParseQuantity(op.Value.(string))
+ if err != nil {
+ return fmt.Errorf("parse quantity error: %v", err)
+ }
+
+ if !v.CanResourcesResizeInPlace(words[6]) {
+ return fmt.Errorf("disallowed inplace update resource: %s", words[6])
+ }
+
+ if _, ok := updateSpec.ContainerResources[oldTemp.Spec.Containers[idx].Name]; !ok {
+ updateSpec.ContainerResources[oldTemp.Spec.Containers[idx].Name] = v1.ResourceRequirements{
+ Limits: make(v1.ResourceList),
+ Requests: make(v1.ResourceList),
+ }
+ }
+ switch words[5] {
+ case "limits":
+ updateSpec.ContainerResources[oldTemp.Spec.Containers[idx].Name].Limits[v1.ResourceName(words[6])] = quantity
+ case "requests":
+ updateSpec.ContainerResources[oldTemp.Spec.Containers[idx].Name].Requests[v1.ResourceName(words[6])] = quantity
+ }
+ return nil
+}
+
// Get the resource status from the container and synchronize it to state
func (v *VerticalUpdate) SyncContainerResource(container *v1.ContainerStatus, state *appspub.InPlaceUpdateState) {
- // TODO(LavenderQAQ): Need to write the status synchronization module after api upgrade
+ if container == nil {
+ return
+ }
+
+ if state.LastContainerStatuses == nil {
+ state.LastContainerStatuses = make(map[string]appspub.InPlaceUpdateContainerStatus)
+ }
+ c := state.LastContainerStatuses[container.Name]
+ if state.LastContainerStatuses[container.Name].Resources.Limits == nil {
+ c.Resources.Limits = make(map[v1.ResourceName]resource.Quantity)
+ }
+ if state.LastContainerStatuses[container.Name].Resources.Requests == nil {
+ c.Resources.Requests = make(map[v1.ResourceName]resource.Quantity)
+ }
+
+ if container.Resources != nil {
+ for key, quantity := range container.Resources.Limits {
+ if !v.CanResourcesResizeInPlace(string(key)) {
+ continue
+ }
+ c.Resources.Limits[key] = quantity
+ }
+ for key, quantity := range container.Resources.Requests {
+ if !v.CanResourcesResizeInPlace(string(key))
{ + continue + } + c.Resources.Requests[key] = quantity + } + } + // Store container infos whether c is empty or not. + // It will be used in IsContainerUpdateCompleted to check container resources is updated. + // case: pending pod can also be resize resource + state.LastContainerStatuses[container.Name] = c } // UpdateResource implements vertical updates by directly modifying the container's resources, // conforming to the k8s community standard func (v *VerticalUpdate) UpdateContainerResource(container *v1.Container, newResource *v1.ResourceRequirements) { + if container == nil || newResource == nil { + return + } for key, quantity := range newResource.Limits { + if !v.CanResourcesResizeInPlace(string(key)) { + continue + } container.Resources.Limits[key] = quantity } for key, quantity := range newResource.Requests { + if !v.CanResourcesResizeInPlace(string(key)) { + continue + } container.Resources.Requests[key] = quantity } } +// IsContainerUpdateCompleted directly determines whether the current container is vertically updated by the spec and status of the container, +// which conforms to the k8s community standard +// +// lstatus: lastContainerStatus record last resource:last container status => last container spec +// cspec: container spec +// cstatus: container status +// +// lstatus cspec cstatus +// +// 1. exist exist exist => compare cspec and cstatus (important) +// 2. exist empty nil => change qos is forbidden +// 3. exist empty empty => change qos is forbidden +// 4. exist empty exist => change qos is forbidden +// 5. empty empty nil => this container is not inplace-updated by kruise, ignore +// 6. empty exist exist => this container is not inplace-updated by kruise, ignore +// 7. empty exist exist => this container is not inplace-updated by kruise, ignore +func (v *VerticalUpdate) IsContainerUpdateCompleted(pod *v1.Pod, container *v1.Container, containerStatus *v1.ContainerStatus, lastContainerStatus appspub.InPlaceUpdateContainerStatus) bool { + // case 5-7 + if lastContainerStatus.Resources.Limits == nil && lastContainerStatus.Resources.Requests == nil { + return true + } + if lastContainerStatus.Resources.Limits != nil { + // case 2-4 limits + if container.Resources.Limits == nil || containerStatus == nil || + containerStatus.Resources == nil || containerStatus.Resources.Limits == nil { + return false + } + // case 1 + for name, value := range container.Resources.Limits { + // ignore resources key which can not inplace resize + if !v.CanResourcesResizeInPlace(string(name)) { + continue + } + if !value.Equal(containerStatus.Resources.Limits[name]) { + return false + } + } + } + + if lastContainerStatus.Resources.Requests != nil { + // case 2-4 requests + if container.Resources.Requests == nil || containerStatus == nil || + containerStatus.Resources == nil || containerStatus.Resources.Requests == nil { + return false + } + // case 1 + for name, value := range container.Resources.Requests { + if !v.CanResourcesResizeInPlace(string(name)) { + continue + } + if !value.Equal(containerStatus.Resources.Requests[name]) { + return false + } + } + } + return true +} + // Get the resource status from the pod and synchronize it to state func (v *VerticalUpdate) SyncPodResource(pod *v1.Pod, state *appspub.InPlaceUpdateState) { - // TODO(LavenderQAQ): Need to write the status synchronization module after api upgrade } // For the community-standard vertical scale-down implementation, @@ -79,18 +229,23 @@ func (v *VerticalUpdate) UpdatePodResource(pod *v1.Pod) { return } -// 
IsUpdateCompleted directly determines whether the current container is vertically updated by the spec and status of the container, -// which conforms to the k8s community standard -func (v *VerticalUpdate) IsContainerUpdateCompleted(pod *v1.Pod, container *v1.Container, containerStatus *v1.ContainerStatus, lastContainerStatus appspub.InPlaceUpdateContainerStatus) bool { - return true -} - // IsUpdateCompleted directly determines whether the current pod is vertically updated by the spec and status of the container, // which conforms to the k8s community standard func (v *VerticalUpdate) IsPodUpdateCompleted(pod *v1.Pod) bool { return true } +// only cpu and memory are allowed to be inplace updated +var allowedResizeResourceKey = map[string]bool{ + string(v1.ResourceCPU): true, + string(v1.ResourceMemory): true, +} + +func (v *VerticalUpdate) CanResourcesResizeInPlace(resourceKey string) bool { + allowed, exist := allowedResizeResourceKey[resourceKey] + return exist && allowed +} + // Internal implementation of vertical updates // type VerticalUpdateInternal struct{} diff --git a/pkg/util/inplaceupdate/inplace_update_vertical_test.go b/pkg/util/inplaceupdate/inplace_update_vertical_test.go new file mode 100644 index 0000000000..11d4537545 --- /dev/null +++ b/pkg/util/inplaceupdate/inplace_update_vertical_test.go @@ -0,0 +1,445 @@ +package inplaceupdate + +import ( + "encoding/json" + "reflect" + "testing" + + "github.com/appscode/jsonpatch" + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + + appspub "github.com/openkruise/kruise/apis/apps/pub" +) + +func TestValidateResourcePatch(t *testing.T) { + oldTemp := &v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "test-container", + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")}, + Requests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("512Mi")}, + }, + }, + { + Name: "test-container2", + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")}, + Requests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("512Mi")}, + }, + }, + }, + }, + } + + tests := []struct { + name string + op *jsonpatch.Operation + validateFn func(updateSpec *UpdateSpec) bool + expectedErr bool + }{ + { + name: "valid patch for cpu limits", + op: &jsonpatch.Operation{ + Path: "/spec/containers/0/resources/limits/cpu", + Value: "200m", + }, + validateFn: func(updateSpec *UpdateSpec) bool { + res := updateSpec.ContainerResources["test-container"].Limits + return res.Cpu().String() == "200m" + }, + expectedErr: false, + }, + { + name: "valid patch for memory requests", + op: &jsonpatch.Operation{ + Path: "/spec/containers/0/resources/requests/memory", + Value: "1Gi", + }, + validateFn: func(updateSpec *UpdateSpec) bool { + res := updateSpec.ContainerResources["test-container"].Requests + return res.Memory().String() == "1Gi" + }, + expectedErr: false, + }, + { + name: "invalid patch for non-existent container", + op: &jsonpatch.Operation{ + Path: "/spec/containers/2/resources/limits/cpu", + Value: "200m", + }, + expectedErr: true, + }, + { + name: "invalid patch for non-standard resource", + op: &jsonpatch.Operation{ + Path: "/spec/containers/0/resources/limits/gpu", + Value: "1", + }, + expectedErr: true, + }, + { + name: "invalid patch for non-quantity value", + op: &jsonpatch.Operation{ + Path: "/spec/containers/0/resources/limits/cpu", + Value: "not-a-quantity", + 
}, + expectedErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + updateSpec := &UpdateSpec{ + ContainerResources: make(map[string]v1.ResourceRequirements), + } + vu := &VerticalUpdate{} + err := vu.ValidateResourcePatch(tt.op, oldTemp, updateSpec) + if tt.expectedErr { + assert.NotNil(t, err) + } else { + assert.Nil(t, err) + } + if tt.validateFn != nil { + ok := tt.validateFn(updateSpec) + assert.True(t, ok) + } + }) + } +} + +func TestSyncContainerResource(t *testing.T) { + vu := VerticalUpdate{} + + type testcase struct { + name string + containerStatus *v1.ContainerStatus + state *appspub.InPlaceUpdateState + expectedState *appspub.InPlaceUpdateState + keyNotExist bool + } + + container := &v1.ContainerStatus{ + Name: "test-container", + Resources: &v1.ResourceRequirements{ + Limits: map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: resource.MustParse("1"), + v1.ResourceMemory: resource.MustParse("2Gi"), + }, + Requests: map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: resource.MustParse("100m"), + v1.ResourceMemory: resource.MustParse("1Gi"), + }, + }, + } + testcases := []testcase{ + { + name: "normal case", + containerStatus: container, + state: &appspub.InPlaceUpdateState{ + LastContainerStatuses: map[string]appspub.InPlaceUpdateContainerStatus{ + "test-container": { + Resources: v1.ResourceRequirements{ + Limits: map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: resource.MustParse("2"), + v1.ResourceMemory: resource.MustParse("7Gi"), + }, + Requests: map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: resource.MustParse("1"), + v1.ResourceMemory: resource.MustParse("3Gi"), + }, + }, + }, + }, + }, + expectedState: &appspub.InPlaceUpdateState{ + LastContainerStatuses: map[string]appspub.InPlaceUpdateContainerStatus{ + "test-container": { + Resources: v1.ResourceRequirements{ + Limits: map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: resource.MustParse("1"), + v1.ResourceMemory: resource.MustParse("2Gi"), + }, + Requests: map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: resource.MustParse("100m"), + v1.ResourceMemory: resource.MustParse("1Gi"), + }, + }, + }, + }, + }, + }, + { + name: "empty LastContainerStatuses in state", + containerStatus: container, + state: &appspub.InPlaceUpdateState{ + LastContainerStatuses: nil, + }, + expectedState: &appspub.InPlaceUpdateState{ + LastContainerStatuses: map[string]appspub.InPlaceUpdateContainerStatus{ + "test-container": { + Resources: v1.ResourceRequirements{ + Limits: map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: resource.MustParse("1"), + v1.ResourceMemory: resource.MustParse("2Gi"), + }, + Requests: map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: resource.MustParse("100m"), + v1.ResourceMemory: resource.MustParse("1Gi"), + }, + }, + }, + }, + }, + }, + { + name: "nil containerStatus", + containerStatus: nil, + state: &appspub.InPlaceUpdateState{ + LastContainerStatuses: map[string]appspub.InPlaceUpdateContainerStatus{}, + }, + expectedState: &appspub.InPlaceUpdateState{ + LastContainerStatuses: map[string]appspub.InPlaceUpdateContainerStatus{}, + }, + keyNotExist: true, + }, + { + name: "nil containerStatus resources", + containerStatus: &v1.ContainerStatus{ + Name: "test-container", + Resources: nil, + }, + state: &appspub.InPlaceUpdateState{ + LastContainerStatuses: map[string]appspub.InPlaceUpdateContainerStatus{}, + }, + expectedState: &appspub.InPlaceUpdateState{ + LastContainerStatuses: 
map[string]appspub.InPlaceUpdateContainerStatus{"test-container": { + Resources: v1.ResourceRequirements{ + Requests: map[v1.ResourceName]resource.Quantity{}, + Limits: map[v1.ResourceName]resource.Quantity{}, + }, + }}, + }, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + vu.SyncContainerResource(tc.containerStatus, tc.state) + actualContainerStatus, ok := tc.state.LastContainerStatuses[container.Name] + assert.Equal(t, !tc.keyNotExist, ok, "Container status should be present in the state: %v", tc.keyNotExist) + b, _ := json.Marshal(actualContainerStatus) + t.Logf("container status: %v", string(b)) + assert.True(t, reflect.DeepEqual(tc.expectedState.LastContainerStatuses[container.Name], actualContainerStatus), "Container status should match expected state") + }) + } + +} + +func TestIsContainerUpdateCompleted(t *testing.T) { + v := VerticalUpdate{} + + tests := []struct { + name string + container v1.Container + containerStatus v1.ContainerStatus + lastContainerStatus appspub.InPlaceUpdateContainerStatus + expectedResult bool + }{ + { + name: "Test status ok", + container: v1.Container{ + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{"cpu": resource.MustParse("100m"), "memory": resource.MustParse("128Mi")}, + Requests: v1.ResourceList{"cpu": resource.MustParse("50m"), "memory": resource.MustParse("64Mi")}, + }, + }, + containerStatus: v1.ContainerStatus{ + Resources: &v1.ResourceRequirements{ + Limits: v1.ResourceList{"cpu": resource.MustParse("100m"), "memory": resource.MustParse("128Mi")}, + Requests: v1.ResourceList{"cpu": resource.MustParse("50m"), "memory": resource.MustParse("64Mi")}, + }, + }, + lastContainerStatus: appspub.InPlaceUpdateContainerStatus{ + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{"cpu": resource.MustParse("200m")}, + Requests: v1.ResourceList{"cpu": resource.MustParse("50m")}, + }, + }, + expectedResult: true, + }, + { + name: "Test status not ok", + container: v1.Container{ + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{"cpu": resource.MustParse("100m")}, + Requests: v1.ResourceList{"cpu": resource.MustParse("50m")}, + }, + }, + containerStatus: v1.ContainerStatus{ + Resources: &v1.ResourceRequirements{ + Limits: v1.ResourceList{"cpu": resource.MustParse("200m")}, + Requests: v1.ResourceList{"cpu": resource.MustParse("50m")}, + }, + }, + lastContainerStatus: appspub.InPlaceUpdateContainerStatus{ + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{"cpu": resource.MustParse("200m")}, + Requests: v1.ResourceList{"cpu": resource.MustParse("50m")}, + }, + }, + expectedResult: false, + }, + { + name: "Test status not ok", + container: v1.Container{ + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{v1.ResourceMemory: resource.MustParse("100Mi")}, + Requests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("50Mi")}, + }, + }, + containerStatus: v1.ContainerStatus{ + Resources: &v1.ResourceRequirements{ + Limits: v1.ResourceList{v1.ResourceMemory: resource.MustParse("200Mi")}, + Requests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("50Mi")}, + }, + }, + lastContainerStatus: appspub.InPlaceUpdateContainerStatus{ + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{v1.ResourceMemory: resource.MustParse("200Mi")}, + Requests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("50Mi")}, + }, + }, + expectedResult: false, + }, + { + name: "Test status not ok", + container: v1.Container{ + Resources: v1.ResourceRequirements{ + Limits: 
v1.ResourceList{v1.ResourceMemory: resource.MustParse("100Mi")}, + }, + }, + containerStatus: v1.ContainerStatus{ + Resources: &v1.ResourceRequirements{ + Limits: v1.ResourceList{v1.ResourceMemory: resource.MustParse("200Mi")}, + }, + }, + lastContainerStatus: appspub.InPlaceUpdateContainerStatus{ + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{v1.ResourceMemory: resource.MustParse("200Mi")}, + }, + }, + expectedResult: false, + }, + { + name: "Test status not ok", + container: v1.Container{ + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("100Mi")}, + }, + }, + containerStatus: v1.ContainerStatus{ + Resources: &v1.ResourceRequirements{ + Requests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("50Mi")}, + }, + }, + lastContainerStatus: appspub.InPlaceUpdateContainerStatus{ + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("50Mi")}, + }, + }, + expectedResult: false, + }, + { + name: "container and containerStatus are empty", + container: v1.Container{ + Resources: v1.ResourceRequirements{}, + }, + containerStatus: v1.ContainerStatus{ + Resources: &v1.ResourceRequirements{}, + }, + lastContainerStatus: appspub.InPlaceUpdateContainerStatus{}, + expectedResult: true, + }, + { + name: "empty container and nil containerStatus", + container: v1.Container{ + Resources: v1.ResourceRequirements{}, + }, + containerStatus: v1.ContainerStatus{ + Resources: nil, + }, + lastContainerStatus: appspub.InPlaceUpdateContainerStatus{}, + expectedResult: true, + }, + + { + name: "container is empty", + container: v1.Container{ + Resources: v1.ResourceRequirements{}, + }, + containerStatus: v1.ContainerStatus{ + Resources: &v1.ResourceRequirements{ + Limits: v1.ResourceList{"cpu": resource.MustParse("200m")}, + Requests: v1.ResourceList{"cpu": resource.MustParse("50m")}, + }, + }, + lastContainerStatus: appspub.InPlaceUpdateContainerStatus{ + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{"cpu": resource.MustParse("200m")}, + Requests: v1.ResourceList{"cpu": resource.MustParse("50m")}, + }, + }, + expectedResult: false, + }, + { + name: "containerStatus is nil", + container: v1.Container{ + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{"cpu": resource.MustParse("200m")}, + Requests: v1.ResourceList{"cpu": resource.MustParse("50m")}, + }, + }, + containerStatus: v1.ContainerStatus{ + Resources: nil, + }, + lastContainerStatus: appspub.InPlaceUpdateContainerStatus{ + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{"cpu": resource.MustParse("200m")}, + Requests: v1.ResourceList{"cpu": resource.MustParse("50m")}, + }, + }, + expectedResult: false, + }, + { + name: "containerStatus is empty", + container: v1.Container{ + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{"cpu": resource.MustParse("200m")}, + Requests: v1.ResourceList{"cpu": resource.MustParse("50m")}, + }, + }, + containerStatus: v1.ContainerStatus{ + Resources: &v1.ResourceRequirements{}, + }, + lastContainerStatus: appspub.InPlaceUpdateContainerStatus{ + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{"cpu": resource.MustParse("200m")}, + Requests: v1.ResourceList{"cpu": resource.MustParse("50m")}, + }, + }, + expectedResult: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + pod := &v1.Pod{} + result := v.IsContainerUpdateCompleted(pod, &tt.container, &tt.containerStatus, tt.lastContainerStatus) + 
assert.Equal(t, tt.expectedResult, result) + }) + } +} diff --git a/pkg/webhook/pod/validating/pod_unavailable_budget.go b/pkg/webhook/pod/validating/pod_unavailable_budget.go index a7c1047450..39a8ceb43a 100644 --- a/pkg/webhook/pod/validating/pod_unavailable_budget.go +++ b/pkg/webhook/pod/validating/pod_unavailable_budget.go @@ -19,8 +19,6 @@ package validating import ( "context" - policyv1alpha1 "github.com/openkruise/kruise/apis/policy/v1alpha1" - "github.com/openkruise/kruise/pkg/control/pubcontrol" admissionv1 "k8s.io/api/admission/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -29,6 +27,9 @@ import ( "k8s.io/klog/v2" "k8s.io/kubernetes/pkg/apis/policy" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + policyv1alpha1 "github.com/openkruise/kruise/apis/policy/v1alpha1" + "github.com/openkruise/kruise/pkg/control/pubcontrol" ) // parameters: diff --git a/pkg/webhook/pod/validating/workloadspread.go b/pkg/webhook/pod/validating/workloadspread.go index 64c1709579..217a239e43 100644 --- a/pkg/webhook/pod/validating/workloadspread.go +++ b/pkg/webhook/pod/validating/workloadspread.go @@ -19,7 +19,6 @@ package validating import ( "context" - wsutil "github.com/openkruise/kruise/pkg/util/workloadspread" admissionv1 "k8s.io/api/admission/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -28,6 +27,8 @@ import ( "k8s.io/klog/v2" "k8s.io/kubernetes/pkg/apis/policy" + wsutil "github.com/openkruise/kruise/pkg/util/workloadspread" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" ) diff --git a/test/e2e/apps/daemonset.go b/test/e2e/apps/daemonset.go index e130b65a95..007207d239 100644 --- a/test/e2e/apps/daemonset.go +++ b/test/e2e/apps/daemonset.go @@ -7,11 +7,6 @@ import ( "github.com/onsi/ginkgo" "github.com/onsi/gomega" - appspub "github.com/openkruise/kruise/apis/apps/pub" - appsv1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1" - kruiseclientset "github.com/openkruise/kruise/pkg/client/clientset/versioned" - "github.com/openkruise/kruise/pkg/util/lifecycle" - "github.com/openkruise/kruise/test/e2e/framework" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" @@ -23,6 +18,12 @@ import ( clientset "k8s.io/client-go/kubernetes" podutil "k8s.io/kubernetes/pkg/api/v1/pod" daemonutil "k8s.io/kubernetes/pkg/controller/daemon/util" + + appspub "github.com/openkruise/kruise/apis/apps/pub" + appsv1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1" + kruiseclientset "github.com/openkruise/kruise/pkg/client/clientset/versioned" + "github.com/openkruise/kruise/pkg/util/lifecycle" + "github.com/openkruise/kruise/test/e2e/framework" ) var _ = SIGDescribe("DaemonSet", func() { @@ -331,12 +332,6 @@ var _ = SIGDescribe("DaemonSet", func() { v1.ResourceCPU: resource.MustParse("100m"), }, } - ads.Spec.Template.Spec.Containers[0].Env = []v1.EnvVar{ - { - Name: "TEST", - Value: "", - }, - } ds, err := tester.CreateDaemonSet(ads) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -357,12 +352,8 @@ var _ = SIGDescribe("DaemonSet", func() { v1.ResourceCPU: resource.MustParse("120m"), }, } - ads.Spec.Template.Spec.Containers[0].Env = []v1.EnvVar{ - { - Name: "TEST", - Value: "TEST", - }, - } + // when enable InPlaceWorkloadVerticalScaling feature, just resize request will not delete pod + ads.Spec.Template.Spec.Containers[0].Env = append(ads.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "test", Value: "test"}) }) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error 
to update daemon") diff --git a/test/e2e/apps/inplace_vpa.go b/test/e2e/apps/inplace_vpa.go new file mode 100644 index 0000000000..a483327107 --- /dev/null +++ b/test/e2e/apps/inplace_vpa.go @@ -0,0 +1,1106 @@ +package apps + +import ( + "context" + "encoding/json" + "fmt" + "strconv" + "strings" + "time" + + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" + apps "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/apimachinery/pkg/util/wait" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/pkg/util/slice" + imageutils "k8s.io/kubernetes/test/utils/image" + "k8s.io/utils/diff" + utilpointer "k8s.io/utils/pointer" + + appspub "github.com/openkruise/kruise/apis/apps/pub" + appsv1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1" + appsv1beta1 "github.com/openkruise/kruise/apis/apps/v1beta1" + kruiseclientset "github.com/openkruise/kruise/pkg/client/clientset/versioned" + "github.com/openkruise/kruise/pkg/util" + "github.com/openkruise/kruise/test/e2e/framework" +) + +var _ = SIGDescribe("InplaceVPA", func() { + f := framework.NewDefaultFramework("inplace-vpa") + var ns string + var c clientset.Interface + var kc kruiseclientset.Interface + var tester *framework.CloneSetTester + var randStr string + IsKubernetesVersionLessThan127 := func() bool { + if v, err := c.Discovery().ServerVersion(); err != nil { + framework.Logf("Failed to discovery server version: %v", err) + } else if minor, err := strconv.Atoi(v.Minor); err != nil || minor < 27 { + return true + } + return false + } + + ginkgo.BeforeEach(func() { + c = f.ClientSet + kc = f.KruiseClientSet + ns = f.Namespace.Name + tester = framework.NewCloneSetTester(c, kc, ns) + randStr = rand.String(10) + + if IsKubernetesVersionLessThan127() { + ginkgo.Skip("kip this e2e case, it can only run on K8s >= 1.27") + } + }) + + oldResource := v1.ResourceRequirements{ + Requests: map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: resource.MustParse("200m"), + v1.ResourceMemory: resource.MustParse("200Mi"), + }, + Limits: map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: resource.MustParse("1"), + v1.ResourceMemory: resource.MustParse("1Gi"), + }, + } + newResource := v1.ResourceRequirements{ + Requests: map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: resource.MustParse("100m"), + v1.ResourceMemory: resource.MustParse("100Mi"), + }, + Limits: map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: resource.MustParse("800m"), + v1.ResourceMemory: resource.MustParse("800Mi"), + }, + } + // TODO(Abner-1)update only inplace resources may fail in kind e2e. 
+ // I will resolve it in another PR + framework.KruisePDescribe("CloneSet Updating with only inplace resource", func() { + var err error + testUpdateResource := func(fn func(spec *v1.PodSpec), resizePolicy []v1.ContainerResizePolicy) { + cs := tester.NewCloneSet("clone-"+randStr, 1, appsv1alpha1.CloneSetUpdateStrategy{Type: appsv1alpha1.InPlaceIfPossibleCloneSetUpdateStrategyType}) + cs.Spec.Template.Spec.Containers[0].ResizePolicy = resizePolicy + imageConfig := imageutils.GetConfig(imageutils.Nginx) + imageConfig.SetRegistry("docker.io/library") + imageConfig.SetVersion("alpine") + cs.Spec.Template.Spec.Containers[0].Image = imageConfig.GetE2EImage() + cs, err = tester.CreateCloneSet(cs) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(cs.Spec.UpdateStrategy.Type).To(gomega.Equal(appsv1alpha1.InPlaceIfPossibleCloneSetUpdateStrategyType)) + + ginkgo.By("Wait for replicas satisfied") + gomega.Eventually(func() int32 { + cs, err = tester.GetCloneSet(cs.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + return cs.Status.Replicas + }, 3*time.Second, time.Second).Should(gomega.Equal(int32(1))) + + ginkgo.By("Wait for all pods ready") + gomega.Eventually(func() int32 { + cs, err = tester.GetCloneSet(cs.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + return cs.Status.ReadyReplicas + }, 60*time.Second, 3*time.Second).Should(gomega.Equal(int32(1))) + + pods, err := tester.ListPodsForCloneSet(cs.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(len(pods)).Should(gomega.Equal(1)) + oldPodResource := getPodResource(pods[0]) + + err = tester.UpdateCloneSet(cs.Name, func(cs *appsv1alpha1.CloneSet) { + if cs.Annotations == nil { + cs.Annotations = map[string]string{} + } + fn(&cs.Spec.Template.Spec) + }) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + lastGeneration := cs.Generation + ginkgo.By("Wait for CloneSet generation consistent") + gomega.Eventually(func() bool { + cs, err = tester.GetCloneSet(cs.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + return cs.Generation == cs.Status.ObservedGeneration + }, 10*time.Second, 3*time.Second).Should(gomega.Equal(true)) + + framework.Logf("CloneSet last %v, generation %v, observedGeneration %v", lastGeneration, cs.Generation, cs.Status.ObservedGeneration) + start := time.Now() + ginkgo.By("Wait for all pods updated and ready") + a, b, c := getResourcesInfo(pods[0]) + gomega.Eventually(func() int32 { + cs, err = tester.GetCloneSet(cs.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + pods, err = tester.ListPodsForCloneSet(cs.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + a1, b1, c1 := getResourcesInfo(pods[0]) + if a1 != a || b1 != b || c1 != c { + framework.Logf("updateSpec %v", a1) + framework.Logf("spec %v", b1) + framework.Logf("container status %v ", c1) + a, b, c = a1, b1, c1 + } + SkipTestWhenCgroupError(pods[0]) + + return cs.Status.UpdatedAvailableReplicas + }, 600*time.Second, 3*time.Second).Should(gomega.Equal(int32(1))) + duration := time.Since(start) + framework.Logf("cloneset with replica resize resource consume %vs", duration.Seconds()) + + ginkgo.By("Verify the resource changed and status=spec") + pods, err = tester.ListPodsForCloneSet(cs.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(checkPodResource(pods, oldPodResource, []string{"redis"})).Should(gomega.Equal(true)) + } + testWithResizePolicy := func(resizePolicy []v1.ContainerResizePolicy) { + // This can't be Conformance yet. 
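+ // NOTE: the resize-only specs below are registered with ginkgo.PIt (pending), presumably because of the kind e2e flakiness mentioned in the TODO above.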
+ ginkgo.PIt("in-place update resources scale down 1", func() { + fn := func(spec *v1.PodSpec) { + ginkgo.By("scale down cpu and memory request") + spec.Containers[0].Resources.Requests[v1.ResourceCPU] = resource.MustParse("100m") + spec.Containers[0].Resources.Requests[v1.ResourceMemory] = resource.MustParse("100Mi") + } + testUpdateResource(fn, resizePolicy) + }) + ginkgo.PIt("in-place update resources scale down 2", func() { + fn := func(spec *v1.PodSpec) { + ginkgo.By("scale down cpu and memory limit") + spec.Containers[0].Resources.Limits[v1.ResourceCPU] = resource.MustParse("800m") + spec.Containers[0].Resources.Limits[v1.ResourceMemory] = resource.MustParse("800Mi") + } + testUpdateResource(fn, resizePolicy) + }) + ginkgo.PIt("in-place update resources scale down 3", func() { + fn := func(spec *v1.PodSpec) { + ginkgo.By("scale down cpu and memory request&limit") + spec.Containers[0].Resources.Requests[v1.ResourceCPU] = resource.MustParse("100m") + spec.Containers[0].Resources.Requests[v1.ResourceMemory] = resource.MustParse("100Mi") + spec.Containers[0].Resources.Limits[v1.ResourceCPU] = resource.MustParse("800m") + spec.Containers[0].Resources.Limits[v1.ResourceMemory] = resource.MustParse("800Mi") + } + testUpdateResource(fn, resizePolicy) + }) + + // This can't be Conformance yet. + ginkgo.PIt("in-place update resources scale up 1", func() { + fn := func(spec *v1.PodSpec) { + ginkgo.By("scale up cpu and memory request") + spec.Containers[0].Resources.Requests[v1.ResourceCPU] = resource.MustParse("300m") + spec.Containers[0].Resources.Requests[v1.ResourceMemory] = resource.MustParse("300Mi") + } + testUpdateResource(fn, resizePolicy) + }) + ginkgo.PIt("in-place update resources scale up 2", func() { + fn := func(spec *v1.PodSpec) { + ginkgo.By("scale up cpu and memory limit") + spec.Containers[0].Resources.Limits[v1.ResourceCPU] = resource.MustParse("2") + spec.Containers[0].Resources.Limits[v1.ResourceMemory] = resource.MustParse("2Gi") + } + testUpdateResource(fn, resizePolicy) + }) + ginkgo.PIt("in-place update resources scale up 3", func() { + fn := func(spec *v1.PodSpec) { + ginkgo.By("scale up cpu and memory request&limit") + spec.Containers[0].Resources.Requests[v1.ResourceCPU] = resource.MustParse("300m") + spec.Containers[0].Resources.Requests[v1.ResourceMemory] = resource.MustParse("300Mi") + spec.Containers[0].Resources.Limits[v1.ResourceCPU] = resource.MustParse("2") + spec.Containers[0].Resources.Limits[v1.ResourceMemory] = resource.MustParse("2Gi") + } + testUpdateResource(fn, resizePolicy) + }) + + // This can't be Conformance yet. + ginkgo.PIt("in-place update resources scale up only cpu 1", func() { + fn := func(spec *v1.PodSpec) { + ginkgo.By("scale up cpu request") + spec.Containers[0].Resources.Requests[v1.ResourceCPU] = resource.MustParse("300m") + } + testUpdateResource(fn, resizePolicy) + }) + ginkgo.PIt("in-place update resources scale up only cpu limit", func() { + fn := func(spec *v1.PodSpec) { + ginkgo.By("scale up cpu limit") + spec.Containers[0].Resources.Limits[v1.ResourceCPU] = resource.MustParse("2") + } + testUpdateResource(fn, resizePolicy) + }) + ginkgo.PIt("in-place update resources scale up only cpu 3", func() { + fn := func(spec *v1.PodSpec) { + ginkgo.By("scale up cpu request&limit") + spec.Containers[0].Resources.Requests[v1.ResourceCPU] = resource.MustParse("300m") + spec.Containers[0].Resources.Limits[v1.ResourceCPU] = resource.MustParse("2") + } + testUpdateResource(fn, resizePolicy) + }) + + // This can't be Conformance yet. 
+ ginkgo.PIt("in-place update resources scale up only mem 1", func() { + fn := func(spec *v1.PodSpec) { + ginkgo.By("scale up memory request") + spec.Containers[0].Resources.Requests[v1.ResourceMemory] = resource.MustParse("300Mi") + } + testUpdateResource(fn, resizePolicy) + }) + ginkgo.PIt("in-place update resources scale up only mem limit", func() { + fn := func(spec *v1.PodSpec) { + ginkgo.By("scale up memory limit") + spec.Containers[0].Resources.Limits[v1.ResourceMemory] = resource.MustParse("2Gi") + } + testUpdateResource(fn, resizePolicy) + }) + ginkgo.PIt("in-place update resources scale up only mem 3", func() { + fn := func(spec *v1.PodSpec) { + ginkgo.By("scale up memory request&limit") + spec.Containers[0].Resources.Requests[v1.ResourceMemory] = resource.MustParse("300Mi") + spec.Containers[0].Resources.Limits[v1.ResourceMemory] = resource.MustParse("2Gi") + } + testUpdateResource(fn, resizePolicy) + }) + } + + ginkgo.By("inplace update resources with RestartContainer policy") + testWithResizePolicy([]v1.ContainerResizePolicy{ + { + ResourceName: v1.ResourceCPU, RestartPolicy: v1.RestartContainer, + }, + { + ResourceName: v1.ResourceMemory, RestartPolicy: v1.RestartContainer, + }, + }) + }) + + framework.KruiseDescribe("CloneSet Updating with inplace resource", func() { + var err error + testWithResizePolicy := func(resizePolicy []v1.ContainerResizePolicy) { + testUpdateResource := func(fn func(pod *v1.PodTemplateSpec), resizePolicy []v1.ContainerResizePolicy) { + j, _ := json.Marshal(resizePolicy) + ginkgo.By(fmt.Sprintf("resize policy %v", string(j))) + cs := tester.NewCloneSet("clone-"+randStr, 1, appsv1alpha1.CloneSetUpdateStrategy{Type: appsv1alpha1.InPlaceIfPossibleCloneSetUpdateStrategyType}) + cs.Spec.Template.Spec.Containers[0].ResizePolicy = resizePolicy + cs.Spec.Template.Spec.Containers[0].Image = NginxImage + cs.Spec.Template.ObjectMeta.Labels["test-env"] = "foo" + cs.Spec.Template.Spec.Containers[0].Env = append(cs.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{ + Name: "TEST_ENV", + ValueFrom: &v1.EnvVarSource{FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.labels['test-env']"}}, + }) + cs, err = tester.CreateCloneSet(cs) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(cs.Spec.UpdateStrategy.Type).To(gomega.Equal(appsv1alpha1.InPlaceIfPossibleCloneSetUpdateStrategyType)) + + ginkgo.By("Wait for replicas satisfied") + gomega.Eventually(func() int32 { + cs, err = tester.GetCloneSet(cs.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + return cs.Status.Replicas + }, 3*time.Second, time.Second).Should(gomega.Equal(int32(1))) + + ginkgo.By("Wait for all pods ready") + gomega.Eventually(func() int32 { + cs, err = tester.GetCloneSet(cs.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + return cs.Status.ReadyReplicas + }, 60*time.Second, 3*time.Second).Should(gomega.Equal(int32(1))) + + pods, err := tester.ListPodsForCloneSet(cs.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(len(pods)).Should(gomega.Equal(1)) + oldPodUID := pods[0].UID + oldContainerStatus := pods[0].Status.ContainerStatuses[0] + oldPodResource := getPodResource(pods[0]) + + ginkgo.By("Update test-env label") + err = tester.UpdateCloneSet(cs.Name, func(cs *appsv1alpha1.CloneSet) { + if cs.Annotations == nil { + cs.Annotations = map[string]string{} + } + fn(&cs.Spec.Template) + }) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Wait for CloneSet generation consistent") + gomega.Eventually(func() bool { + cs, err = 
tester.GetCloneSet(cs.Name)
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ return cs.Generation == cs.Status.ObservedGeneration
+ }, 10*time.Second, 3*time.Second).Should(gomega.Equal(true))
+
+ a, b, c := getResourcesInfo(pods[0])
+ ginkgo.By("Wait for all pods updated and ready")
+ gomega.Eventually(func() int32 {
+ cs, err = tester.GetCloneSet(cs.Name)
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ framework.Logf("CloneSet updatedReplicas %v updatedReadyReplicas %v updatedAvailableReplicas %v",
+ cs.Status.UpdatedReplicas, cs.Status.UpdatedReadyReplicas, cs.Status.UpdatedAvailableReplicas)
+
+ pods, err = tester.ListPodsForCloneSet(cs.Name)
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ a1, b1, c1 := getResourcesInfo(pods[0])
+ if a1 != a || b1 != b || c1 != c {
+ framework.Logf("updateSpec %v", a1)
+ framework.Logf("spec %v", b1)
+ framework.Logf("container status %v ", c1)
+ a, b, c = a1, b1, c1
+ }
+ SkipTestWhenCgroupError(pods[0])
+ return cs.Status.UpdatedAvailableReplicas
+ }, 600*time.Second, 3*time.Second).Should(gomega.Equal(int32(1)))
+
+ ginkgo.By("Verify the containerID changed and restartCount should be 1")
+ pods, err = tester.ListPodsForCloneSet(cs.Name)
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ gomega.Expect(len(pods)).Should(gomega.Equal(1))
+ newPodUID := pods[0].UID
+ newContainerStatus := pods[0].Status.ContainerStatuses[0]
+
+ gomega.Expect(oldPodUID).Should(gomega.Equal(newPodUID))
+ gomega.Expect(newContainerStatus.ContainerID).NotTo(gomega.Equal(oldContainerStatus.ContainerID))
+ gomega.Expect(newContainerStatus.RestartCount).Should(gomega.Equal(int32(1)))
+
+ pods, err = tester.ListPodsForCloneSet(cs.Name)
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ gomega.Expect(checkPodResource(pods, oldPodResource, []string{"redis"})).Should(gomega.Equal(true))
+ }
+ // This can't be Conformance yet.
+ ginkgo.It("in-place update image and resource", func() {
+ fn := func(pod *v1.PodTemplateSpec) {
+ spec := &pod.Spec
+ ginkgo.By("in-place update image and resource")
+ spec.Containers[0].Image = NewNginxImage
+ spec.Containers[0].Resources = newResource
+ }
+ testUpdateResource(fn, resizePolicy)
+ })
+
+ // This can't be Conformance yet.
+ ginkgo.It("in-place update resource and env from label", func() {
+ fn := func(pod *v1.PodTemplateSpec) {
+ spec := &pod.Spec
+ ginkgo.By("in-place update resource and env from label")
+ pod.Labels["test-env"] = "bar"
+ spec.Containers[0].Resources = newResource
+ }
+ testUpdateResource(fn, resizePolicy)
+ })
+
+ // This can't be Conformance yet.
+ ginkgo.It("in-place update image, resource and env from label", func() { + fn := func(pod *v1.PodTemplateSpec) { + spec := &pod.Spec + ginkgo.By("in-place update image, resource and env from label") + spec.Containers[0].Image = NewNginxImage + pod.Labels["test-env"] = "bar" + spec.Containers[0].Resources = newResource + } + testUpdateResource(fn, resizePolicy) + }) + + framework.ConformanceIt("in-place update two container image, resource with priorities successfully", func() { + cs := tester.NewCloneSet("clone-"+randStr, 1, appsv1alpha1.CloneSetUpdateStrategy{Type: appsv1alpha1.InPlaceIfPossibleCloneSetUpdateStrategyType}) + cs.Spec.Template.Spec.Containers[0].ResizePolicy = resizePolicy + cs.Spec.Template.Spec.Containers = append(cs.Spec.Template.Spec.Containers, v1.Container{ + Name: "redis", + Image: RedisImage, + Command: []string{"sleep", "999"}, + Env: []v1.EnvVar{{Name: appspub.ContainerLaunchPriorityEnvName, Value: "10"}}, + Lifecycle: &v1.Lifecycle{PostStart: &v1.LifecycleHandler{Exec: &v1.ExecAction{Command: []string{"sleep", "10"}}}}, + }) + cs.Spec.Template.Spec.TerminationGracePeriodSeconds = utilpointer.Int64(3) + cs, err = tester.CreateCloneSet(cs) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(cs.Spec.UpdateStrategy.Type).To(gomega.Equal(appsv1alpha1.InPlaceIfPossibleCloneSetUpdateStrategyType)) + + ginkgo.By("Wait for replicas satisfied") + gomega.Eventually(func() int32 { + cs, err = tester.GetCloneSet(cs.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + return cs.Status.Replicas + }, 3*time.Second, time.Second).Should(gomega.Equal(int32(1))) + + ginkgo.By("Wait for all pods ready") + gomega.Eventually(func() int32 { + cs, err = tester.GetCloneSet(cs.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + return cs.Status.ReadyReplicas + }, 60*time.Second, 3*time.Second).Should(gomega.Equal(int32(1))) + + pods, err := tester.ListPodsForCloneSet(cs.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(len(pods)).Should(gomega.Equal(1)) + oldPodResource := getPodResource(pods[0]) + + ginkgo.By("Update images of nginx and redis") + err = tester.UpdateCloneSet(cs.Name, func(cs *appsv1alpha1.CloneSet) { + cs.Spec.Template.Spec.Containers[0].Image = NewNginxImage + cs.Spec.Template.Spec.Containers[1].Image = imageutils.GetE2EImage(imageutils.BusyBox) + cs.Spec.Template.Spec.Containers[0].Resources = newResource + }) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Wait for CloneSet generation consistent") + gomega.Eventually(func() bool { + cs, err = tester.GetCloneSet(cs.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + return cs.Generation == cs.Status.ObservedGeneration + }, 10*time.Second, 3*time.Second).Should(gomega.Equal(true)) + + ginkgo.By("Wait for all pods updated and ready") + a, b, c := getResourcesInfo(pods[0]) + gomega.Eventually(func() int32 { + cs, err = tester.GetCloneSet(cs.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + pods, err = tester.ListPodsForCloneSet(cs.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + a1, b1, c1 := getResourcesInfo(pods[0]) + if a1 != a || b1 != b || c1 != c { + framework.Logf("updateSpec %v", a1) + framework.Logf("spec %v", b1) + framework.Logf("container status %v ", c1) + a, b, c = a1, b1, c1 + } + SkipTestWhenCgroupError(pods[0]) + + return cs.Status.UpdatedAvailableReplicas + }, 600*time.Second, 3*time.Second).Should(gomega.Equal(int32(1))) + + ginkgo.By("Verify two containers have all updated in-place") + pods, err = 
tester.ListPodsForCloneSet(cs.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(len(pods)).Should(gomega.Equal(1)) + + pod := pods[0] + nginxContainerStatus := util.GetContainerStatus("nginx", pod) + redisContainerStatus := util.GetContainerStatus("redis", pod) + gomega.Expect(nginxContainerStatus.RestartCount).Should(gomega.Equal(int32(1))) + gomega.Expect(redisContainerStatus.RestartCount).Should(gomega.Equal(int32(1))) + + ginkgo.By("Verify nginx should be stopped after new redis has started 10s") + gomega.Expect(nginxContainerStatus.LastTerminationState.Terminated.FinishedAt.After(redisContainerStatus.State.Running.StartedAt.Time.Add(time.Second*10))). + Should(gomega.Equal(true), fmt.Sprintf("nginx finish at %v is not after redis start %v + 10s", + nginxContainerStatus.LastTerminationState.Terminated.FinishedAt, + redisContainerStatus.State.Running.StartedAt)) + + ginkgo.By("Verify in-place update state in two batches") + inPlaceUpdateState := appspub.InPlaceUpdateState{} + gomega.Expect(pod.Annotations[appspub.InPlaceUpdateStateKey]).ShouldNot(gomega.BeEmpty()) + err = json.Unmarshal([]byte(pod.Annotations[appspub.InPlaceUpdateStateKey]), &inPlaceUpdateState) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(len(inPlaceUpdateState.ContainerBatchesRecord)).Should(gomega.Equal(2)) + gomega.Expect(inPlaceUpdateState.ContainerBatchesRecord[0].Containers).Should(gomega.Equal([]string{"redis"})) + gomega.Expect(inPlaceUpdateState.ContainerBatchesRecord[1].Containers).Should(gomega.Equal([]string{"nginx"})) + gomega.Expect(checkPodResource(pods, oldPodResource, []string{"redis"})).Should(gomega.Equal(true)) + }) + + framework.ConformanceIt("in-place update two container image, resource with priorities, should not update the next when the previous one failed", func() { + cs := tester.NewCloneSet("clone-"+randStr, 1, appsv1alpha1.CloneSetUpdateStrategy{Type: appsv1alpha1.InPlaceIfPossibleCloneSetUpdateStrategyType}) + cs.Spec.Template.Spec.Containers = append(cs.Spec.Template.Spec.Containers, v1.Container{ + Name: "redis", + Image: RedisImage, + Env: []v1.EnvVar{{Name: appspub.ContainerLaunchPriorityEnvName, Value: "10"}}, + Lifecycle: &v1.Lifecycle{PostStart: &v1.LifecycleHandler{Exec: &v1.ExecAction{Command: []string{"sleep", "10"}}}}, + }) + cs.Spec.Template.Spec.TerminationGracePeriodSeconds = utilpointer.Int64(3) + cs, err = tester.CreateCloneSet(cs) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(cs.Spec.UpdateStrategy.Type).To(gomega.Equal(appsv1alpha1.InPlaceIfPossibleCloneSetUpdateStrategyType)) + + ginkgo.By("Wait for replicas satisfied") + gomega.Eventually(func() int32 { + cs, err = tester.GetCloneSet(cs.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + return cs.Status.Replicas + }, 3*time.Second, time.Second).Should(gomega.Equal(int32(1))) + + ginkgo.By("Wait for all pods ready") + gomega.Eventually(func() int32 { + cs, err = tester.GetCloneSet(cs.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + return cs.Status.ReadyReplicas + }, 60*time.Second, 3*time.Second).Should(gomega.Equal(int32(1))) + + pods, err := tester.ListPodsForCloneSet(cs.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(len(pods)).Should(gomega.Equal(1)) + oldPodResource := getPodResource(pods[0]) + + ginkgo.By("Update images of nginx and redis") + err = tester.UpdateCloneSet(cs.Name, func(cs *appsv1alpha1.CloneSet) { + cs.Spec.Template.Spec.Containers[0].Image = NewNginxImage + 
cs.Spec.Template.Spec.Containers[1].Image = imageutils.GetE2EImage(imageutils.BusyBox) + cs.Spec.Template.Spec.Containers[0].Resources = newResource + }) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Wait for CloneSet generation consistent") + gomega.Eventually(func() bool { + cs, err = tester.GetCloneSet(cs.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + return cs.Generation == cs.Status.ObservedGeneration + }, 10*time.Second, 3*time.Second).Should(gomega.Equal(true)) + + ginkgo.By("Wait for redis failed to start") + var pod *v1.Pod + gomega.Eventually(func() *v1.ContainerStateTerminated { + pods, err = tester.ListPodsForCloneSet(cs.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(len(pods)).Should(gomega.Equal(1)) + pod = pods[0] + redisContainerStatus := util.GetContainerStatus("redis", pod) + return redisContainerStatus.LastTerminationState.Terminated + }, 60*time.Second, time.Second).ShouldNot(gomega.BeNil()) + + gomega.Eventually(func() *v1.ContainerStateWaiting { + pods, err = tester.ListPodsForCloneSet(cs.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(len(pods)).Should(gomega.Equal(1)) + pod = pods[0] + redisContainerStatus := util.GetContainerStatus("redis", pod) + return redisContainerStatus.State.Waiting + }, 60*time.Second, time.Second).ShouldNot(gomega.BeNil()) + + nginxContainerStatus := util.GetContainerStatus("nginx", pod) + gomega.Expect(nginxContainerStatus.RestartCount).Should(gomega.Equal(int32(0))) + + ginkgo.By("Verify in-place update state only one batch and remain next") + inPlaceUpdateState := appspub.InPlaceUpdateState{} + gomega.Expect(pod.Annotations[appspub.InPlaceUpdateStateKey]).ShouldNot(gomega.BeEmpty()) + err = json.Unmarshal([]byte(pod.Annotations[appspub.InPlaceUpdateStateKey]), &inPlaceUpdateState) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(len(inPlaceUpdateState.ContainerBatchesRecord)).Should(gomega.Equal(1)) + gomega.Expect(inPlaceUpdateState.ContainerBatchesRecord[0].Containers).Should(gomega.Equal([]string{"redis"})) + gomega.Expect(inPlaceUpdateState.NextContainerImages).Should(gomega.Equal(map[string]string{"nginx": NewNginxImage})) + gomega.Expect(checkPodResource(pods, oldPodResource, []string{"redis"})).Should(gomega.Equal(false)) + }) + + //This can't be Conformance yet. 
+ ginkgo.It("in-place update two container image, resource and env from metadata with priorities", func() { + cs := tester.NewCloneSet("clone-"+randStr, 1, appsv1alpha1.CloneSetUpdateStrategy{Type: appsv1alpha1.InPlaceIfPossibleCloneSetUpdateStrategyType}) + cs.Spec.Template.Spec.Containers[0].ResizePolicy = resizePolicy + cs.Spec.Template.Annotations = map[string]string{"config": "foo"} + cs.Spec.Template.Spec.Containers = append(cs.Spec.Template.Spec.Containers, v1.Container{ + Name: "redis", + Image: RedisImage, + Env: []v1.EnvVar{ + {Name: appspub.ContainerLaunchPriorityEnvName, Value: "10"}, + {Name: "CONFIG", ValueFrom: &v1.EnvVarSource{FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.annotations['config']"}}}, + }, + Lifecycle: &v1.Lifecycle{PostStart: &v1.LifecycleHandler{Exec: &v1.ExecAction{Command: []string{"sleep", "10"}}}}, + }) + cs, err = tester.CreateCloneSet(cs) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(cs.Spec.UpdateStrategy.Type).To(gomega.Equal(appsv1alpha1.InPlaceIfPossibleCloneSetUpdateStrategyType)) + + ginkgo.By("Wait for replicas satisfied") + gomega.Eventually(func() int32 { + cs, err = tester.GetCloneSet(cs.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + return cs.Status.Replicas + }, 3*time.Second, time.Second).Should(gomega.Equal(int32(1))) + + ginkgo.By("Wait for all pods ready") + gomega.Eventually(func() int32 { + cs, err = tester.GetCloneSet(cs.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + return cs.Status.ReadyReplicas + }, 60*time.Second, 3*time.Second).Should(gomega.Equal(int32(1))) + + pods, err := tester.ListPodsForCloneSet(cs.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(len(pods)).Should(gomega.Equal(1)) + oldPodResource := getPodResource(pods[0]) + + ginkgo.By("Update nginx image and config annotation") + err = tester.UpdateCloneSet(cs.Name, func(cs *appsv1alpha1.CloneSet) { + cs.Spec.Template.Spec.Containers[0].Image = NewNginxImage + cs.Spec.Template.Annotations["config"] = "bar" + cs.Spec.Template.Spec.Containers[0].Resources = newResource + }) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Wait for CloneSet generation consistent") + gomega.Eventually(func() bool { + cs, err = tester.GetCloneSet(cs.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + return cs.Generation == cs.Status.ObservedGeneration + }, 10*time.Second, 3*time.Second).Should(gomega.Equal(true)) + + ginkgo.By("Wait for all pods updated and ready") + a, b, c := getResourcesInfo(pods[0]) + gomega.Eventually(func() int32 { + cs, err = tester.GetCloneSet(cs.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + pods, err = tester.ListPodsForCloneSet(cs.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + a1, b1, c1 := getResourcesInfo(pods[0]) + if a1 != a || b1 != b || c1 != c { + framework.Logf("updateSpec %v", a1) + framework.Logf("spec %v", b1) + framework.Logf("container status %v ", c1) + a, b, c = a1, b1, c1 + } + SkipTestWhenCgroupError(pods[0]) + + return cs.Status.UpdatedAvailableReplicas + }, 600*time.Second, 3*time.Second).Should(gomega.Equal(int32(1))) + + ginkgo.By("Verify two containers have all updated in-place") + pods, err = tester.ListPodsForCloneSet(cs.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(len(pods)).Should(gomega.Equal(1)) + + pod := pods[0] + nginxContainerStatus := util.GetContainerStatus("nginx", pod) + redisContainerStatus := util.GetContainerStatus("redis", pod) + 
gomega.Expect(nginxContainerStatus.RestartCount).Should(gomega.Equal(int32(1))) + gomega.Expect(redisContainerStatus.RestartCount).Should(gomega.Equal(int32(1))) + + ginkgo.By("Verify nginx should be stopped after new redis has started") + gomega.Expect(nginxContainerStatus.LastTerminationState.Terminated.FinishedAt.After(redisContainerStatus.State.Running.StartedAt.Time.Add(time.Second*10))). + Should(gomega.Equal(true), fmt.Sprintf("nginx finish at %v is not after redis start %v + 10s", + nginxContainerStatus.LastTerminationState.Terminated.FinishedAt, + redisContainerStatus.State.Running.StartedAt)) + + ginkgo.By("Verify in-place update state in two batches") + inPlaceUpdateState := appspub.InPlaceUpdateState{} + gomega.Expect(pod.Annotations[appspub.InPlaceUpdateStateKey]).ShouldNot(gomega.BeEmpty()) + err = json.Unmarshal([]byte(pod.Annotations[appspub.InPlaceUpdateStateKey]), &inPlaceUpdateState) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(len(inPlaceUpdateState.ContainerBatchesRecord)).Should(gomega.Equal(2)) + gomega.Expect(inPlaceUpdateState.ContainerBatchesRecord[0].Containers).Should(gomega.Equal([]string{"redis"})) + gomega.Expect(inPlaceUpdateState.ContainerBatchesRecord[1].Containers).Should(gomega.Equal([]string{"nginx"})) + gomega.Expect(checkPodResource(pods, oldPodResource, []string{"redis"})).Should(gomega.Equal(true)) + }) + } + + ginkgo.By("inplace update resources with RestartContainer policy") + testWithResizePolicy([]v1.ContainerResizePolicy{ + { + ResourceName: v1.ResourceCPU, RestartPolicy: v1.RestartContainer, + }, + { + ResourceName: v1.ResourceMemory, RestartPolicy: v1.RestartContainer, + }, + }) + ginkgo.By("inplace update resources with memory RestartContainer policy") + testWithResizePolicy([]v1.ContainerResizePolicy{ + { + ResourceName: v1.ResourceMemory, RestartPolicy: v1.RestartContainer, + }, + }) + ginkgo.By("inplace update resources with cpu RestartContainer policy") + testWithResizePolicy([]v1.ContainerResizePolicy{ + { + ResourceName: v1.ResourceCPU, RestartPolicy: v1.RestartContainer, + }, + }) + ginkgo.By("inplace update resources with NotRequired policy") + testWithResizePolicy([]v1.ContainerResizePolicy{ + { + ResourceName: v1.ResourceCPU, RestartPolicy: v1.NotRequired, + }, + { + ResourceName: v1.ResourceMemory, RestartPolicy: v1.NotRequired, + }, + }) + }) + + framework.KruiseDescribe("Basic StatefulSet functionality [StatefulSetBasic]", func() { + ssName := "ss" + labels := map[string]string{ + "foo": "bar", + "baz": "blah", + } + headlessSvcName := "test" + var statefulPodMounts, podMounts []v1.VolumeMount + var ss *appsv1beta1.StatefulSet + + ginkgo.BeforeEach(func() { + statefulPodMounts = []v1.VolumeMount{{Name: "datadir", MountPath: "/data/"}} + podMounts = []v1.VolumeMount{{Name: "home", MountPath: "/home"}} + ss = framework.NewStatefulSet(ssName, ns, headlessSvcName, 2, statefulPodMounts, podMounts, labels) + + ginkgo.By("Creating service " + headlessSvcName + " in namespace " + ns) + headlessService := framework.CreateServiceSpec(headlessSvcName, "", true, labels) + _, err := c.CoreV1().Services(ns).Create(context.TODO(), headlessService, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + testfn := func(ss *appsv1beta1.StatefulSet) { + e := v1.EnvVar{ + Name: "test-env", + ValueFrom: &v1.EnvVarSource{FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.labels['label-k2']"}}, + } + ss.Spec.Template.Spec.Containers[0].Env = append(ss.Spec.Template.Spec.Containers[0].Env, e) + if 
len(ss.Spec.Template.Labels) == 0 {
+ ss.Spec.Template.Labels = map[string]string{}
+ }
+ ss.Spec.Template.Labels["label-k2"] = "hello"
+
+ oldImage := ss.Spec.Template.Spec.Containers[0].Image
+ currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision
+ updateFn := func(set *appsv1beta1.StatefulSet) {
+ currentRevision = set.Status.CurrentRevision
+ set.Spec.Template.Labels["label-k2"] = "test"
+ container := &set.Spec.Template.Spec.Containers[0]
+ container.Resources = newResource
+ }
+ sst := framework.NewStatefulSetTester(c, kc)
+
+ validateFn := func(pods []v1.Pod) {
+ ss = sst.WaitForStatus(ss)
+ updateRevision = ss.Status.UpdateRevision
+ baseValidFn(pods, oldImage, updateRevision)
+ for i := range pods {
+ // the pod spec should carry the updated resources after the in-place update
+ gomega.Expect(ResourceEqual(&pods[i].Spec.Containers[0].Resources, &newResource)).
+ Should(gomega.BeTrue(), diff.ObjectDiff(pods[i].Spec.Containers[0].Resources, newResource))
+ }
+ }
+ rollbackFn := func(set *appsv1beta1.StatefulSet) {
+ set.Spec.Template.Labels["label-k2"] = "hello"
+ container := &set.Spec.Template.Spec.Containers[0]
+ container.Resources = oldResource
+ }
+ validateFn2 := func(pods []v1.Pod) {
+ ss = sst.WaitForStatus(ss)
+ baseValidFn(pods, oldImage, currentRevision)
+ for i := range pods {
+ // after the rollback the pod spec should be back to the original resources
+ gomega.Expect(ResourceEqual(&pods[i].Spec.Containers[0].Resources, &oldResource)).
+ Should(gomega.BeTrue(), diff.ObjectDiff(pods[i].Spec.Containers[0].Resources, oldResource))
+ }
+ }
+ rollbackWithUpdateFnTest(c, kc, ns, ss, updateFn, rollbackFn, validateFn, validateFn2)
+ }
+
+ ginkgo.It("should perform rolling updates (including resources) and roll backs with pvcs", func() {
+ ginkgo.By("Creating a new StatefulSet")
+ ss.Spec.UpdateStrategy.RollingUpdate = &appsv1beta1.RollingUpdateStatefulSetStrategy{
+ PodUpdatePolicy: appsv1beta1.InPlaceIfPossiblePodUpdateStrategyType,
+ }
+ ss.Spec.Template.Spec.ReadinessGates = []v1.PodReadinessGate{{ConditionType: appspub.InPlaceUpdateReady}}
+ testfn(ss)
+ })
+
+ ginkgo.It("should perform rolling updates (including resources) and roll backs", func() {
+ ginkgo.By("Creating a new StatefulSet")
+ ss = framework.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels)
+ ss.Spec.UpdateStrategy.RollingUpdate = &appsv1beta1.RollingUpdateStatefulSetStrategy{
+ PodUpdatePolicy: appsv1beta1.InPlaceIfPossiblePodUpdateStrategyType,
+ }
+ ss.Spec.Template.Spec.ReadinessGates = []v1.PodReadinessGate{{ConditionType: appspub.InPlaceUpdateReady}}
+ testfn(ss)
+ })
+
+ })
+
+ framework.KruiseDescribe("Basic DaemonSet functionality [DaemonSetBasic]", func() {
+ var tester *framework.DaemonSetTester
+ ginkgo.BeforeEach(func() {
+ c = f.ClientSet
+ kc = f.KruiseClientSet
+ ns = f.Namespace.Name
+ tester = framework.NewDaemonSetTester(c, kc, ns)
+ })
+ dsName := "e2e-ds"
+
+ ginkgo.AfterEach(func() {
+ if ginkgo.CurrentGinkgoTestDescription().Failed {
+ framework.DumpDebugInfo(c, ns)
+ }
+ framework.Logf("Deleting DaemonSet %s/%s in cluster", ns, dsName)
+ tester.DeleteDaemonSet(ns, dsName)
+ })
+ newImage := NewNginxImage
+ framework.ConformanceIt("should upgrade resources and image one by one on steps if there is pre-update hook", func() {
+ label := map[string]string{framework.DaemonSetNameLabel: dsName}
+ hookKey := "my-pre-update"
+
+ ginkgo.By(fmt.Sprintf("Creating DaemonSet %q with pre-update hook", dsName))
+ maxUnavailable := intstr.IntOrString{IntVal: int32(1)}
+ ads := tester.NewDaemonSet(dsName, label, WebserverImage, appsv1alpha1.DaemonSetUpdateStrategy{
+ Type: appsv1alpha1.RollingUpdateDaemonSetStrategyType,
+ RollingUpdate: &appsv1alpha1.RollingUpdateDaemonSet{
+ Type: appsv1alpha1.InplaceRollingUpdateType,
+ MaxUnavailable: &maxUnavailable,
+ },
+ })
+ ads.Spec.Template.Labels =
map[string]string{framework.DaemonSetNameLabel: dsName, hookKey: "true"} + ads.Spec.Template.Spec.Containers[0].Resources = oldResource + ds, err := tester.CreateDaemonSet(ads) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Check that daemon pods launch on every node of the cluster") + err = wait.PollImmediate(framework.DaemonSetRetryPeriod, framework.DaemonSetRetryTimeout, tester.CheckRunningOnAllNodes(ds)) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to start") + + err = tester.CheckDaemonStatus(dsName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + oldPodList, err := tester.ListDaemonPods(label) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Only update daemonset resources and image") + err = tester.UpdateDaemonSet(ds.Name, func(ads *appsv1alpha1.DaemonSet) { + ads.Spec.Template.Spec.Containers[0].Image = newImage + ads.Spec.Template.Spec.Containers[0].Resources = newResource + }) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error to update daemon") + + gomega.Eventually(func() int64 { + ads, err = tester.GetDaemonSet(dsName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + return ads.Status.ObservedGeneration + }, time.Second*30, time.Second*3).Should(gomega.Equal(int64(2))) + + ginkgo.By("Wait for all pods updated and ready") + lastId := len(oldPodList.Items) - 1 + a, b, c := getResourcesInfo(&oldPodList.Items[lastId]) + gomega.Eventually(func() int32 { + ads, err = tester.GetDaemonSet(dsName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + updatedAvailable := ads.Status.UpdatedNumberScheduled + updatedAvailable -= ads.Status.NumberUnavailable + framework.Logf("UpdatedNumber %v Unavailable %v UpdatedAvailable %v", ads.Status.UpdatedNumberScheduled, + ads.Status.NumberUnavailable, updatedAvailable) + + oldPodList, err = tester.ListDaemonPods(label) + pod := &oldPodList.Items[lastId] + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + a1, b1, c1 := getResourcesInfo(pod) + if a1 != a || b1 != b || c1 != c { + framework.Logf("updateSpec %v", a1) + framework.Logf("spec %v", b1) + framework.Logf("container status %v ", c1) + a, b, c = a1, b1, c1 + } + SkipTestWhenCgroupError(pod) + return updatedAvailable + }, 600*time.Second, 3*time.Second).Should(gomega.Equal(int32(len(oldPodList.Items)))) + + ginkgo.By("Verify the podUId changed and restartCount should be 1") + pods, err := tester.ListDaemonPods(label) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + lastStatus := map[string]*v1.ResourceRequirements{"busybox": &oldResource} + for _, pod := range pods.Items { + gomega.Expect(checkPodResource([]*v1.Pod{&pod}, lastStatus, []string{"redis"})).Should(gomega.Equal(true)) + gomega.Expect(pod.Status.ContainerStatuses[0].RestartCount).Should(gomega.Equal(int32(1))) + } + }) + }) +}) + +func ResourceEqual(spec, status *v1.ResourceRequirements) bool { + + if spec == nil && status == nil { + return true + } + if status == nil || spec == nil { + return false + } + if spec.Requests != nil { + if status.Requests == nil { + return false + } + if !spec.Requests.Cpu().Equal(*status.Requests.Cpu()) || + !spec.Requests.Memory().Equal(*status.Requests.Memory()) { + return false + } + } + if spec.Limits != nil { + if status.Limits == nil { + return false + } + if !spec.Limits.Cpu().Equal(*status.Limits.Cpu()) || + !spec.Limits.Memory().Equal(*status.Limits.Memory()) { + return false + } + } + return true +} + +func getPodResource(pod *v1.Pod) map[string]*v1.ResourceRequirements { + containerResource := 
map[string]*v1.ResourceRequirements{} + for _, c := range pod.Spec.Containers { + c := c + containerResource[c.Name] = &c.Resources + } + return containerResource +} + +func checkPodResource(pods []*v1.Pod, last map[string]*v1.ResourceRequirements, ignoreContainer []string) (res bool) { + defer func() { + if !res && len(last) == 1 { + pjson, _ := json.Marshal(pods) + ljson, _ := json.Marshal(last) + framework.Logf("pod %v, last resource %v", string(pjson), string(ljson)) + } + }() + for _, pod := range pods { + containerResource := getPodResource(pod) + for _, c := range pod.Spec.Containers { + if slice.ContainsString(ignoreContainer, c.Name, nil) { + continue + } + lastResource := last[c.Name] + if ResourceEqual(&c.Resources, lastResource) { + framework.Logf("container %s resource unchanged", c.Name) + // resource unchanged + return false + } + } + for _, cs := range pod.Status.ContainerStatuses { + cname := cs.Name + spec := containerResource[cname] + if !ResourceEqual(spec, cs.Resources) { + framework.Logf("container %v spec != status", cname) + // resource spec != status + return false + } + } + } + + // resource changed and spec = status + return true +} + +func baseValidFn(pods []v1.Pod, image string, revision string) { + for i := range pods { + gomega.Expect(pods[i].Spec.Containers[0].Image).To(gomega.Equal(image), + fmt.Sprintf(" Pod %s/%s has image %s not have image %s", + pods[i].Namespace, + pods[i].Name, + pods[i].Spec.Containers[0].Image, + image)) + gomega.Expect(pods[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(revision), + fmt.Sprintf("Pod %s/%s revision %s is not equal to revision %s", + pods[i].Namespace, + pods[i].Name, + pods[i].Labels[apps.StatefulSetRevisionLabel], + revision)) + } +} +func rollbackWithUpdateFnTest(c clientset.Interface, kc kruiseclientset.Interface, ns string, ss *appsv1beta1.StatefulSet, + updateFn, rollbackFn func(update *appsv1beta1.StatefulSet), validateFn1, validateFn2 func([]v1.Pod)) { + sst := framework.NewStatefulSetTester(c, kc) + sst.SetHTTPProbe(ss) + ss, err := kc.AppsV1beta1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss) + ss = sst.WaitForStatus(ss) + currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision + gomega.Expect(currentRevision).To(gomega.Equal(updateRevision), + fmt.Sprintf("StatefulSet %s/%s created with update revision %s not equal to current revision %s", + ss.Namespace, ss.Name, updateRevision, currentRevision)) + pods := sst.GetPodList(ss) + for i := range pods.Items { + gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision), + fmt.Sprintf("Pod %s/%s revision %s is not equal to current revision %s", + pods.Items[i].Namespace, + pods.Items[i].Name, + pods.Items[i].Labels[apps.StatefulSetRevisionLabel], + currentRevision)) + } + sst.SortStatefulPods(pods) + err = sst.BreakPodHTTPProbe(ss, &pods.Items[1]) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ss, pods = sst.WaitForPodNotReady(ss, pods.Items[1].Name) + newImage := NewNginxImage + oldImage := ss.Spec.Template.Spec.Containers[0].Image + + ginkgo.By(fmt.Sprintf("Updating StatefulSet template: update image from %s to %s", oldImage, newImage)) + gomega.Expect(oldImage).NotTo(gomega.Equal(newImage), "Incorrect test setup: should update to a different image") + ss, err = framework.UpdateStatefulSetWithRetries(kc, ns, ss.Name, updateFn) + 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Creating a new revision") + ss = sst.WaitForStatus(ss) + currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision + gomega.Expect(currentRevision).NotTo(gomega.Equal(updateRevision), + "Current revision should not equal update revision during rolling update") + + ginkgo.By("Updating Pods in reverse ordinal order") + pods = sst.GetPodList(ss) + sst.SortStatefulPods(pods) + err = sst.RestorePodHTTPProbe(ss, &pods.Items[1]) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ss, pods = sst.WaitForPodReady(ss, pods.Items[1].Name) + ss, pods = sst.WaitForRollingUpdate(ss) + gomega.Expect(ss.Status.CurrentRevision).To(gomega.Equal(updateRevision), + fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal update revision %s on update completion", + ss.Namespace, + ss.Name, + ss.Status.CurrentRevision, + updateRevision)) + validateFn1(pods.Items) + + ginkgo.By("Rolling back to a previous revision") + err = sst.BreakPodHTTPProbe(ss, &pods.Items[1]) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ss, pods = sst.WaitForPodNotReady(ss, pods.Items[1].Name) + priorRevision := currentRevision + ss, err = framework.UpdateStatefulSetWithRetries(kc, ns, ss.Name, rollbackFn) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ss = sst.WaitForStatus(ss) + currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision + gomega.Expect(currentRevision).NotTo(gomega.Equal(updateRevision), + "Current revision should not equal update revision during roll back") + gomega.Expect(priorRevision).To(gomega.Equal(updateRevision), + "Prior revision should equal update revision during roll back") + + ginkgo.By("Rolling back update in reverse ordinal order") + pods = sst.GetPodList(ss) + sst.SortStatefulPods(pods) + sst.RestorePodHTTPProbe(ss, &pods.Items[1]) + ss, pods = sst.WaitForPodReady(ss, pods.Items[1].Name) + ss, pods = sst.WaitForRollingUpdate(ss) + gomega.Expect(ss.Status.CurrentRevision).To(gomega.Equal(priorRevision), + fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal prior revision %s on rollback completion", + ss.Namespace, + ss.Name, + ss.Status.CurrentRevision, + updateRevision)) + validateFn2(pods.Items) +} + +func SkipTestWhenCgroupError(pod *v1.Pod) { + if IsPodCreateError(pod) { + ginkgo.Skip("create pod error and message is cpu.cfs_quota_us: invalid argument: unknown\n" + + "This may be caused by runc version or kernel version.") + } +} + +func getResourcesInfo(po *v1.Pod) (string, string, string) { + if po == nil { + return "", "", "" + } + lastState := "" + if len(po.Annotations) > 0 { + lastState = po.Annotations[appspub.InPlaceUpdateStateKey] + } + specResources := po.Spec.Containers[0].Resources + containerStatus := po.Status.ContainerStatuses[0] + + specResourcesJson, _ := json.Marshal(specResources) + containerStatusJson, _ := json.Marshal(containerStatus) + return lastState, string(specResourcesJson), string(containerStatusJson) +} +func IsPodCreateError(pod *v1.Pod) bool { + if pod == nil { + return false + } + if len(pod.Status.ContainerStatuses) == 0 { + return false + } + for _, cs := range pod.Status.ContainerStatuses { + if cs.LastTerminationState.Terminated != nil { + lastTermination := cs.LastTerminationState.Terminated + if lastTermination.Reason == "StartError" && + strings.Contains(lastTermination.Message, "cpu.cfs_quota_us: invalid argument: unknown") { + return true + } + } + } + return false +} diff --git 
a/test/e2e/framework/cloneset_util.go b/test/e2e/framework/cloneset_util.go index f6c0b17388..b65e61afae 100644 --- a/test/e2e/framework/cloneset_util.go +++ b/test/e2e/framework/cloneset_util.go @@ -20,14 +20,16 @@ import ( "context" "sort" - appsv1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1" - kruiseclientset "github.com/openkruise/kruise/pkg/client/clientset/versioned" - "github.com/openkruise/kruise/pkg/util" v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/util/retry" imageutils "k8s.io/kubernetes/test/utils/image" + + appsv1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1" + kruiseclientset "github.com/openkruise/kruise/pkg/client/clientset/versioned" + "github.com/openkruise/kruise/pkg/util" ) type CloneSetTester struct { @@ -66,6 +68,16 @@ func (t *CloneSetTester) NewCloneSet(name string, replicas int32, updateStrategy Env: []v1.EnvVar{ {Name: "test", Value: "foo"}, }, + Resources: v1.ResourceRequirements{ + Requests: map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: resource.MustParse("200m"), + v1.ResourceMemory: resource.MustParse("200Mi"), + }, + Limits: map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: resource.MustParse("1"), + v1.ResourceMemory: resource.MustParse("1Gi"), + }, + }, }, }, }, diff --git a/test/e2e/framework/framework.go b/test/e2e/framework/framework.go index e2186644ac..cb8348c4a5 100644 --- a/test/e2e/framework/framework.go +++ b/test/e2e/framework/framework.go @@ -23,7 +23,6 @@ import ( "strings" "time" - kruiseclientset "github.com/openkruise/kruise/pkg/client/clientset/versioned" v1 "k8s.io/api/core/v1" apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -37,6 +36,8 @@ import ( scaleclient "k8s.io/client-go/scale" "k8s.io/kubernetes/pkg/api/legacyscheme" + kruiseclientset "github.com/openkruise/kruise/pkg/client/clientset/versioned" + "github.com/onsi/ginkgo" "github.com/onsi/gomega" ) @@ -314,6 +315,11 @@ func KruiseDescribe(text string, body func()) bool { return ginkgo.Describe("[kruise.io] "+text, body) } +// KruisePDescribe is a wrapper function for ginkgo describe. Adds namespacing. +func KruisePDescribe(text string, body func()) bool { + return ginkgo.PDescribe("[kruise.io] "+text, body) +} + // ConformanceIt is a wrapper function for ginkgo It. Adds "[Conformance]" tag and makes static analysis easier. func ConformanceIt(text string, body interface{}, timeout ...float64) bool { return ginkgo.It(text+" [Conformance]", body, timeout...) 
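A note for readers of the test changes above: most of the new specs assert a specific RestartCount after a resize, and that expectation comes from each container's ResizePolicy rather than from Kruise itself. The standalone Go sketch below is illustrative only and not part of this patch (the image is arbitrary, though the requests/limits mirror the defaults just added to NewCloneSet): NotRequired lets the kubelet apply new resources in place, while RestartContainer makes it restart the container to apply them, which is why those specs expect the restart count to increase by one. With both resources set to RestartContainer, as in the testWithResizePolicy calls, any request or limit change restarts the container exactly once.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// CPU can be resized live; a memory resize restarts the container.
	c := v1.Container{
		Name:  "nginx",
		Image: "nginx:alpine",
		Resources: v1.ResourceRequirements{
			Requests: v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("200m"),
				v1.ResourceMemory: resource.MustParse("200Mi"),
			},
			Limits: v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("1"),
				v1.ResourceMemory: resource.MustParse("1Gi"),
			},
		},
		ResizePolicy: []v1.ContainerResizePolicy{
			{ResourceName: v1.ResourceCPU, RestartPolicy: v1.NotRequired},
			{ResourceName: v1.ResourceMemory, RestartPolicy: v1.RestartContainer},
		},
	}
	fmt.Printf("resize policy for %s: %+v\n", c.Name, c.ResizePolicy)
}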
diff --git a/test/e2e/framework/statefulset_utils.go b/test/e2e/framework/statefulset_utils.go index e9d1fac8ce..d57cd0d331 100644 --- a/test/e2e/framework/statefulset_utils.go +++ b/test/e2e/framework/statefulset_utils.go @@ -874,6 +874,16 @@ func NewStatefulSet(name, ns, governingSvcName string, replicas int32, statefulP Image: imageutils.GetE2EImage(imageutils.Nginx), VolumeMounts: mounts, ImagePullPolicy: v1.PullIfNotPresent, + Resources: v1.ResourceRequirements{ + Requests: map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: resource.MustParse("200m"), + v1.ResourceMemory: resource.MustParse("200Mi"), + }, + Limits: map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: resource.MustParse("1"), + v1.ResourceMemory: resource.MustParse("1Gi"), + }, + }, }, }, Volumes: vols, diff --git a/test/kind-conf-none-fg.yaml b/test/kind-conf-with-vpa.yaml similarity index 64% rename from test/kind-conf-none-fg.yaml rename to test/kind-conf-with-vpa.yaml index 25d972d418..0aeb86d0c1 100644 --- a/test/kind-conf-none-fg.yaml +++ b/test/kind-conf-with-vpa.yaml @@ -4,4 +4,6 @@ nodes: - role: control-plane - role: worker - role: worker - - role: worker \ No newline at end of file + - role: worker +featureGates: + InPlacePodVerticalScaling: true \ No newline at end of file diff --git a/tools/hack/create-cluster.sh b/tools/hack/create-cluster.sh index 0f99c72b24..9a2c962093 100755 --- a/tools/hack/create-cluster.sh +++ b/tools/hack/create-cluster.sh @@ -18,7 +18,7 @@ set -euo pipefail # Setup default values CLUSTER_NAME=${CLUSTER_NAME:-"ci-testing"} KIND_NODE_TAG=${KIND_NODE_TAG:-"v1.28.7"} -KIND_CONFIG=${KIND_CONFIG:-"test/kind-conf-none-fg.yaml"} +KIND_CONFIG=${KIND_CONFIG:-"test/kind-conf-with-vpa.yaml"} echo "$KIND_NODE_TAG" echo "$CLUSTER_NAME" diff --git a/tools/hack/run-kruise-e2e-test.sh b/tools/hack/run-kruise-e2e-test.sh index d65fd61060..bbb76ddf62 100755 --- a/tools/hack/run-kruise-e2e-test.sh +++ b/tools/hack/run-kruise-e2e-test.sh @@ -18,7 +18,10 @@ set -ex export KUBECONFIG=${HOME}/.kube/config make ginkgo set +e -./bin/ginkgo -timeout 60m -v --focus='\[apps\] StatefulSet' test/e2e +#./bin/ginkgo -p -timeout 60m -v --focus='\[apps\] InplaceVPA' test/e2e +#./bin/ginkgo -p -timeout 60m -v --focus='\[apps\] CloneSet' test/e2e +./bin/ginkgo -p -timeout 60m -v --focus='\[apps\] (CloneSet|InplaceVPA)' test/e2e + retVal=$? restartCount=$(kubectl get pod -n kruise-system -l control-plane=controller-manager --no-headers | awk '{print $4}') if [ "${restartCount}" -eq "0" ];then
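As a closing illustration of what the new InplaceVPA e2e focus ultimately verifies: the kind config above enables the InPlacePodVerticalScaling feature gate so the kubelet can apply resource changes without recreating pods, and checkPodResource then requires that the container status resources converge to the updated spec. The sketch below is a rough standalone equivalent of that convergence check; it is illustrative only and not part of the patch, it assumes a reachable cluster via ~/.kube/config, and "default"/"example" are placeholder names.

package main

import (
	"context"
	"fmt"
	"log"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// resizeConverged reports whether every container's status resources match its
// spec, i.e. whether the kubelet has finished applying an in-place resize.
func resizeConverged(pod *v1.Pod) bool {
	statusResources := map[string]*v1.ResourceRequirements{}
	for i := range pod.Status.ContainerStatuses {
		cs := &pod.Status.ContainerStatuses[i]
		statusResources[cs.Name] = cs.Resources // nil until the kubelet reports it
	}
	for _, c := range pod.Spec.Containers {
		st := statusResources[c.Name]
		if st == nil {
			return false
		}
		if !c.Resources.Requests.Cpu().Equal(*st.Requests.Cpu()) ||
			!c.Resources.Requests.Memory().Equal(*st.Requests.Memory()) ||
			!c.Resources.Limits.Cpu().Equal(*st.Limits.Cpu()) ||
			!c.Resources.Limits.Memory().Equal(*st.Limits.Memory()) {
			return false
		}
	}
	return true
}

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		log.Fatal(err)
	}
	cli := kubernetes.NewForConfigOrDie(cfg)
	// "default" and "example" are placeholders for a real namespace and pod name.
	pod, err := cli.CoreV1().Pods("default").Get(context.TODO(), "example", metav1.GetOptions{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("in-place resize converged:", resizeConverged(pod))
}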