diff --git a/.github/workflows/build-x86-image.yaml b/.github/workflows/build-x86-image.yaml index 7de573fbcf7..ab75e84d400 100644 --- a/.github/workflows/build-x86-image.yaml +++ b/.github/workflows/build-x86-image.yaml @@ -39,18 +39,16 @@ jobs: id: go - name: Export Go full version - run: | - echo "GO_FULL_VER=$(go version | awk '{print $3}')" >> "$GITHUB_ENV" + run: echo "GO_FULL_VER=$(go version | awk '{print $3}')" >> "$GITHUB_ENV" - - name: Go Cache + - name: Go cache uses: actions/cache@v3 with: path: | ~/.cache/go-build ~/go/pkg/mod key: ${{ runner.os }}-${{ env.GO_FULL_VER }}-x86-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-${{ env.GO_FULL_VER }}-x86- + restore-keys: ${{ runner.os }}-${{ env.GO_FULL_VER }}-x86- - name: Unit test run: | @@ -58,8 +56,7 @@ jobs: make ut - name: Install gosec - run: | - curl -sfL https://raw.githubusercontent.com/securego/gosec/master/install.sh | sh -s -- -b $(go env GOPATH)/bin 'v${{ env.GOSEC_VERSION }}' + run: curl -sfL https://raw.githubusercontent.com/securego/gosec/master/install.sh | sh -s -- -b $(go env GOPATH)/bin 'v${{ env.GOSEC_VERSION }}' - name: Build run: | @@ -117,28 +114,46 @@ jobs: # name: centos8-compile # path: centos8-compile.tar - single-e2e: + k8s-conformance-e2e: + name: Kubernetes Conformance E2E needs: build-kube-ovn - name: 1-master-e2e runs-on: ubuntu-22.04 - timeout-minutes: 30 + timeout-minutes: 60 + strategy: + fail-fast: false + matrix: + ip-family: + - ipv4 + - ipv6 + - dual + mode: + - overlay + - underlay steps: - uses: actions/checkout@v3 - - name: Install Kind - run: | - curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 - chmod +x ./kind - sudo mv kind /usr/local/bin + - name: Create the default branch directory + run: mkdir -p test/e2e/source - - name: Download image - uses: actions/download-artifact@v3 + - name: Check out the default branch + uses: actions/checkout@v3 with: - name: kube-ovn + ref: ${{ github.event.repository.default_branch }} + fetch-depth: 1 + path: test/e2e/source - - name: Load Image + - name: Export E2E directory run: | - docker load --input kube-ovn.tar + if [ '${{ github.base_ref || github.ref }}' = '${{ github.event.repository.default_branch }}' ]; then + echo "E2E_DIR=." 
>> "$GITHUB_ENV" + else + echo "E2E_DIR=test/e2e/source" >> "$GITHUB_ENV" + fi + + - name: Remove DNS search domain + run: | + sudo sed -i '/^search/d' /etc/resolv.conf + sudo systemctl restart docker - uses: actions/setup-go@v3 with: @@ -147,63 +162,91 @@ jobs: id: go - name: Export Go full version - run: | - echo "GO_FULL_VER=$(go version | awk '{print $3}')" >> "$GITHUB_ENV" + run: echo "GO_FULL_VER=$(go version | awk '{print $3}')" >> "$GITHUB_ENV" - - name: Go Cache + - name: Go cache uses: actions/cache@v3 with: path: | ~/.cache/go-build ~/go/pkg/mod - key: ${{ runner.os }}-${{ env.GO_FULL_VER }}-x86-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-${{ env.GO_FULL_VER }}-x86- + key: ${{ runner.os }}-e2e-${{ env.GO_FULL_VER }}-x86-${{ hashFiles('${{ env.E2E_DIR }}/**/go.sum') }} + restore-keys: ${{ runner.os }}-e2e-${{ env.GO_FULL_VER }}-x86- + + - name: Build e2e binaries + working-directory: ${{ env.E2E_DIR }} + run: make e2e-compile - - name: Init Kind + - name: Install kind + run: | + curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 + chmod +x ./kind + sudo mv kind /usr/local/bin + + - name: Download image + uses: actions/download-artifact@v3 + with: + name: kube-ovn + + - name: Load image + run: docker load --input kube-ovn.tar + + - name: Create kind cluster run: | sudo pip3 install j2cli sudo pip3 install "j2cli[yaml]" - sudo PATH=~/.local/bin:$PATH make kind-init + sudo PATH=~/.local/bin:$PATH make kind-init-${{ matrix.ip-family }} + sudo cp -r /root/.kube/ ~/.kube/ + sudo chown -R $(id -un). ~/.kube/ - name: Install Kube-OVN - run: sudo make kind-install + run: make kind-install-${{ matrix.mode }}-${{ matrix.ip-family }} - name: Run E2E - run: | - go install -mod=mod github.com/onsi/ginkgo/v2/ginkgo - sudo kubectl cluster-info - sudo cp -r /root/.kube/ /home/runner/.kube/ - sudo chmod -R 777 /home/runner/.kube/ - make e2e - - - name: Cleanup - run: | - sh -c 'while :; do if [ $(kubectl get --no-headers subnet | wc -l) -eq 2 ]; then break; fi; sleep 5; done' - sh dist/images/cleanup.sh + working-directory: ${{ env.E2E_DIR }} + env: + E2E_BRANCH: ${{ matrix.branch }} + E2E_IP_FAMILY: ${{ matrix.ip-family }} + E2E_NETWORK_MODE: ${{ matrix.mode }} + run: make k8s-conformance-e2e - single-helm-e2e: + k8s-netpol-e2e: + name: Kubernetes Network Policy E2E needs: build-kube-ovn - name: 1-master-helm-e2e runs-on: ubuntu-22.04 - timeout-minutes: 30 + timeout-minutes: 60 + strategy: + fail-fast: false + matrix: + ip-family: + - ipv4 + - ipv6 + - dual steps: - uses: actions/checkout@v3 - - name: Install Kind - run: | - curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 - chmod +x ./kind - sudo mv kind /usr/local/bin + - name: Create the default branch directory + run: mkdir -p test/e2e/source - - name: Download image - uses: actions/download-artifact@v3 + - name: Check out the default branch + uses: actions/checkout@v3 with: - name: kube-ovn + ref: ${{ github.event.repository.default_branch }} + fetch-depth: 1 + path: test/e2e/source - - name: Load Image + - name: Export E2E directory run: | - docker load --input kube-ovn.tar + if [ '${{ github.base_ref || github.ref }}' = '${{ github.event.repository.default_branch }}' ]; then + echo "E2E_DIR=." 
>> "$GITHUB_ENV" + else + echo "E2E_DIR=test/e2e/source" >> "$GITHUB_ENV" + fi + + - name: Remove DNS search domain + run: | + sudo sed -i '/^search/d' /etc/resolv.conf + sudo systemctl restart docker - uses: actions/setup-go@v3 with: @@ -212,67 +255,84 @@ jobs: id: go - name: Export Go full version - run: | - echo "GO_FULL_VER=$(go version | awk '{print $3}')" >> "$GITHUB_ENV" + run: echo "GO_FULL_VER=$(go version | awk '{print $3}')" >> "$GITHUB_ENV" - - name: Go Cache + - name: Go cache uses: actions/cache@v3 with: path: | ~/.cache/go-build ~/go/pkg/mod - key: ${{ runner.os }}-${{ env.GO_FULL_VER }}-x86-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-${{ env.GO_FULL_VER }}-x86- + key: ${{ runner.os }}-e2e-${{ env.GO_FULL_VER }}-x86-${{ hashFiles('${{ env.E2E_DIR }}/**/go.sum') }} + restore-keys: ${{ runner.os }}-e2e-${{ env.GO_FULL_VER }}-x86- + + - name: Build e2e binaries + working-directory: ${{ env.E2E_DIR }} + run: make e2e-compile - - name: Init Kind + - name: Install kind + run: | + curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 + chmod +x ./kind + sudo mv kind /usr/local/bin + + - name: Download image + uses: actions/download-artifact@v3 + with: + name: kube-ovn + + - name: Load image + run: docker load --input kube-ovn.tar + + - name: Create kind cluster run: | sudo pip3 install j2cli sudo pip3 install "j2cli[yaml]" - sudo PATH=~/.local/bin:$PATH make kind-init + sudo PATH=~/.local/bin:$PATH make kind-init-${{ matrix.ip-family }} + sudo cp -r /root/.kube/ ~/.kube/ + sudo chown -R $(id -un). ~/.kube/ - name: Install Kube-OVN - run: | - docker load --input kube-ovn.tar - sudo cp -r /root/.kube/ /home/runner/.kube/ - sudo chmod -R 777 /home/runner/.kube/ - sudo make kind-helm-install - kubectl rollout status deployment/ovn-central -n kube-system --timeout 300s - kubectl rollout status deployment/kube-ovn-controller -n kube-system --timeout 300s - kubectl rollout status daemonset/kube-ovn-cni -n kube-system --timeout 300s - - - name: Copy Kubelet-ko - run: | - sh -c ' - set -e - kubectl -n kube-system get pods --no-headers | grep kube-ovn-controller | awk "{print \$1}" | while read pod; do - sudo kubectl -n kube-system cp $pod:/kube-ovn/kubectl-ko /usr/bin/kubectl-ko - done - sudo chmod +x /usr/bin/kubectl-ko - ' + run: make kind-install-${{ matrix.ip-family }} - name: Run E2E - run: | - go install -mod=mod github.com/onsi/ginkgo/v2/ginkgo - sudo kubectl cluster-info - sudo cp -r /root/.kube/ /home/runner/.kube/ - sudo chmod -R 777 /home/runner/.kube/ - make e2e - - - name: Cleanup - run: | - sh -c 'while :; do if [ $(kubectl get --no-headers subnet | wc -l) -eq 2 ]; then break; fi; sleep 5; done' - sh dist/images/cleanup.sh + working-directory: ${{ env.E2E_DIR }} + run: make k8s-netpol-e2e - single-iptables-e2e: + cyclonus-netpol-e2e: + name: Cyclonus Network Policy E2E needs: build-kube-ovn - name: 1-master-iptables-e2e runs-on: ubuntu-22.04 timeout-minutes: 30 + strategy: + fail-fast: false + matrix: + ip-family: + - ipv4 + - ipv6 + - dual steps: - uses: actions/checkout@v3 - - name: Install Kind + - name: Create the default branch directory + run: mkdir -p test/e2e/source + + - name: Check out the default branch + uses: actions/checkout@v3 + with: + ref: ${{ github.event.repository.default_branch }} + fetch-depth: 1 + path: test/e2e/source + + - name: Export E2E directory + run: | + if [ '${{ github.base_ref || github.ref }}' = '${{ github.event.repository.default_branch }}' ]; then + echo 
"E2E_DIR=." >> "$GITHUB_ENV" + else + echo "E2E_DIR=test/e2e/source" >> "$GITHUB_ENV" + fi + + - name: Install kind run: | curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 chmod +x ./kind @@ -283,9 +343,59 @@ jobs: with: name: kube-ovn - - name: Load Image + - name: Load image + run: docker load --input kube-ovn.tar + + - name: Create kind cluster run: | - docker load --input kube-ovn.tar + sudo pip3 install j2cli + sudo pip3 install "j2cli[yaml]" + sudo PATH=~/.local/bin:$PATH make kind-init-${{ matrix.ip-family }} + sudo cp -r /root/.kube/ ~/.kube/ + sudo chown -R $(id -un). ~/.kube/ + + - name: Install Kube-OVN + run: make kind-install-${{ matrix.ip-family }} + + - name: Run E2E + working-directory: ${{ env.E2E_DIR }} + run: make cyclonus-netpol-e2e + + kube-ovn-conformance-e2e: + name: Kube-OVN Conformance E2E + needs: build-kube-ovn + runs-on: ubuntu-22.04 + timeout-minutes: 30 + strategy: + fail-fast: false + matrix: + ip-family: + - ipv4 + - ipv6 + - dual + mode: + - overlay + - underlay + steps: + - uses: actions/checkout@v3 + + - name: Create the default branch directory + run: mkdir -p test/e2e/source + + - name: Check out the default branch + uses: actions/checkout@v3 + with: + ref: ${{ github.event.repository.default_branch }} + fetch-depth: 1 + path: test/e2e/source + + - name: Export E2E directory + run: | + if [ '${{ github.base_ref || github.ref }}' = '${{ github.event.repository.default_branch }}' ]; then + echo "E2E_DIR=." >> "$GITHUB_ENV" + else + echo "E2E_DIR=test/e2e/source" >> "$GITHUB_ENV" + fi - uses: actions/setup-go@v3 with: @@ -294,63 +404,81 @@ jobs: id: go - name: Export Go full version - run: | - echo "GO_FULL_VER=$(go version | awk '{print $3}')" >> "$GITHUB_ENV" + run: echo "GO_FULL_VER=$(go version | awk '{print $3}')" >> "$GITHUB_ENV" - - name: Go Cache + - name: Go cache uses: actions/cache@v3 with: path: | ~/.cache/go-build ~/go/pkg/mod - key: ${{ runner.os }}-${{ env.GO_FULL_VER }}-x86-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-${{ env.GO_FULL_VER }}-x86- + key: ${{ runner.os }}-e2e-${{ env.GO_FULL_VER }}-x86-${{ hashFiles('${{ env.E2E_DIR }}/**/go.sum') }} + restore-keys: ${{ runner.os }}-e2e-${{ env.GO_FULL_VER }}-x86- + + - name: Build e2e binaries + working-directory: ${{ env.E2E_DIR }} + run: make e2e-compile + + - name: Install kind + run: | + curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 + chmod +x ./kind + sudo mv kind /usr/local/bin - - name: Init Kind + - name: Download image + uses: actions/download-artifact@v3 + with: + name: kube-ovn + + - name: Load image + run: docker load --input kube-ovn.tar + + - name: Create kind cluster run: | sudo pip3 install j2cli sudo pip3 install "j2cli[yaml]" - sudo PATH=~/.local/bin:$PATH make kind-init-iptables + sudo PATH=~/.local/bin:$PATH make kind-init-${{ matrix.ip-family }} + sudo cp -r /root/.kube/ ~/.kube/ + sudo chown -R $(id -un). 
~/.kube/ - name: Install Kube-OVN - run: sudo make kind-install + run: make kind-install-${{ matrix.mode }}-${{ matrix.ip-family }} - name: Run E2E - run: | - go install -mod=mod github.com/onsi/ginkgo/v2/ginkgo - sudo kubectl cluster-info - sudo cp -r /root/.kube/ /home/runner/.kube/ - sudo chmod -R 777 /home/runner/.kube/ - make e2e + working-directory: ${{ env.E2E_DIR }} + env: + E2E_IP_FAMILY: ${{ matrix.ip-family }} + E2E_NETWORK_MODE: ${{ matrix.mode }} + run: make kube-ovn-conformance-e2e - name: Cleanup - run: | - sh -c 'while :; do if [ $(kubectl get --no-headers subnet | wc -l) -eq 2 ]; then break; fi; sleep 5; done' - sh dist/images/cleanup.sh + run: sh dist/images/cleanup.sh - single-vlan-e2e: + kube-ovn-ic-conformance-e2e: + name: Kube-OVN IC Conformance E2E needs: build-kube-ovn - name: 1-master-vlan-e2e runs-on: ubuntu-22.04 timeout-minutes: 30 steps: - uses: actions/checkout@v3 - - name: Install Kind - run: | - curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 - chmod +x ./kind - sudo mv kind /usr/local/bin + - name: Create the default branch directory + run: mkdir -p test/e2e/source - - name: Download image - uses: actions/download-artifact@v3 + - name: Check out the default branch + uses: actions/checkout@v3 with: - name: kube-ovn + ref: ${{ github.event.repository.default_branch }} + fetch-depth: 1 + path: test/e2e/source - - name: Load Image + - name: Export E2E directory run: | - docker load --input kube-ovn.tar + if [ '${{ github.base_ref || github.ref }}' = '${{ github.event.repository.default_branch }}' ]; then + echo "E2E_DIR=." >> "$GITHUB_ENV" + else + echo "E2E_DIR=test/e2e/source" >> "$GITHUB_ENV" + fi - uses: actions/setup-go@v3 with: @@ -358,51 +486,57 @@ jobs: check-latest: true id: go - - name: Export Go full version - run: | - echo "GO_FULL_VER=$(go version | awk '{print $3}')" >> "$GITHUB_ENV" - - - name: Go Cache + - name: Go cache uses: actions/cache@v3 with: path: | ~/.cache/go-build ~/go/pkg/mod - key: ${{ runner.os }}-${{ env.GO_FULL_VER }}-x86-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-${{ env.GO_FULL_VER }}-x86- + key: ${{ runner.os }}-e2e-${{ env.GO_FULL_VER }}-x86-${{ hashFiles('${{ env.E2E_DIR }}/**/go.sum') }} + restore-keys: ${{ runner.os }}-e2e-${{ env.GO_FULL_VER }}-x86- + + - name: Build e2e binaries + working-directory: ${{ env.E2E_DIR }} + run: make e2e-compile + + - name: Install kind + run: | + curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 + chmod +x ./kind + sudo mv kind /usr/local/bin + + - name: Download image + uses: actions/download-artifact@v3 + with: + name: kube-ovn + + - name: Load image + run: docker load --input kube-ovn.tar - - name: Init Kind + - name: Create kind clusters run: | sudo pip3 install j2cli sudo pip3 install "j2cli[yaml]" - sudo PATH=~/.local/bin:$PATH make kind-init + sudo PATH=~/.local/bin:$PATH make kind-init-ovn-ic + sudo cp -r /root/.kube/ ~/.kube/ + sudo chown -R $(id -un). 
~/.kube/ - name: Install Kube-OVN - run: sudo make kind-install + run: make kind-install-ovn-ic - name: Run E2E - run: | - go install -mod=mod github.com/onsi/ginkgo/v2/ginkgo - sudo kubectl cluster-info - sudo cp -r /root/.kube/ /home/runner/.kube/ - sudo chmod -R 777 /home/runner/.kube/ - make e2e-vlan - - - name: Cleanup - run: | - sh -c 'while :; do if [ $(kubectl get --no-headers subnet | wc -l) -eq 2 ]; then break; fi; sleep 5; done' - sh dist/images/cleanup.sh + working-directory: ${{ env.E2E_DIR }} + run: make kube-ovn-ic-conformance-e2e - single-underlay-e2e-single-nic: + chart-installation-test: needs: build-kube-ovn - name: 1-master-underlay-e2e-single-nic + name: Chart Installation Test runs-on: ubuntu-22.04 timeout-minutes: 30 steps: - uses: actions/checkout@v3 - - name: Install Kind + - name: Install kind run: | curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 chmod +x ./kind @@ -413,84 +547,36 @@ jobs: with: name: kube-ovn - - name: Load Image - run: | - docker load --input kube-ovn.tar - - - uses: actions/setup-go@v3 - with: - go-version: '${{ env.GO_VERSION }}' - check-latest: true - id: go - - - name: Export Go full version - run: | - echo "GO_FULL_VER=$(go version | awk '{print $3}')" >> "$GITHUB_ENV" - - - name: Go Cache - uses: actions/cache@v3 - with: - path: | - ~/.cache/go-build - ~/go/pkg/mod - key: ${{ runner.os }}-${{ env.GO_FULL_VER }}-x86-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-${{ env.GO_FULL_VER }}-x86- + - name: Load image + run: docker load --input kube-ovn.tar - - name: Init Kind + - name: Create kind cluster run: | sudo pip3 install j2cli sudo pip3 install "j2cli[yaml]" sudo PATH=~/.local/bin:$PATH make kind-init + sudo cp -r /root/.kube/ ~/.kube/ + sudo chown -R $(id -un). 
~/.kube/ - name: Install Kube-OVN - run: sudo make kind-install-underlay - - - name: Run E2E - run: | - go install -mod=mod github.com/onsi/ginkgo/v2/ginkgo - sudo kubectl cluster-info - sudo cp -r /root/.kube/ /home/runner/.kube/ - sudo chmod -R 777 /home/runner/.kube/ - make e2e-underlay-single-nic - - - name: Run networkpolicy E2E - working-directory: test/networkpolicy-cyclonus/ run: | - sudo cp -r /root/.kube/ /home/runner/.kube/ - sudo chmod -R 777 /home/runner/.kube/ - bash ./start-test.sh + make kind-install-chart + kubectl rollout status deployment/ovn-central -n kube-system --timeout 300s + kubectl rollout status deployment/kube-ovn-controller -n kube-system --timeout 300s + kubectl rollout status daemonset/kube-ovn-cni -n kube-system --timeout 300s - name: Cleanup - run: | - sh -c 'while :; do if [ $(kubectl get --no-headers subnet | wc -l) -eq 2 ]; then break; fi; sleep 5; done' - sh dist/images/cleanup.sh + run: sh dist/images/cleanup.sh - - name: Check Node Network - run: | - sh -c ' - set -e - kubectl get no --no-headers | awk "{print \$1}" | while read node; do - docker inspect $node -f "{{.NetworkSettings.Networks.kind.IPAddress}}" - done | while read ip; do - docker run --rm --network kind kubeovn/kube-ovn:$(cat VERSION) ping -c1 -w1 $ip - done - kubectl get no --no-headers | awk "{print \$1}" | while read node; do - docker inspect $node -f "{{.NetworkSettings.Networks.kind.GlobalIPv6Address}}" - done | while read ip; do - docker run --rm --network kind kubeovn/kube-ovn:$(cat VERSION) ping6 -c1 -w1 $ip - done - ' - - single-node-e2e: + ha-installation-test: needs: build-kube-ovn - name: 1-node-e2e + name: HA Installation Test runs-on: ubuntu-22.04 timeout-minutes: 30 steps: - uses: actions/checkout@v3 - - name: Install Kind + - name: Install kind run: | curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 chmod +x ./kind @@ -501,61 +587,32 @@ jobs: with: name: kube-ovn - - name: Load Image - run: | - docker load --input kube-ovn.tar - - - uses: actions/setup-go@v3 - with: - go-version: '${{ env.GO_VERSION }}' - check-latest: true - id: go - - - name: Export Go full version - run: | - echo "GO_FULL_VER=$(go version | awk '{print $3}')" >> "$GITHUB_ENV" + - name: Load image + run: docker load --input kube-ovn.tar - - name: Go Cache - uses: actions/cache@v3 - with: - path: | - ~/.cache/go-build - ~/go/pkg/mod - key: ${{ runner.os }}-${{ env.GO_FULL_VER }}-x86-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-${{ env.GO_FULL_VER }}-x86- - - - name: Init Kind + - name: Create kind cluster run: | sudo pip3 install j2cli sudo pip3 install "j2cli[yaml]" - sudo PATH=~/.local/bin:$PATH make kind-init-single + sudo PATH=~/.local/bin:$PATH make kind-init-ha + sudo cp -r /root/.kube/ ~/.kube/ + sudo chown -R $(id -un). 
~/.kube/ - name: Install Kube-OVN - run: sudo make kind-install - - - name: Run E2E - run: | - go install -mod=mod github.com/onsi/ginkgo/v2/ginkgo - sudo kubectl cluster-info - sudo cp -r /root/.kube/ /home/runner/.kube/ - sudo chmod -R 777 /home/runner/.kube/ - make e2e + run: sudo ENABLE_SSL=true make kind-install - name: Cleanup - run: | - sh -c 'while :; do if [ $(kubectl get --no-headers subnet | wc -l) -eq 2 ]; then break; fi; sleep 5; done' - sh dist/images/cleanup.sh + run: sh dist/images/cleanup.sh - ha-e2e: + underlay-logical-gateway-installation-test: needs: build-kube-ovn - name: ha-e2e + name: Underlay Logical Gateway Installation Test runs-on: ubuntu-22.04 timeout-minutes: 30 steps: - uses: actions/checkout@v3 - - name: Install Kind + - name: Install kind run: | curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 chmod +x ./kind @@ -566,73 +623,32 @@ jobs: with: name: kube-ovn - - name: Load Image - run: | - docker load --input kube-ovn.tar - - - uses: actions/setup-go@v3 - with: - go-version: '${{ env.GO_VERSION }}' - check-latest: true - id: go - - - name: Export Go full version - run: | - echo "GO_FULL_VER=$(go version | awk '{print $3}')" >> "$GITHUB_ENV" - - - name: Go Cache - uses: actions/cache@v3 - with: - path: | - ~/.cache/go-build - ~/go/pkg/mod - key: ${{ runner.os }}-${{ env.GO_FULL_VER }}-x86-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-${{ env.GO_FULL_VER }}-x86- + - name: Load image + run: docker load --input kube-ovn.tar - - name: Init Kind + - name: Create kind cluster run: | sudo pip3 install j2cli sudo pip3 install "j2cli[yaml]" - sudo PATH=~/.local/bin:$PATH make kind-init-ha + sudo PATH=~/.local/bin:$PATH make kind-init-dual + sudo cp -r /root/.kube/ ~/.kube/ + sudo chown -R $(id -un). 
~/.kube/ - name: Install Kube-OVN - run: | - sudo make kind-install - sudo kubectl patch subnet ovn-default --type merge \ - -p '{"spec":{"gatewayType": "centralized", "gatewayNode": "kube-ovn-control-plane"}}' - sudo kubectl -n kube-system patch svc kube-ovn-pinger --type merge \ - -p '{"spec":{"type": "NodePort", "externalTrafficPolicy": "Local"}}' - - - name: Run E2E - run: | - go install -mod=mod github.com/onsi/ginkgo/v2/ginkgo - sudo kubectl cluster-info - sudo cp -r /root/.kube/ /home/runner/.kube/ - sudo chmod -R 777 /home/runner/.kube/ - make e2e - - - name: Run networkpolicy E2E - working-directory: test/networkpolicy-cyclonus/ - run: | - sudo cp -r /root/.kube/ /home/runner/.kube/ - sudo chmod -R 777 /home/runner/.kube/ - bash ./start-test.sh + run: make kind-install-underlay-logical-gateway-dual - name: Cleanup - run: | - sh -c 'while :; do if [ $(kubectl get --no-headers subnet | wc -l) -eq 2 ]; then break; fi; sleep 5; done' - sh dist/images/cleanup.sh + run: sh dist/images/cleanup.sh - ipv6-e2e: + no-ovn-lb-test: needs: build-kube-ovn - name: ipv6-e2e + name: Disable OVN LB Test runs-on: ubuntu-22.04 - timeout-minutes: 45 + timeout-minutes: 30 steps: - uses: actions/checkout@v3 - - name: Install Kind + - name: Install kind run: | curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 chmod +x ./kind @@ -643,68 +659,34 @@ jobs: with: name: kube-ovn - - name: Load Image - run: | - docker load --input kube-ovn.tar - - - uses: actions/setup-go@v3 - with: - go-version: '${{ env.GO_VERSION }}' - check-latest: true - id: go - - - name: Export Go full version - run: | - echo "GO_FULL_VER=$(go version | awk '{print $3}')" >> "$GITHUB_ENV" - - - name: Go Cache - uses: actions/cache@v3 - with: - path: | - ~/.cache/go-build - ~/go/pkg/mod - key: ${{ runner.os }}-${{ env.GO_FULL_VER }}-x86-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-${{ env.GO_FULL_VER }}-x86- + - name: Load image + run: docker load --input kube-ovn.tar - - name: Init Kind + - name: Create kind cluster run: | sudo pip3 install j2cli sudo pip3 install "j2cli[yaml]" - sudo PATH=~/.local/bin:$PATH make kind-init-ipv6 - - - name: Install Kube-OVN - run: sudo make kind-install-ipv6 - - - name: Run E2E - run: | - go install -mod=mod github.com/onsi/ginkgo/v2/ginkgo - sudo kubectl cluster-info - sudo cp -r /root/.kube/ /home/runner/.kube/ - sudo chmod -R 777 /home/runner/.kube/ - make e2e-ipv6 + sudo PATH=~/.local/bin:$PATH make kind-init + sudo cp -r /root/.kube/ ~/.kube/ + sudo chown -R $(id -un). 
~/.kube/ - - name: Run networkpolicy E2E - working-directory: test/networkpolicy-cyclonus/ - run: | - sudo cp -r /root/.kube/ /home/runner/.kube/ - sudo chmod -R 777 /home/runner/.kube/ - bash ./start-test.sh + - name: Install Kube-OVN without LoadBalancer + env: + ENABLE_LB: "false" + run: make kind-install - name: Cleanup - run: | - sh -c 'while :; do if [ $(kubectl get --no-headers subnet | wc -l) -eq 2 ]; then break; fi; sleep 5; done' - sh dist/images/cleanup.sh + run: sh dist/images/cleanup.sh - ipv6-vlan-e2e: + no-np-test: needs: build-kube-ovn - name: ipv6-vlan-e2e + name: Disable Network Policy Test runs-on: ubuntu-22.04 timeout-minutes: 30 steps: - uses: actions/checkout@v3 - - name: Install Kind + - name: Install kind run: | curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 chmod +x ./kind @@ -715,74 +697,52 @@ jobs: with: name: kube-ovn - - name: Load Image - run: | - docker load --input kube-ovn.tar - - - uses: actions/setup-go@v3 - with: - go-version: '${{ env.GO_VERSION }}' - check-latest: true - id: go - - - name: Export Go full version - run: | - echo "GO_FULL_VER=$(go version | awk '{print $3}')" >> "$GITHUB_ENV" - - - name: Go Cache - uses: actions/cache@v3 - with: - path: | - ~/.cache/go-build - ~/go/pkg/mod - key: ${{ runner.os }}-${{ env.GO_FULL_VER }}-x86-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-${{ env.GO_FULL_VER }}-x86- - - - name: Init Kind + - name: Load image + run: docker load --input kube-ovn.tar + + - name: Create kind cluster run: | sudo pip3 install j2cli sudo pip3 install "j2cli[yaml]" - sudo PATH=~/.local/bin:$PATH make kind-init-ipv6 + sudo PATH=~/.local/bin:$PATH make kind-init + sudo cp -r /root/.kube/ ~/.kube/ + sudo chown -R $(id -un). ~/.kube/ - name: Install Kube-OVN - run: sudo make kind-install-ipv6 - - - name: Run E2E - run: | - go install -mod=mod github.com/onsi/ginkgo/v2/ginkgo - sudo kubectl cluster-info - sudo cp -r /root/.kube/ /home/runner/.kube/ - sudo chmod -R 777 /home/runner/.kube/ - make e2e-vlan-ipv6 + env: + ENABLE_NP: "false" + run: make kind-install - name: Cleanup - run: | - sh -c 'while :; do if [ $(kubectl get --no-headers subnet | wc -l) -eq 2 ]; then break; fi; sleep 5; done' - sh dist/images/cleanup.sh + run: sh dist/images/cleanup.sh - ipv6-underlay-e2e-single-nic: - needs: build-kube-ovn - name: ipv6-underlay-e2e-single-nic + lb-svc-e2e: + needs: + - build-kube-ovn + - build-vpc-nat-gateway + name: LB Service E2E runs-on: ubuntu-22.04 timeout-minutes: 30 steps: - uses: actions/checkout@v3 - - name: Install Kind - run: | - curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 - chmod +x ./kind - sudo mv kind /usr/local/bin + - name: Create the default branch directory + run: mkdir -p test/e2e/source - - name: Download image - uses: actions/download-artifact@v3 + - name: Check out the default branch + uses: actions/checkout@v3 with: - name: kube-ovn + ref: ${{ github.event.repository.default_branch }} + fetch-depth: 1 + path: test/e2e/source - - name: Load Image + - name: Export E2E directory run: | - docker load --input kube-ovn.tar + if [ '${{ github.base_ref || github.ref }}' = '${{ github.event.repository.default_branch }}' ]; then + echo "E2E_DIR=." 
>> "$GITHUB_ENV" + else + echo "E2E_DIR=test/e2e/source" >> "$GITHUB_ENV" + fi - uses: actions/setup-go@v3 with: @@ -791,293 +751,69 @@ jobs: id: go - name: Export Go full version - run: | - echo "GO_FULL_VER=$(go version | awk '{print $3}')" >> "$GITHUB_ENV" + run: echo "GO_FULL_VER=$(go version | awk '{print $3}')" >> "$GITHUB_ENV" - - name: Go Cache + - name: Go cache uses: actions/cache@v3 with: path: | ~/.cache/go-build ~/go/pkg/mod - key: ${{ runner.os }}-${{ env.GO_FULL_VER }}-x86-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-${{ env.GO_FULL_VER }}-x86- - - - name: Init Kind - run: | - sudo pip3 install j2cli - sudo pip3 install "j2cli[yaml]" - sudo PATH=~/.local/bin:$PATH make kind-init-ipv6 + key: ${{ runner.os }}-e2e-${{ env.GO_FULL_VER }}-x86-${{ hashFiles('${{ env.E2E_DIR }}/**/go.sum') }} + restore-keys: ${{ runner.os }}-e2e-${{ env.GO_FULL_VER }}-x86- - - name: Install Kube-OVN - run: sudo make kind-install-underlay-ipv6 + - name: Build e2e binaries + working-directory: ${{ env.E2E_DIR }} + run: make e2e-compile - - name: Run E2E - run: | - go install -mod=mod github.com/onsi/ginkgo/v2/ginkgo - sudo kubectl cluster-info - sudo cp -r /root/.kube/ /home/runner/.kube/ - sudo chmod -R 777 /home/runner/.kube/ - make e2e-underlay-single-nic - - - name: Run networkpolicy E2E - working-directory: test/networkpolicy-cyclonus/ - run: | - sudo cp -r /root/.kube/ /home/runner/.kube/ - sudo chmod -R 777 /home/runner/.kube/ - bash ./start-test.sh - - - name: Cleanup - run: | - sh -c 'while :; do if [ $(kubectl get --no-headers subnet | wc -l) -eq 2 ]; then break; fi; sleep 5; done' - sh dist/images/cleanup.sh - - - name: Check Node Network - run: | - sh -c ' - set -e - kubectl get no --no-headers | awk "{print \$1}" | while read node; do - docker inspect $node -f "{{.NetworkSettings.Networks.kind.IPAddress}}" - done | while read ip; do - docker run --rm --network kind kubeovn/kube-ovn:$(cat VERSION) ping -c1 -w1 $ip - done - kubectl get no --no-headers | awk "{print \$1}" | while read node; do - docker inspect $node -f "{{.NetworkSettings.Networks.kind.GlobalIPv6Address}}" - done | while read ip; do - docker run --rm --network kind kubeovn/kube-ovn:$(cat VERSION) ping6 -c1 -w1 $ip - done - ' - - dual-stack-e2e: - needs: build-kube-ovn - name: dual-stack-e2e - runs-on: ubuntu-22.04 - timeout-minutes: 30 - steps: - - uses: actions/checkout@v3 - - - name: Install Kind + - name: Install kind run: | curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 chmod +x ./kind sudo mv kind /usr/local/bin - - name: Download image + - name: Download kube-ovn image uses: actions/download-artifact@v3 with: name: kube-ovn - - name: Load Image - run: | - docker load --input kube-ovn.tar - - - uses: actions/setup-go@v3 - with: - go-version: '${{ env.GO_VERSION }}' - check-latest: true - id: go - - - name: Export Go full version - run: | - echo "GO_FULL_VER=$(go version | awk '{print $3}')" >> "$GITHUB_ENV" - - - name: Go Cache - uses: actions/cache@v3 - with: - path: | - ~/.cache/go-build - ~/go/pkg/mod - key: ${{ runner.os }}-${{ env.GO_FULL_VER }}-x86-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-${{ env.GO_FULL_VER }}-x86- - - - name: Init Kind - run: | - sudo pip3 install j2cli - sudo pip3 install "j2cli[yaml]" - sudo PATH=~/.local/bin:$PATH make kind-init-dual - - - name: Install Kube-OVN - run: sudo make kind-install-dual - - - name: Run E2E - run: | - go install -mod=mod github.com/onsi/ginkgo/v2/ginkgo - 
sudo kubectl cluster-info - sudo cp -r /root/.kube/ /home/runner/.kube/ - sudo chmod -R 777 /home/runner/.kube/ - make e2e - - - name: Run networkpolicy E2E - working-directory: test/networkpolicy-cyclonus/ - run: | - sudo cp -r /root/.kube/ /home/runner/.kube/ - sudo chmod -R 777 /home/runner/.kube/ - bash ./start-test.sh - - - name: Cleanup - run: | - sh -c 'while :; do if [ $(kubectl get --no-headers subnet | wc -l) -eq 2 ]; then break; fi; sleep 5; done' - sh dist/images/cleanup.sh - - dual-stack-underlay-e2e-single-nic: - needs: build-kube-ovn - name: dual-stack-underlay-e2e-single-nic - runs-on: ubuntu-22.04 - timeout-minutes: 45 - steps: - - uses: actions/checkout@v3 - - - name: Install Kind - run: | - curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 - chmod +x ./kind - sudo mv kind /usr/local/bin - - - name: Download image + - name: Download vpc-nat-gateway image uses: actions/download-artifact@v3 with: - name: kube-ovn + name: vpc-nat-gateway - - name: Load Image + - name: Load images run: | - docker load --input kube-ovn.tar + docker load -i kube-ovn.tar + docker load -i vpc-nat-gateway.tar - - uses: actions/setup-go@v3 - with: - go-version: '${{ env.GO_VERSION }}' - check-latest: true - id: go - - - name: Export Go full version - run: | - echo "GO_FULL_VER=$(go version | awk '{print $3}')" >> "$GITHUB_ENV" - - - name: Go Cache - uses: actions/cache@v3 - with: - path: | - ~/.cache/go-build - ~/go/pkg/mod - key: ${{ runner.os }}-${{ env.GO_FULL_VER }}-x86-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-${{ env.GO_FULL_VER }}-x86- - - - name: Init Kind + - name: Create kind cluster run: | sudo pip3 install j2cli sudo pip3 install "j2cli[yaml]" - sudo PATH=~/.local/bin:$PATH make kind-init-dual - - - name: Install Kube-OVN - run: sudo make kind-install-underlay-dual - - - name: Run E2E - run: | - go install -mod=mod github.com/onsi/ginkgo/v2/ginkgo - sudo kubectl cluster-info - sudo cp -r /root/.kube/ /home/runner/.kube/ - sudo chmod -R 777 /home/runner/.kube/ - make e2e-underlay-single-nic - - - name: Run networkpolicy E2E - working-directory: test/networkpolicy-cyclonus/ - run: | - sudo cp -r /root/.kube/ /home/runner/.kube/ - sudo chmod -R 777 /home/runner/.kube/ - bash ./start-test.sh - - - name: Cleanup - run: | - sh -c 'while :; do if [ $(kubectl get --no-headers subnet | wc -l) -eq 2 ]; then break; fi; sleep 5; done' - sh dist/images/cleanup.sh - - - name: Check Node Network - run: | - sh -c ' - set -e - kubectl get no --no-headers | awk "{print \$1}" | while read node; do - docker inspect $node -f "{{.NetworkSettings.Networks.kind.IPAddress}}" - done | while read ip; do - docker run --rm --network kind kubeovn/kube-ovn:$(cat VERSION) ping -c1 -w1 $ip - done - kubectl get no --no-headers | awk "{print \$1}" | while read node; do - docker inspect $node -f "{{.NetworkSettings.Networks.kind.GlobalIPv6Address}}" - done | while read ip; do - docker run --rm --network kind kubeovn/kube-ovn:$(cat VERSION) ping6 -c1 -w1 $ip - done - ' - - dual-stack-underlay-hairpin-e2e-single-nic: - needs: build-kube-ovn - name: dual-stack-underlay-hairpin-e2e-single-nic - runs-on: ubuntu-22.04 - timeout-minutes: 45 - steps: - - uses: actions/checkout@v3 - - - name: Install Kind - run: | - curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 - chmod +x ./kind - sudo mv kind /usr/local/bin - - - name: Download image - uses: actions/download-artifact@v3 - with: - 
name: kube-ovn - - - name: Load Image - run: | - docker load --input kube-ovn.tar - - - uses: actions/setup-go@v3 - with: - go-version: '${{ env.GO_VERSION }}' - check-latest: true - id: go - - - name: Export Go full version - run: | - echo "GO_FULL_VER=$(go version | awk '{print $3}')" >> "$GITHUB_ENV" - - - name: Go Cache - uses: actions/cache@v3 - with: - path: | - ~/.cache/go-build - ~/go/pkg/mod - key: ${{ runner.os }}-${{ env.GO_FULL_VER }}-x86-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-${{ env.GO_FULL_VER }}-x86- + sudo PATH=~/.local/bin:$PATH make kind-init + sudo cp -r /root/.kube/ ~/.kube/ + sudo chown -R $(id -un). ~/.kube/ - - name: Init Kind - run: | - sudo pip3 install j2cli - sudo pip3 install "j2cli[yaml]" - sudo PATH=~/.local/bin:$PATH make kind-init-dual + - name: Install Multus + run: make kind-install-multus - name: Install Kube-OVN - run: sudo make kind-install-underlay-hairpin-dual + run: make kind-install-lb-svc - name: Run E2E - run: | - go install -mod=mod github.com/onsi/ginkgo/v2/ginkgo - sudo kubectl cluster-info - sudo cp -r /root/.kube/ /home/runner/.kube/ - sudo chmod -R 777 /home/runner/.kube/ - make e2e-underlay-single-nic + working-directory: ${{ env.E2E_DIR }} + run: make kube-ovn-lb-svc-conformance-e2e - dual-stack-underlay-logical-gw-e2e: + installation-compatibility-test: needs: build-kube-ovn - name: dual-stack-underlay-logical-gateway-e2e + name: Installation Compatibility Test runs-on: ubuntu-22.04 - timeout-minutes: 45 + timeout-minutes: 10 steps: - uses: actions/checkout@v3 - - name: Install Kind + - name: Install kind run: | curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 chmod +x ./kind @@ -1088,227 +824,56 @@ jobs: with: name: kube-ovn - - name: Load Image - run: | - docker load --input kube-ovn.tar + - name: Load image + run: docker load --input kube-ovn.tar - - uses: actions/setup-go@v3 - with: - go-version: '${{ env.GO_VERSION }}' - check-latest: true - id: go - - - name: Export Go full version - run: | - echo "GO_FULL_VER=$(go version | awk '{print $3}')" >> "$GITHUB_ENV" - - - name: Go Cache - uses: actions/cache@v3 - with: - path: | - ~/.cache/go-build - ~/go/pkg/mod - key: ${{ runner.os }}-${{ env.GO_FULL_VER }}-x86-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-${{ env.GO_FULL_VER }}-x86- - - - name: Init Kind + - name: Create kind cluster run: | sudo pip3 install j2cli sudo pip3 install "j2cli[yaml]" - sudo PATH=~/.local/bin:$PATH make kind-init-dual + sudo PATH=~/.local/bin:$PATH k8s_version=v1.23.13 make kind-init + sudo cp -r /root/.kube/ ~/.kube/ + sudo chown -R $(id -un). 
~/.kube/ - name: Install Kube-OVN - run: sudo make kind-install-underlay-logical-gateway-dual - - - name: Run E2E - run: | - go install -mod=mod github.com/onsi/ginkgo/v2/ginkgo - sudo kubectl cluster-info - sudo cp -r /root/.kube/ /home/runner/.kube/ - sudo chmod -R 777 /home/runner/.kube/ - make e2e - - - name: Run networkpolicy E2E - working-directory: test/networkpolicy-cyclonus/ - run: | - sudo cp -r /root/.kube/ /home/runner/.kube/ - sudo chmod -R 777 /home/runner/.kube/ - bash ./start-test.sh + run: make kind-install - name: Cleanup - run: | - sh -c 'while :; do if [ $(kubectl get --no-headers subnet | wc -l) -eq 2 ]; then break; fi; sleep 5; done' - sh dist/images/cleanup.sh + run: sh dist/images/cleanup.sh - - name: Check Node Network - run: | - sh -c ' - set -e - kubectl get no --no-headers | awk "{print \$1}" | while read node; do - docker inspect $node -f "{{.NetworkSettings.Networks.kind.IPAddress}}" - done | while read ip; do - docker run --rm --network kind kubeovn/kube-ovn:$(cat VERSION) ping -c1 -w1 $ip - done - kubectl get no --no-headers | awk "{print \$1}" | while read node; do - docker inspect $node -f "{{.NetworkSettings.Networks.kind.GlobalIPv6Address}}" - done | while read ip; do - docker run --rm --network kind kubeovn/kube-ovn:$(cat VERSION) ping6 -c1 -w1 $ip - done - ' - - no-lb-e2e: + cilium-chaining-e2e: needs: build-kube-ovn - name: disable-loadbalancer-e2e + name: Cilium Chaining E2E runs-on: ubuntu-22.04 timeout-minutes: 30 steps: - uses: actions/checkout@v3 - - - name: Install Kind - run: | - curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 - chmod +x ./kind - sudo mv kind /usr/local/bin - - - name: Download image - uses: actions/download-artifact@v3 - with: - name: kube-ovn - - - name: Load Image - run: | - docker load --input kube-ovn.tar - - - uses: actions/setup-go@v3 - with: - go-version: '${{ env.GO_VERSION }}' - check-latest: true - id: go - - - name: Export Go full version - run: | - echo "GO_FULL_VER=$(go version | awk '{print $3}')" >> "$GITHUB_ENV" - - - name: Go Cache - uses: actions/cache@v3 - with: - path: | - ~/.cache/go-build - ~/go/pkg/mod - key: ${{ runner.os }}-${{ env.GO_FULL_VER }}-x86-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-${{ env.GO_FULL_VER }}-x86- - - - name: Init Kind - run: | - sudo pip3 install j2cli - sudo pip3 install "j2cli[yaml]" - sudo PATH=~/.local/bin:$PATH make kind-init - - - name: Install Kube-OVN without LoadBalancer - run: sudo ENABLE_LB=false make kind-install - - - name: Run E2E - run: | - go install -mod=mod github.com/onsi/ginkgo/v2/ginkgo - sudo kubectl cluster-info - sudo cp -r /root/.kube/ /home/runner/.kube/ - sudo chmod -R 777 /home/runner/.kube/ - make e2e - - - name: Cleanup - run: | - sh -c 'while :; do if [ $(kubectl get --no-headers subnet | wc -l) -eq 2 ]; then break; fi; sleep 5; done' - sh dist/images/cleanup.sh - - no-lb-iptables-e2e: - needs: build-kube-ovn - name: disable-loadbalancer-iptables-e2e - runs-on: ubuntu-22.04 - timeout-minutes: 30 - steps: - - uses: actions/checkout@v3 - - - name: Install Kind - run: | - curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 - chmod +x ./kind - sudo mv kind /usr/local/bin - - - name: Download image - uses: actions/download-artifact@v3 - with: - name: kube-ovn - - - name: Load Image - run: | - docker load --input kube-ovn.tar - - - uses: actions/setup-go@v3 + - uses: azure/setup-helm@v3 with: - go-version: '${{ 
env.GO_VERSION }}' - check-latest: true - id: go + version: '${{ env.HELM_VERSION }}' - - name: Export Go full version - run: | - echo "GO_FULL_VER=$(go version | awk '{print $3}')" >> "$GITHUB_ENV" + - name: Create the default branch directory + run: mkdir -p test/e2e/source - - name: Go Cache - uses: actions/cache@v3 + - name: Check out the default branch + uses: actions/checkout@v3 with: - path: | - ~/.cache/go-build - ~/go/pkg/mod - key: ${{ runner.os }}-${{ env.GO_FULL_VER }}-x86-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-${{ env.GO_FULL_VER }}-x86- + ref: ${{ github.event.repository.default_branch }} + fetch-depth: 1 + path: test/e2e/source - - name: Init Kind + - name: Export E2E directory run: | - sudo pip3 install j2cli - sudo pip3 install "j2cli[yaml]" - sudo PATH=~/.local/bin:$PATH make kind-init-iptables + if [ '${{ github.base_ref || github.ref }}' = '${{ github.event.repository.default_branch }}' ]; then + echo "E2E_DIR=." >> "$GITHUB_ENV" + else + echo "E2E_DIR=test/e2e/source" >> "$GITHUB_ENV" + fi - - name: Install Kube-OVN without LoadBalancer - run: sudo ENABLE_LB=false make kind-install - - - name: Run E2E + - name: Remove DNS search domain run: | - go install -mod=mod github.com/onsi/ginkgo/v2/ginkgo - sudo kubectl cluster-info - sudo cp -r /root/.kube/ /home/runner/.kube/ - sudo chmod -R 777 /home/runner/.kube/ - make e2e - - - name: Cleanup - run: | - sh -c 'while :; do if [ $(kubectl get --no-headers subnet | wc -l) -eq 2 ]; then break; fi; sleep 5; done' - sh dist/images/cleanup.sh - - no-np-e2e: - needs: build-kube-ovn - name: disable-network-policy-e2e - runs-on: ubuntu-22.04 - timeout-minutes: 30 - steps: - - uses: actions/checkout@v3 - - - name: Install Kind - run: | - curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 - chmod +x ./kind - sudo mv kind /usr/local/bin - - - name: Download image - uses: actions/download-artifact@v3 - with: - name: kube-ovn - - - name: Load Image - run: | - docker load --input kube-ovn.tar + sudo sed -i '/^search/d' /etc/resolv.conf + sudo systemctl restart docker - uses: actions/setup-go@v3 with: @@ -1317,52 +882,22 @@ jobs: id: go - name: Export Go full version - run: | - echo "GO_FULL_VER=$(go version | awk '{print $3}')" >> "$GITHUB_ENV" + run: echo "GO_FULL_VER=$(go version | awk '{print $3}')" >> "$GITHUB_ENV" - - name: Go Cache + - name: Go cache uses: actions/cache@v3 with: path: | ~/.cache/go-build ~/go/pkg/mod - key: ${{ runner.os }}-${{ env.GO_FULL_VER }}-x86-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-${{ env.GO_FULL_VER }}-x86- - - - name: Init Kind - run: | - sudo pip3 install j2cli - sudo pip3 install "j2cli[yaml]" - sudo PATH=~/.local/bin:$PATH make kind-init + key: ${{ runner.os }}-e2e-${{ env.GO_FULL_VER }}-x86-${{ hashFiles('${{ env.E2E_DIR }}/**/go.sum') }} + restore-keys: ${{ runner.os }}-e2e-${{ env.GO_FULL_VER }}-x86- - - name: Install Kube-OVN - run: sudo ENABLE_NP=false make kind-install + - name: Build e2e binaries + working-directory: ${{ env.E2E_DIR }} + run: make e2e-compile - - name: Run E2E - run: | - go install -mod=mod github.com/onsi/ginkgo/v2/ginkgo - sudo kubectl cluster-info - sudo cp -r /root/.kube/ /home/runner/.kube/ - sudo chmod -R 777 /home/runner/.kube/ - make e2e - - - name: Cleanup - run: | - sh -c 'while :; do if [ $(kubectl get --no-headers subnet | wc -l) -eq 2 ]; then break; fi; sleep 5; done' - sh dist/images/cleanup.sh - - multus-e2e: - needs: - - build-kube-ovn - - 
build-vpc-nat-gateway - name: multus-e2e - runs-on: ubuntu-22.04 - timeout-minutes: 30 - steps: - - uses: actions/checkout@v3 - - - name: Install Kind + - name: Install kind run: | curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 chmod +x ./kind @@ -1373,250 +908,43 @@ jobs: with: name: kube-ovn - - name: Load Image - run: | - docker load --input kube-ovn.tar - - - name: Download image - uses: actions/download-artifact@v3 - with: - name: vpc-nat-gateway - - - name: Load Image - run: | - docker load --input vpc-nat-gateway.tar - - - uses: actions/setup-go@v3 - with: - go-version: '${{ env.GO_VERSION }}' - check-latest: true - id: go + - name: Load image + run: docker load --input kube-ovn.tar - - name: Export Go full version - run: | - echo "GO_FULL_VER=$(go version | awk '{print $3}')" >> "$GITHUB_ENV" - - - name: Go Cache - uses: actions/cache@v3 - with: - path: | - ~/.cache/go-build - ~/go/pkg/mod - key: ${{ runner.os }}-${{ env.GO_FULL_VER }}-x86-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-${{ env.GO_FULL_VER }}-x86- - - - name: Init Kind + - name: Create kind cluster run: | sudo pip3 install j2cli sudo pip3 install "j2cli[yaml]" sudo PATH=~/.local/bin:$PATH make kind-init - - - name: Install Kube-OVN & Multus - run: sudo make kind-install-multus - - - name: Run E2E - run: | - go install -mod=mod github.com/onsi/ginkgo/v2/ginkgo - sudo kubectl cluster-info - sudo cp -r /root/.kube/ /home/runner/.kube/ - sudo chmod -R 777 /home/runner/.kube/ - make e2e-multus - - ovn-ic-e2e: - needs: build-kube-ovn - name: 1-master-ovn-ic-e2e - runs-on: ubuntu-22.04 - timeout-minutes: 30 - steps: - - uses: actions/checkout@v3 - - - name: Install Kind - run: | - curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 - chmod +x ./kind - sudo mv kind /usr/local/bin - - - name: Download image - uses: actions/download-artifact@v3 - with: - name: kube-ovn - - - name: Load Image - run: | - docker load --input kube-ovn.tar - - - uses: actions/setup-go@v3 - with: - go-version: '${{ env.GO_VERSION }}' - check-latest: true - id: go - - - name: Export Go full version - run: | - echo "GO_FULL_VER=$(go version | awk '{print $3}')" >> "$GITHUB_ENV" - - - name: Go Cache - uses: actions/cache@v3 - with: - path: | - ~/.cache/go-build - ~/go/pkg/mod - key: ${{ runner.os }}-${{ env.GO_FULL_VER }}-x86-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-${{ env.GO_FULL_VER }}-x86- - - - name: Init Kind - run: | - sudo pip3 install j2cli - sudo pip3 install "j2cli[yaml]" - sudo PATH=~/.local/bin:$PATH make kind-init-ovn-ic + sudo cp -r /root/.kube/ ~/.kube/ + sudo chown -R $(id -un). 
~/.kube/ - name: Install Kube-OVN - run: sudo PATH=~/.local/bin:$PATH make kind-install-ovn-ic + run: make kind-install - name: Run E2E - run: | - go install -mod=mod github.com/onsi/ginkgo/v2/ginkgo - sudo kubectl cluster-info - sudo cp -r /root/.kube/ /home/runner/.kube/ - sudo chmod -R 777 /home/runner/.kube/ - make e2e-ovn-ic + working-directory: ${{ env.E2E_DIR }} + run: make k8s-conformance-e2e - name: Cleanup - run: | - sh -c 'while :; do if [ $(kubectl get --no-headers subnet | wc -l) -eq 2 ]; then break; fi; sleep 5; done' - sh dist/images/cleanup.sh - - cilium-e2e: - needs: build-kube-ovn - name: 1-master-cilium-e2e - runs-on: ubuntu-22.04 - timeout-minutes: 30 - steps: - - uses: actions/checkout@v3 - - uses: azure/setup-helm@v3 - with: - version: '${{ env.HELM_VERSION }}' - - - name: Install Kind - run: | - curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 - chmod +x ./kind - sudo mv kind /usr/local/bin - - - name: Download image - uses: actions/download-artifact@v3 - with: - name: kube-ovn - - - name: Load Image - run: | - docker load --input kube-ovn.tar - - - uses: actions/setup-go@v3 - with: - go-version: '${{ env.GO_VERSION }}' - check-latest: true - id: go - - - name: Export Go full version - run: | - echo "GO_FULL_VER=$(go version | awk '{print $3}')" >> "$GITHUB_ENV" - - - name: Go Cache - uses: actions/cache@v3 - with: - path: | - ~/.cache/go-build - ~/go/pkg/mod - key: ${{ runner.os }}-${{ env.GO_FULL_VER }}-x86-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-${{ env.GO_FULL_VER }}-x86- - - - name: Init Kind - run: | - sudo pip3 install j2cli - sudo pip3 install "j2cli[yaml]" - sudo PATH=~/.local/bin:$PATH make kind-init-cilium - - - name: Install Kube-OVN - run: sudo PATH=~/.local/bin:$PATH make kind-install-cilium - - - name: Run E2E - run: | - go install -mod=mod github.com/onsi/ginkgo/v2/ginkgo - sudo kubectl cluster-info - sudo cp -r /root/.kube/ /home/runner/.kube/ - sudo chmod -R 777 /home/runner/.kube/ - docker exec kube-ovn-control-plane bash -c "ls -al /etc/cni/net.d/" - kubectl get pods -A - make e2e-cilium - make e2e - - - name: Cleanup - run: | - sudo sh dist/images/cleanup.sh - - installation-compatibility-e2e: - needs: build-kube-ovn - name: installation-compatibility-e2e - runs-on: ubuntu-22.04 - timeout-minutes: 10 - steps: - - uses: actions/checkout@v3 - - - name: Install Kind - run: | - curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 - chmod +x ./kind - sudo mv kind /usr/local/bin - - - name: Download image - uses: actions/download-artifact@v3 - with: - name: kube-ovn - - - name: Load Image - run: | - docker load --input kube-ovn.tar - - - name: Init Kind - run: | - sudo pip3 install j2cli - sudo pip3 install "j2cli[yaml]" - sudo PATH=~/.local/bin:$PATH k8s_version=v1.23.13 make kind-init - - - name: Install Kube-OVN - run: sudo PATH=~/.local/bin:$PATH make kind-install - - - name: Cleanup - run: | - sudo sh dist/images/cleanup.sh + run: sh dist/images/cleanup.sh push: needs: - build-centos-compile - - single-e2e - - single-iptables-e2e - - single-vlan-e2e - - single-underlay-e2e-single-nic - - single-node-e2e - - ha-e2e - - ipv6-e2e - - ipv6-vlan-e2e - - ipv6-underlay-e2e-single-nic - - dual-stack-e2e - - dual-stack-underlay-logical-gw-e2e - - dual-stack-underlay-e2e-single-nic - - dual-stack-underlay-hairpin-e2e-single-nic - - no-lb-e2e - - no-lb-iptables-e2e - - no-np-e2e - - multus-e2e - - ovn-ic-e2e - - 
cilium-e2e - - installation-compatibility-e2e + - k8s-conformance-e2e + # - k8s-netpol-e2e + # - cyclonus-netpol-e2e + - kube-ovn-conformance-e2e + - kube-ovn-ic-conformance-e2e + - lb-svc-e2e + - ha-installation-test + - underlay-logical-gateway-installation-test + - chart-installation-test + - installation-compatibility-test + - no-ovn-lb-test + - no-np-test + - cilium-chaining-e2e name: push runs-on: ubuntu-22.04 steps: @@ -1642,7 +970,7 @@ jobs: # with: # name: centos8-compile - - name: Load Image + - name: Load image run: | docker load --input kube-ovn.tar docker load --input vpc-nat-gateway.tar diff --git a/.github/workflows/scheduled-e2e.yaml b/.github/workflows/scheduled-e2e.yaml index 9bfa93458a8..89c1c8d1953 100644 --- a/.github/workflows/scheduled-e2e.yaml +++ b/.github/workflows/scheduled-e2e.yaml @@ -48,54 +48,52 @@ jobs: - name: Export Go full version run: echo "GO_FULL_VER=$(go version | awk '{print $3}')" >> "$GITHUB_ENV" - - name: Go Cache + - name: Go cache uses: actions/cache@v3 with: path: | ~/.cache/go-build ~/go/pkg/mod key: ${{ runner.os }}-e2e-${{ env.GO_FULL_VER }}-x86-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-e2e-${{ env.GO_FULL_VER }}-x86- + restore-keys: ${{ runner.os }}-e2e-${{ env.GO_FULL_VER }}-x86- - name: Create branch directory - run: mkdir -p test/k8s-network/branches/${{ matrix.branch }} + run: mkdir -p test/e2e/k8s-network/branches/${{ matrix.branch }} - name: Check out branch uses: actions/checkout@v3 with: ref: ${{ matrix.branch }} fetch-depth: 1 - path: test/k8s-network/branches/${{ matrix.branch }} + path: test/e2e/k8s-network/branches/${{ matrix.branch }} - - name: Install Kind + - name: Install kind run: | curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 chmod +x ./kind sudo mv kind /usr/local/bin - - name: Create cluster - working-directory: test/k8s-network/branches/${{ matrix.branch }} + - name: Create kind cluster + working-directory: test/e2e/k8s-network/branches/${{ matrix.branch }} run: | sudo pip3 install j2cli sudo pip3 install "j2cli[yaml]" sudo PATH=~/.local/bin:$PATH make kind-init-${{ matrix.ip-family }} + sudo cp -r /root/.kube/ ~/.kube/ + sudo chown -R $(id -un). 
~/.kube/ - name: Install Kube-OVN - working-directory: test/k8s-network/branches/${{ matrix.branch }} + working-directory: test/e2e/k8s-network/branches/${{ matrix.branch }} run: | docker pull kubeovn/kube-ovn:$(cat VERSION) - sudo make kind-install-${{ matrix.mode }}-${{ matrix.ip-family }} + make kind-install-${{ matrix.mode }}-${{ matrix.ip-family }} - name: Run E2E env: E2E_BRANCH: ${{ matrix.branch }} E2E_IP_FAMILY: ${{ matrix.ip-family }} E2E_NETWORK_MODE: ${{ matrix.mode }} - run: | - sudo cp -r /root/.kube/ ~/.kube/ - sudo chmod -R 777 ~/.kube/ - KUBECONFIG=~/.kube/config make k8s-conformance-e2e + run: make k8s-conformance-e2e k8s-netpol-e2e: name: Kubernetes Network Policy E2E @@ -127,50 +125,48 @@ jobs: - name: Export Go full version run: echo "GO_FULL_VER=$(go version | awk '{print $3}')" >> "$GITHUB_ENV" - - name: Go Cache + - name: Go cache uses: actions/cache@v3 with: path: | ~/.cache/go-build ~/go/pkg/mod key: ${{ runner.os }}-e2e-${{ env.GO_FULL_VER }}-x86-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-e2e-${{ env.GO_FULL_VER }}-x86- + restore-keys: ${{ runner.os }}-e2e-${{ env.GO_FULL_VER }}-x86- - name: Create branch directory - run: mkdir -p test/k8s-network/branches/${{ matrix.branch }} + run: mkdir -p test/e2e/k8s-network/branches/${{ matrix.branch }} - name: Check out branch uses: actions/checkout@v3 with: ref: ${{ matrix.branch }} fetch-depth: 1 - path: test/k8s-network/branches/${{ matrix.branch }} + path: test/e2e/k8s-network/branches/${{ matrix.branch }} - - name: Install Kind + - name: Install kind run: | curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 chmod +x ./kind sudo mv kind /usr/local/bin - - name: Create cluster - working-directory: test/k8s-network/branches/${{ matrix.branch }} + - name: Create kind cluster + working-directory: test/e2e/k8s-network/branches/${{ matrix.branch }} run: | sudo pip3 install j2cli sudo pip3 install "j2cli[yaml]" sudo PATH=~/.local/bin:$PATH make kind-init-${{ matrix.ip-family }} + sudo cp -r /root/.kube/ ~/.kube/ + sudo chown -R $(id -un). 
~/.kube/ - name: Install Kube-OVN - working-directory: test/k8s-network/branches/${{ matrix.branch }} + working-directory: test/e2e/k8s-network/branches/${{ matrix.branch }} run: | docker pull kubeovn/kube-ovn:$(cat VERSION) - sudo make kind-install-${{ matrix.ip-family }} + make kind-install-${{ matrix.ip-family }} - name: Run E2E - run: | - sudo cp -r /root/.kube/ ~/.kube/ - sudo chmod -R 777 ~/.kube/ - KUBECONFIG=~/.kube/config make k8s-netpol-e2e + run: make k8s-netpol-e2e cyclonus-netpol-e2e: name: Cyclonus Network Policy E2E @@ -194,36 +190,557 @@ jobs: sudo systemctl restart docker - name: Create branch directory - run: mkdir -p test/k8s-network/branches/${{ matrix.branch }} + run: mkdir -p test/e2e/k8s-network/branches/${{ matrix.branch }} - name: Check out branch uses: actions/checkout@v3 with: ref: ${{ matrix.branch }} fetch-depth: 1 - path: test/k8s-network/branches/${{ matrix.branch }} + path: test/e2e/k8s-network/branches/${{ matrix.branch }} - - name: Install Kind + - name: Install kind run: | curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 chmod +x ./kind sudo mv kind /usr/local/bin - - name: Create cluster - working-directory: test/k8s-network/branches/${{ matrix.branch }} + - name: Create kind cluster + working-directory: test/e2e/k8s-network/branches/${{ matrix.branch }} run: | sudo pip3 install j2cli sudo pip3 install "j2cli[yaml]" sudo PATH=~/.local/bin:$PATH make kind-init-${{ matrix.ip-family }} + sudo cp -r /root/.kube/ ~/.kube/ + sudo chown -R $(id -un). ~/.kube/ - name: Install Kube-OVN - working-directory: test/k8s-network/branches/${{ matrix.branch }} + working-directory: test/e2e/k8s-network/branches/${{ matrix.branch }} run: | docker pull kubeovn/kube-ovn:$(cat VERSION) - sudo make kind-install-${{ matrix.ip-family }} + make kind-install-${{ matrix.ip-family }} - name: Run E2E + run: make cyclonus-netpol-e2e + + kube-ovn-conformance-e2e: + name: Kube-OVN Conformance E2E + runs-on: ubuntu-22.04 + timeout-minutes: 30 + strategy: + fail-fast: false + matrix: + branch: + - master + ip-family: + - ipv4 + - ipv6 + - dual + mode: + - overlay + - underlay + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v3 + with: + go-version: '${{ env.GO_VERSION }}' + check-latest: true + id: go + + - name: Export Go full version + run: echo "GO_FULL_VER=$(go version | awk '{print $3}')" >> "$GITHUB_ENV" + + - name: Go cache + uses: actions/cache@v3 + with: + path: | + ~/.cache/go-build + ~/go/pkg/mod + key: ${{ runner.os }}-e2e-${{ env.GO_FULL_VER }}-x86-${{ hashFiles('**/go.sum') }} + restore-keys: ${{ runner.os }}-e2e-${{ env.GO_FULL_VER }}-x86- + + - name: Create branch directory + run: mkdir -p test/e2e/kube-ovn/branches/${{ matrix.branch }} + + - name: Check out branch + uses: actions/checkout@v3 + with: + ref: ${{ matrix.branch }} + fetch-depth: 1 + path: test/e2e/kube-ovn/branches/${{ matrix.branch }} + + - name: Install kind + run: | + curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 + chmod +x ./kind + sudo mv kind /usr/local/bin + + - name: Create kind cluster + working-directory: test/e2e/kube-ovn/branches/${{ matrix.branch }} run: | + sudo pip3 install j2cli + sudo pip3 install "j2cli[yaml]" + sudo PATH=~/.local/bin:$PATH make kind-init-${{ matrix.ip-family }} sudo cp -r /root/.kube/ ~/.kube/ - sudo chmod -R 777 ~/.kube/ - make cyclonus-netpol-e2e + sudo chown -R $(id -un). 
~/.kube/ + + - name: Install Kube-OVN + working-directory: test/e2e/kube-ovn/branches/${{ matrix.branch }} + run: | + docker pull kubeovn/kube-ovn:$(cat VERSION) + make kind-install-${{ matrix.mode }}-${{ matrix.ip-family }} + + - name: Run E2E + env: + E2E_BRANCH: ${{ matrix.branch }} + E2E_IP_FAMILY: ${{ matrix.ip-family }} + E2E_NETWORK_MODE: ${{ matrix.mode }} + run: make kube-ovn-conformance-e2e + + kube-ovn-ic-conformance-e2e: + name: Kube-OVN IC Conformance E2E + runs-on: ubuntu-22.04 + timeout-minutes: 30 + strategy: + fail-fast: false + matrix: + branch: + - master + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v3 + with: + go-version: '${{ env.GO_VERSION }}' + check-latest: true + id: go + + - name: Export Go full version + run: echo "GO_FULL_VER=$(go version | awk '{print $3}')" >> "$GITHUB_ENV" + + - name: Go cache + uses: actions/cache@v3 + with: + path: | + ~/.cache/go-build + ~/go/pkg/mod + key: ${{ runner.os }}-e2e-${{ env.GO_FULL_VER }}-x86-${{ hashFiles('**/go.sum') }} + restore-keys: ${{ runner.os }}-e2e-${{ env.GO_FULL_VER }}-x86- + + - name: Create branch directory + run: mkdir -p test/e2e/kube-ovn/branches/${{ matrix.branch }} + + - name: Check out branch + uses: actions/checkout@v3 + with: + ref: ${{ matrix.branch }} + fetch-depth: 1 + path: test/e2e/kube-ovn/branches/${{ matrix.branch }} + + - name: Install kind + run: | + curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 + chmod +x ./kind + sudo mv kind /usr/local/bin + + - name: Create kind cluster + working-directory: test/e2e/kube-ovn/branches/${{ matrix.branch }} + run: | + sudo pip3 install j2cli + sudo pip3 install "j2cli[yaml]" + sudo PATH=~/.local/bin:$PATH make kind-init-ovn-ic + sudo cp -r /root/.kube/ ~/.kube/ + sudo chown -R $(id -un). ~/.kube/ + + - name: Install Kube-OVN + working-directory: test/e2e/kube-ovn/branches/${{ matrix.branch }} + run: | + docker pull kubeovn/kube-ovn:$(cat VERSION) + make kind-install-ovn-ic + + - name: Run E2E + env: + E2E_BRANCH: ${{ matrix.branch }} + run: make kube-ovn-ic-conformance-e2e + + ha-installation-test: + name: HA Installation Test + runs-on: ubuntu-22.04 + timeout-minutes: 30 + strategy: + fail-fast: false + matrix: + branch: + - master + steps: + - uses: actions/checkout@v3 + + - name: Create branch directory + run: mkdir -p test/e2e/kube-ovn/branches/${{ matrix.branch }} + + - name: Check out branch + uses: actions/checkout@v3 + with: + ref: ${{ matrix.branch }} + fetch-depth: 1 + path: test/e2e/kube-ovn/branches/${{ matrix.branch }} + + - name: Install kind + run: | + curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 + chmod +x ./kind + sudo mv kind /usr/local/bin + + - name: Create kind cluster + working-directory: test/e2e/kube-ovn/branches/${{ matrix.branch }} + run: | + sudo pip3 install j2cli + sudo pip3 install "j2cli[yaml]" + sudo PATH=~/.local/bin:$PATH make kind-init-ha + sudo cp -r /root/.kube/ ~/.kube/ + sudo chown -R $(id -un). 
~/.kube/ + + - name: Install Kube-OVN + working-directory: test/e2e/kube-ovn/branches/${{ matrix.branch }} + run: | + docker pull kubeovn/kube-ovn:$(cat VERSION) + sudo ENABLE_SSL=true make kind-install + + - name: Cleanup + working-directory: test/e2e/kube-ovn/branches/${{ matrix.branch }} + run: sh dist/images/cleanup.sh + + underlay-logical-gateway-installation-test: + name: Underlay Logical Gateway Installation Test + runs-on: ubuntu-22.04 + timeout-minutes: 30 + strategy: + fail-fast: false + matrix: + branch: + - master + steps: + - uses: actions/checkout@v3 + + - name: Create branch directory + run: mkdir -p test/e2e/kube-ovn/branches/${{ matrix.branch }} + + - name: Check out branch + uses: actions/checkout@v3 + with: + ref: ${{ matrix.branch }} + fetch-depth: 1 + path: test/e2e/kube-ovn/branches/${{ matrix.branch }} + + - name: Install kind + run: | + curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 + chmod +x ./kind + sudo mv kind /usr/local/bin + + - name: Create kind cluster + working-directory: test/e2e/kube-ovn/branches/${{ matrix.branch }} + run: | + sudo pip3 install j2cli + sudo pip3 install "j2cli[yaml]" + sudo PATH=~/.local/bin:$PATH make kind-init-dual + sudo cp -r /root/.kube/ ~/.kube/ + sudo chown -R $(id -un). ~/.kube/ + + - name: Install Kube-OVN + working-directory: test/e2e/kube-ovn/branches/${{ matrix.branch }} + run: | + docker pull kubeovn/kube-ovn:$(cat VERSION) + make kind-install-underlay-logical-gateway-dual + + - name: Cleanup + working-directory: test/e2e/kube-ovn/branches/${{ matrix.branch }} + run: sh dist/images/cleanup.sh + + no-ovn-lb-test: + name: Disable OVN LB Test + runs-on: ubuntu-22.04 + timeout-minutes: 30 + strategy: + fail-fast: false + matrix: + branch: + - master + steps: + - uses: actions/checkout@v3 + + - name: Create branch directory + run: mkdir -p test/e2e/kube-ovn/branches/${{ matrix.branch }} + + - name: Check out branch + uses: actions/checkout@v3 + with: + ref: ${{ matrix.branch }} + fetch-depth: 1 + path: test/e2e/kube-ovn/branches/${{ matrix.branch }} + + - name: Install kind + run: | + curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 + chmod +x ./kind + sudo mv kind /usr/local/bin + + - name: Create kind cluster + working-directory: test/e2e/kube-ovn/branches/${{ matrix.branch }} + run: | + sudo pip3 install j2cli + sudo pip3 install "j2cli[yaml]" + sudo PATH=~/.local/bin:$PATH make kind-init + sudo cp -r /root/.kube/ ~/.kube/ + sudo chown -R $(id -un). 
~/.kube/ + + - name: Install Kube-OVN without LoadBalancer + working-directory: test/e2e/kube-ovn/branches/${{ matrix.branch }} + env: + ENABLE_LB: "false" + run: | + docker pull kubeovn/kube-ovn:$(cat VERSION) + make kind-install + + - name: Cleanup + working-directory: test/e2e/kube-ovn/branches/${{ matrix.branch }} + run: sh dist/images/cleanup.sh + + no-np-test: + name: Disable Network Policy Test + runs-on: ubuntu-22.04 + timeout-minutes: 30 + strategy: + fail-fast: false + matrix: + branch: + - master + steps: + - uses: actions/checkout@v3 + + - name: Create branch directory + run: mkdir -p test/e2e/kube-ovn/branches/${{ matrix.branch }} + + - name: Check out branch + uses: actions/checkout@v3 + with: + ref: ${{ matrix.branch }} + fetch-depth: 1 + path: test/e2e/kube-ovn/branches/${{ matrix.branch }} + + - name: Install kind + run: | + curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 + chmod +x ./kind + sudo mv kind /usr/local/bin + + - name: Create kind cluster + working-directory: test/e2e/kube-ovn/branches/${{ matrix.branch }} + run: | + sudo pip3 install j2cli + sudo pip3 install "j2cli[yaml]" + sudo PATH=~/.local/bin:$PATH make kind-init + sudo cp -r /root/.kube/ ~/.kube/ + sudo chown -R $(id -un). ~/.kube/ + + - name: Install Kube-OVN + working-directory: test/e2e/kube-ovn/branches/${{ matrix.branch }} + env: + ENABLE_NP: "false" + run: | + docker pull kubeovn/kube-ovn:$(cat VERSION) + make kind-install + + - name: Cleanup + working-directory: test/e2e/kube-ovn/branches/${{ matrix.branch }} + run: sh dist/images/cleanup.sh + + lb-svc-e2e: + name: LB Service E2E + runs-on: ubuntu-22.04 + timeout-minutes: 30 + strategy: + fail-fast: false + matrix: + branch: + - master + steps: + - uses: actions/checkout@v3 + + - name: Create branch directory + run: mkdir -p test/e2e/kube-ovn/branches/${{ matrix.branch }} + + - name: Check out branch + uses: actions/checkout@v3 + with: + ref: ${{ matrix.branch }} + fetch-depth: 1 + path: test/e2e/kube-ovn/branches/${{ matrix.branch }} + + - uses: actions/setup-go@v3 + with: + go-version: '${{ env.GO_VERSION }}' + check-latest: true + id: go + + - name: Export Go full version + run: echo "GO_FULL_VER=$(go version | awk '{print $3}')" >> "$GITHUB_ENV" + + - name: Go cache + uses: actions/cache@v3 + with: + path: | + ~/.cache/go-build + ~/go/pkg/mod + key: ${{ runner.os }}-e2e-${{ env.GO_FULL_VER }}-x86-${{ hashFiles('**/go.sum') }} + restore-keys: ${{ runner.os }}-e2e-${{ env.GO_FULL_VER }}-x86- + + - name: Install kind + run: | + curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 + chmod +x ./kind + sudo mv kind /usr/local/bin + + - name: Create kind cluster + working-directory: test/e2e/kube-ovn/branches/${{ matrix.branch }} + run: | + sudo pip3 install j2cli + sudo pip3 install "j2cli[yaml]" + sudo PATH=~/.local/bin:$PATH make kind-init + sudo cp -r /root/.kube/ ~/.kube/ + sudo chown -R $(id -un). 
~/.kube/ + + - name: Install Multus + working-directory: test/e2e/kube-ovn/branches/${{ matrix.branch }} + run: make kind-install-multus + + - name: Install Kube-OVN + working-directory: test/e2e/kube-ovn/branches/${{ matrix.branch }} + run: | + docker pull kubeovn/kube-ovn:$(cat VERSION) + docker pull kubeovn/vpc-nat-gateway:$(cat VERSION) + make kind-install-lb-svc + + - name: Run E2E + run: make kube-ovn-lb-svc-conformance-e2e + + installation-compatibility-test: + name: Installation Compatibility Test + runs-on: ubuntu-22.04 + timeout-minutes: 10 + strategy: + fail-fast: false + matrix: + branch: + - master + steps: + - uses: actions/checkout@v3 + + - name: Create branch directory + run: mkdir -p test/e2e/kube-ovn/branches/${{ matrix.branch }} + + - name: Check out branch + uses: actions/checkout@v3 + with: + ref: ${{ matrix.branch }} + fetch-depth: 1 + path: test/e2e/kube-ovn/branches/${{ matrix.branch }} + + - name: Install kind + run: | + curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 + chmod +x ./kind + sudo mv kind /usr/local/bin + + - name: Create kind cluster + run: | + sudo pip3 install j2cli + sudo pip3 install "j2cli[yaml]" + sudo PATH=~/.local/bin:$PATH k8s_version=v1.23.13 make kind-init + sudo cp -r /root/.kube/ ~/.kube/ + sudo chown -R $(id -un). ~/.kube/ + + - name: Install Kube-OVN + working-directory: test/e2e/kube-ovn/branches/${{ matrix.branch }} + run: | + docker pull kubeovn/kube-ovn:$(cat VERSION) + make kind-install + + - name: Cleanup + working-directory: test/e2e/kube-ovn/branches/${{ matrix.branch }} + run: sh dist/images/cleanup.sh + + cilium-chaining-e2e: + name: Cilium Chaining E2E + runs-on: ubuntu-22.04 + timeout-minutes: 30 + strategy: + fail-fast: false + matrix: + branch: + - master + steps: + - uses: actions/checkout@v3 + - uses: azure/setup-helm@v3 + with: + version: '${{ env.HELM_VERSION }}' + + - name: Remove DNS search domain + run: | + sudo sed -i '/^search/d' /etc/resolv.conf + sudo systemctl restart docker + + - uses: actions/setup-go@v3 + with: + go-version: '${{ env.GO_VERSION }}' + check-latest: true + id: go + + - name: Export Go full version + run: echo "GO_FULL_VER=$(go version | awk '{print $3}')" >> "$GITHUB_ENV" + + - name: Go cache + uses: actions/cache@v3 + with: + path: | + ~/.cache/go-build + ~/go/pkg/mod + key: ${{ runner.os }}-e2e-${{ env.GO_FULL_VER }}-x86-${{ hashFiles('**/go.sum') }} + restore-keys: ${{ runner.os }}-e2e-${{ env.GO_FULL_VER }}-x86- + + - name: Create branch directory + run: mkdir -p test/e2e/kube-ovn/branches/${{ matrix.branch }} + + - name: Check out branch + uses: actions/checkout@v3 + with: + ref: ${{ matrix.branch }} + fetch-depth: 1 + path: test/e2e/kube-ovn/branches/${{ matrix.branch }} + + - name: Install kind + run: | + curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 + chmod +x ./kind + sudo mv kind /usr/local/bin + + - name: Create kind cluster + working-directory: test/e2e/kube-ovn/branches/${{ matrix.branch }} + run: | + sudo pip3 install j2cli + sudo pip3 install "j2cli[yaml]" + sudo PATH=~/.local/bin:$PATH make kind-init + sudo cp -r /root/.kube/ ~/.kube/ + sudo chown -R $(id -un). 
~/.kube/ + + - name: Install Kube-OVN + working-directory: test/e2e/kube-ovn/branches/${{ matrix.branch }} + run: | + docker pull kubeovn/kube-ovn:$(cat VERSION) + make kind-install + + - name: Run E2E + run: make k8s-conformance-e2e + + - name: Cleanup + working-directory: test/e2e/kube-ovn/branches/${{ matrix.branch }} + run: sh dist/images/cleanup.sh diff --git a/.gitignore b/.gitignore index 538d4c60d6a..00cfb1f8674 100644 --- a/.gitignore +++ b/.gitignore @@ -9,6 +9,7 @@ dist/windows/kube-ovn-daemon.exe test/e2e/ovnnb_db.* test/e2e/ovnsb_db.* kube-ovn.yaml +kube-ovn-crd.yaml ovn.yaml ovn-ic-0.yaml ovn-ic-1.yaml @@ -18,4 +19,3 @@ vpc-nat-gateway.tar image-amd64.tar image-arm64.tar test/**/*.test -test/**/network.json diff --git a/Makefile b/Makefile index e3e83abbab8..d2190d46d3a 100644 --- a/Makefile +++ b/Makefile @@ -278,8 +278,8 @@ kind-untaint-control-plane: done; \ done -.PHONY: kind-helm-install -kind-helm-install: kind-untaint-control-plane +.PHONY: kind-install-chart +kind-install-chart: kind-untaint-control-plane kubectl label no -lbeta.kubernetes.io/os=linux kubernetes.io/os=linux --overwrite kubectl label no -lnode-role.kubernetes.io/control-plane kube-ovn/role=master --overwrite kubectl label no -lovn.kubernetes.io/ovs_dp_type!=userspace ovn.kubernetes.io/ovs_dp_type=kernel --overwrite @@ -294,7 +294,7 @@ kind-helm-install: kind-untaint-control-plane kind-install: kind-load-image kubectl config use-context kind-kube-ovn @$(MAKE) kind-untaint-control-plane - ENABLE_SSL=true dist/images/install.sh + bash dist/images/install.sh kubectl describe no .PHONY: kind-install-dev @@ -318,7 +318,7 @@ kind-install-ovn-ic: kind-load-image kind-install -e 's/10.96.0/10.98.0/g' \ -e 's/100.64.0/100.68.0/g' \ dist/images/install.sh | \ - ENABLE_SSL=true bash + bash kubectl describe no docker run -d --name ovn-ic-db --network kind $(REGISTRY)/kube-ovn:$(RELEASE_TAG) bash start-ic-db.sh @@ -350,7 +350,7 @@ kind-install-underlay-ipv4: kind-disable-hairpin kind-load-image kind-untaint-co -e 's@^[[:space:]]*EXCLUDE_IPS=.*@EXCLUDE_IPS="$(KIND_IPV4_EXCLUDE_IPS)"@' \ -e 's@^VLAN_ID=.*@VLAN_ID="0"@' \ dist/images/install.sh | \ - ENABLE_SSL=true ENABLE_VLAN=true VLAN_NIC=eth0 bash + ENABLE_VLAN=true VLAN_NIC=eth0 bash kubectl describe no .PHONY: kind-install-underlay-hairpin-ipv4 @@ -361,7 +361,7 @@ kind-install-underlay-hairpin-ipv4: kind-enable-hairpin kind-load-image kind-unt -e 's@^[[:space:]]*EXCLUDE_IPS=.*@EXCLUDE_IPS="$(KIND_IPV4_EXCLUDE_IPS)"@' \ -e 's@^VLAN_ID=.*@VLAN_ID="0"@' \ dist/images/install.sh | \ - ENABLE_SSL=true ENABLE_VLAN=true VLAN_NIC=eth0 bash + ENABLE_VLAN=true VLAN_NIC=eth0 bash kubectl describe no .PHONY: kind-install-ipv6 @@ -379,7 +379,7 @@ kind-install-underlay-ipv6: kind-disable-hairpin kind-load-image kind-untaint-co -e 's@^[[:space:]]*EXCLUDE_IPS=.*@EXCLUDE_IPS="$(KIND_IPV6_EXCLUDE_IPS)"@' \ -e 's@^VLAN_ID=.*@VLAN_ID="0"@' \ dist/images/install.sh | \ - ENABLE_SSL=true IPV6=true ENABLE_VLAN=true VLAN_NIC=eth0 bash + IPV6=true ENABLE_VLAN=true VLAN_NIC=eth0 bash .PHONY: kind-install-underlay-hairpin-ipv6 kind-install-underlay-hairpin-ipv6: kind-enable-hairpin kind-load-image kind-untaint-control-plane @@ -389,7 +389,7 @@ kind-install-underlay-hairpin-ipv6: kind-enable-hairpin kind-load-image kind-unt -e 's@^[[:space:]]*EXCLUDE_IPS=.*@EXCLUDE_IPS="$(KIND_IPV6_EXCLUDE_IPS)"@' \ -e 's@^VLAN_ID=.*@VLAN_ID="0"@' \ dist/images/install.sh | \ - ENABLE_SSL=true IPV6=true ENABLE_VLAN=true VLAN_NIC=eth0 bash + IPV6=true ENABLE_VLAN=true VLAN_NIC=eth0 bash .PHONY: 
kind-install-dual kind-install-dual: kind-install-overlay-dual @@ -406,7 +406,7 @@ kind-install-underlay-dual: kind-disable-hairpin kind-load-image kind-untaint-co -e 's@^[[:space:]]*EXCLUDE_IPS=.*@EXCLUDE_IPS="$(KIND_IPV4_EXCLUDE_IPS),$(KIND_IPV6_EXCLUDE_IPS)"@' \ -e 's@^VLAN_ID=.*@VLAN_ID="0"@' \ dist/images/install.sh | \ - ENABLE_SSL=true DUAL_STACK=true ENABLE_VLAN=true VLAN_NIC=eth0 bash + DUAL_STACK=true ENABLE_VLAN=true VLAN_NIC=eth0 bash .PHONY: kind-install-underlay-hairpin-dual kind-install-underlay-hairpin-dual: kind-enable-hairpin kind-load-image kind-untaint-control-plane @@ -416,7 +416,7 @@ kind-install-underlay-hairpin-dual: kind-enable-hairpin kind-load-image kind-unt -e 's@^[[:space:]]*EXCLUDE_IPS=.*@EXCLUDE_IPS="$(KIND_IPV4_EXCLUDE_IPS),$(KIND_IPV6_EXCLUDE_IPS)"@' \ -e 's@^VLAN_ID=.*@VLAN_ID="0"@' \ dist/images/install.sh | \ - ENABLE_SSL=true DUAL_STACK=true ENABLE_VLAN=true VLAN_NIC=eth0 bash + DUAL_STACK=true ENABLE_VLAN=true VLAN_NIC=eth0 bash .PHONY: kind-install-underlay-logical-gateway-dual kind-install-underlay-logical-gateway-dual: kind-disable-hairpin kind-load-image kind-untaint-control-plane @@ -426,18 +426,21 @@ kind-install-underlay-logical-gateway-dual: kind-disable-hairpin kind-load-image -e 's@^[[:space:]]*EXCLUDE_IPS=.*@EXCLUDE_IPS="$(KIND_IPV4_GATEWAY),$(KIND_IPV4_EXCLUDE_IPS),$(KIND_IPV6_GATEWAY),$(KIND_IPV6_EXCLUDE_IPS)"@' \ -e 's@^VLAN_ID=.*@VLAN_ID="0"@' \ dist/images/install.sh | \ - ENABLE_SSL=true DUAL_STACK=true ENABLE_VLAN=true \ + DUAL_STACK=true ENABLE_VLAN=true \ VLAN_NIC=eth0 LOGICAL_GATEWAY=true bash .PHONY: kind-install-multus -kind-install-multus: kind-load-image kind-untaint-control-plane +kind-install-multus: $(call docker_ensure_image_exists,$(MULTUS_IMAGE)) $(call kind_load_image,kube-ovn,$(MULTUS_IMAGE)) - $(call kind_load_image,kube-ovn,$(VPC_NAT_GW_IMG)) kubectl apply -f "$(MULTUS_YAML)" kubectl -n kube-system rollout status ds kube-multus-ds + +.PHONY: kind-install-lb-svc +kind-install-lb-svc: kind-load-image kind-untaint-control-plane + $(call kind_load_image,kube-ovn,$(VPC_NAT_GW_IMG)) kubectl apply -f yamls/lb-svc-attachment.yaml - ENABLE_SSL=true ENABLE_LB_SVC=true CNI_CONFIG_PRIORITY=10 dist/images/install.sh + ENABLE_LB_SVC=true CNI_CONFIG_PRIORITY=10 dist/images/install.sh kubectl describe no .PHONY: kind-install-cilium @@ -460,7 +463,7 @@ kind-install-cilium: kind-load-image kind-untaint-control-plane --set cni.configMap=cni-configuration kubectl -n kube-system rollout status ds cilium --timeout 300s bash dist/images/cilium.sh - ENABLE_SSL=true ENABLE_LB=false ENABLE_NP=false WITHOUT_KUBE_PROXY=true CNI_CONFIG_PRIORITY=10 bash dist/images/install.sh + ENABLE_LB=false ENABLE_NP=false WITHOUT_KUBE_PROXY=true CNI_CONFIG_PRIORITY=10 bash dist/images/install.sh kubectl describe no .PHONY: kind-reload @@ -474,12 +477,12 @@ kind-reload-ovs: kind-load-image kubectl delete pod -n kube-system -l app=ovs .PHONY: kind-clean -kind-clean: kind-disable-hairpin - $(call docker_rm_container,kube-ovn-e2e) +kind-clean: kind delete cluster --name=kube-ovn .PHONY: kind-clean-ovn-ic kind-clean-ovn-ic: kind-clean + $(call docker_rm_container,ovn-ic-db) kind delete cluster --name=kube-ovn1 .PHONY: uninstall @@ -516,63 +519,6 @@ ipam-bench: go test -timeout 30m -bench='^BenchmarkIPAM' -benchtime=10000x test/unittest/ipam_bench/ipam_test.go -args -logtostderr=false go test -timeout 90m -bench='^BenchmarkParallelIPAM' -benchtime=10x test/unittest/ipam_bench/ipam_test.go -args -logtostderr=false -.PHONY: e2e -e2e: - $(call 
docker_create_vlan_network) - $(eval NODE_COUNT = $(shell kind get nodes --name kube-ovn | wc -l)) - $(eval E2E_NETWORK_INFO = $(shell docker inspect -f '{{json (index .NetworkSettings.Networks "$(E2E_NETWORK)")}}' kube-ovn-control-plane)) - $(call docker_rm_container,kube-ovn-e2e) - docker run -d --name kube-ovn-e2e --network kind --cap-add=NET_ADMIN $(REGISTRY)/kube-ovn:$(RELEASE_TAG) sleep infinity - @if [ '$(E2E_NETWORK_INFO)' = 'null' ]; then \ - kind get nodes --name kube-ovn | while read node; do \ - docker network connect $(E2E_NETWORK) $$node; \ - done; \ - fi - $(call docker_config_bridge,$(E2E_NETWORK),0,$(VLAN_ID)) - - @echo "{" > test/e2e/network.json - @i=0; kind get nodes --name kube-ovn | while read node; do \ - i=$$((i+1)); \ - printf '"%s": ' "$$node" >> test/e2e/network.json; \ - docker inspect -f '{{json (index .NetworkSettings.Networks "$(E2E_NETWORK)")}}' "$$node" >> test/e2e/network.json; \ - if [ $$i -ne $(NODE_COUNT) ]; then echo "," >> test/e2e/network.json; fi; \ - done - @echo "}" >> test/e2e/network.json - - $(call docker_ensure_image_exists,kubeovn/pause:3.2) - $(call kind_load_image,kube-ovn,kubeovn/pause:3.2) - ginkgo -mod=mod -progress --always-emit-ginkgo-writer --slow-spec-threshold=60s test/e2e - -.PHONY: e2e-ipv6 -e2e-ipv6: - @IPV6=true $(MAKE) e2e - -.PHONY: e2e-vlan -e2e-vlan: - @VLAN_ID=100 $(MAKE) e2e - -.PHONY: e2e-vlan-ipv6 -e2e-vlan-ipv6: - @IPV6=true $(MAKE) e2e-vlan - -.PHONY: e2e-underlay-single-nic -e2e-underlay-single-nic: - @docker inspect -f '{{json .NetworkSettings.Networks.kind}}' kube-ovn-control-plane > test/e2e-underlay-single-nic/node/network.json - ginkgo -mod=mod -progress --always-emit-ginkgo-writer --slow-spec-threshold=60s test/e2e-underlay-single-nic - -.PHONY: e2e-ovn-ic -e2e-ovn-ic: - ginkgo -mod=mod -progress --always-emit-ginkgo-writer --slow-spec-threshold=60s test/e2e-ovn-ic - -.PHONY: e2e-cilium -e2e-cilium: - docker run -d --name kube-ovn-e2e --network kind --cap-add=NET_ADMIN $(REGISTRY)/kube-ovn:$(RELEASE_TAG) sleep infinity - ginkgo -mod=mod -progress --always-emit-ginkgo-writer --slow-spec-threshold=60s test/e2e-cilium - -.PHONY: e2e-multus -e2e-multus: - ginkgo -mod=mod -progress --always-emit-ginkgo-writer --slow-spec-threshold=60s test/e2e-multus - .PHONY: clean clean: $(RM) dist/images/kube-ovn dist/images/kube-ovn-cmd @@ -580,8 +526,6 @@ clean: $(RM) ovn.yaml kube-ovn.yaml kube-ovn-crd.yaml $(RM) ovn-ic-0.yaml ovn-ic-1.yaml $(RM) kube-ovn.tar vpc-nat-gateway.tar image-amd64.tar image-arm64.tar - $(RM) test/e2e/ovnnb_db.* test/e2e/ovnsb_db.* - $(RM) test/e2e/network.json test/e2e-underlay-single-nic/node/network.json .PHONY: changelog changelog: diff --git a/Makefile.e2e b/Makefile.e2e index 237daa3c239..44dbf0af2eb 100644 --- a/Makefile.e2e +++ b/Makefile.e2e @@ -21,17 +21,27 @@ define ginkgo_option --ginkgo.$(1)=$(shell echo '$(2)' | sed -E 's/^[[:space:]]+//' | sed -E 's/"[[:space:]]+"/" --ginkgo.$(1)="/g') endef +.PHONY: e2e +e2e: kube-ovn-conformance-e2e + +.PHONY: e2e-compile +e2e-compile: + go test ./test/e2e/k8s-network -c -o test/e2e/k8s-network/e2e.test + go test ./test/e2e/kube-ovn -c -o test/e2e/kube-ovn/e2e.test + go test ./test/e2e/ovn-ic -c -o test/e2e/ovn-ic/e2e.test + go test ./test/e2e/lb-svc -c -o test/e2e/lb-svc/e2e.test + .PHONY: k8s-conformance-e2e k8s-conformance-e2e: - go test ./test/k8s-network -c -o test/k8s-network/e2e.test - ./test/k8s-network/e2e.test --ginkgo.timeout=1h \ + go test ./test/e2e/k8s-network -c -o test/e2e/k8s-network/e2e.test + ./test/e2e/k8s-network/e2e.test 
--ginkgo.timeout=1h \ $(call ginkgo_option,focus,$(K8S_CONFORMANCE_E2E_FOCUS)) \ $(call ginkgo_option,skip,$(K8S_CONFORMANCE_E2E_SKIP)) .PHONY: k8s-netpol-e2e k8s-netpol-e2e: - go test ./test/k8s-network -c -o test/k8s-network/e2e.test - ./test/k8s-network/e2e.test --ginkgo.timeout=2h \ + go test ./test/e2e/k8s-network -c -o test/e2e/k8s-network/e2e.test + ./test/e2e/k8s-network/e2e.test --ginkgo.timeout=2h \ $(call ginkgo_option,focus,$(K8S_NETPOL_E2E_FOCUS)) \ $(call ginkgo_option,skip,$(K8S_NETPOL_E2E_SKIP)) @@ -40,7 +50,7 @@ cyclonus-netpol-e2e: kubectl create ns netpol kubectl create clusterrolebinding cyclonus --clusterrole=cluster-admin --serviceaccount=netpol:cyclonus kubectl create sa cyclonus -n netpol - kubectl create -f test/cyclonus.yaml -n netpol + kubectl create -f test/e2e/cyclonus.yaml -n netpol while ! kubectl wait pod --for=condition=Ready -l job-name=cyclonus -n netpol; do \ sleep 3; \ done @@ -48,3 +58,18 @@ cyclonus-netpol-e2e: kubectl -n netpol logs \ $$(kubectl -n netpol get pod -l job-name=cyclonus -o=jsonpath={.items[0].metadata.name}) | \ grep failed; test $$? -ne 0 + +.PHONY: kube-ovn-conformance-e2e +kube-ovn-conformance-e2e: + go test ./test/e2e/kube-ovn -c -o test/e2e/kube-ovn/e2e.test + ./test/e2e/kube-ovn/e2e.test --ginkgo.focus=CNI:Kube-OVN + +.PHONY: kube-ovn-ic-conformance-e2e +kube-ovn-ic-conformance-e2e: + go test ./test/e2e/ovn-ic -c -o test/e2e/ovn-ic/e2e.test + ./test/e2e/ovn-ic/e2e.test --ginkgo.focus=CNI:Kube-OVN + +.PHONY: kube-ovn-lb-svc-conformance-e2e +kube-ovn-lb-svc-conformance-e2e: + go test ./test/e2e/lb-svc -c -o test/e2e/lb-svc/e2e.test + ./test/e2e/lb-svc/e2e.test --ginkgo.focus=CNI:Kube-OVN diff --git a/go.mod b/go.mod index d2a9099b14f..88e7cf103d2 100644 --- a/go.mod +++ b/go.mod @@ -14,6 +14,7 @@ require ( github.com/containernetworking/cni v1.1.2 github.com/containernetworking/plugins v1.1.1 github.com/coreos/go-iptables v0.6.0 + github.com/docker/docker v20.10.22+incompatible github.com/emicklei/go-restful/v3 v3.10.1 github.com/evanphx/json-patch/v5 v5.6.0 github.com/greenpau/ovsdb v1.0.3 @@ -32,7 +33,7 @@ require ( github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.8.1 github.com/vishvananda/netlink v1.2.1-beta.2 - golang.org/x/exp v0.0.0-20221211140036-ad323defaf05 + golang.org/x/exp v0.0.0-20221217163422-3c43f8badb15 golang.org/x/sys v0.3.0 golang.org/x/time v0.3.0 google.golang.org/grpc v1.51.0 @@ -44,10 +45,11 @@ require ( k8s.io/client-go v12.0.0+incompatible k8s.io/klog/v2 v2.80.1 k8s.io/kubernetes v1.26.0 + k8s.io/pod-security-admission v0.26.0 k8s.io/sample-controller v0.26.0 k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 kubevirt.io/client-go v0.58.0 - sigs.k8s.io/controller-runtime v0.0.0-20221211125314-222fb669e109 + sigs.k8s.io/controller-runtime v0.14.0 ) require ( @@ -89,6 +91,7 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect github.com/docker/distribution v2.8.1+incompatible // indirect + github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/eapache/channels v1.1.0 // indirect github.com/eapache/queue v1.1.0 // indirect @@ -161,6 +164,7 @@ require ( github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.0.2 // indirect github.com/opencontainers/runc v1.1.4 // indirect 
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 // indirect github.com/opencontainers/selinux v1.10.0 // indirect @@ -235,7 +239,7 @@ require ( k8s.io/cluster-bootstrap v0.26.0 // indirect k8s.io/component-base v0.26.0 // indirect k8s.io/component-helpers v0.26.0 // indirect - k8s.io/cri-api v0.20.6 // indirect + k8s.io/cri-api v0.26.0 // indirect k8s.io/csi-translation-lib v0.26.0 // indirect k8s.io/dynamic-resource-allocation v0.0.0 // indirect k8s.io/kms v0.26.0 // indirect @@ -246,9 +250,8 @@ require ( k8s.io/kubelet v0.26.0 // indirect k8s.io/legacy-cloud-providers v0.0.0 // indirect k8s.io/mount-utils v0.0.0 // indirect - k8s.io/pod-security-admission v0.0.0 // indirect kubevirt.io/api v0.58.0 // indirect - kubevirt.io/containerized-data-importer-api v1.55.1 // indirect + kubevirt.io/containerized-data-importer-api v1.55.2 // indirect kubevirt.io/controller-lifecycle-operator-sdk/api v0.2.4 // indirect moul.io/http2curl v1.0.0 // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.33 // indirect diff --git a/go.sum b/go.sum index 485f9477741..2aa9eec2498 100644 --- a/go.sum +++ b/go.sum @@ -416,8 +416,9 @@ github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4Kfc github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.18+incompatible h1:SN84VYXTBNGn92T/QwIRPlum9zfemfitN7pbsp26WSc= github.com/docker/docker v20.10.18+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.22+incompatible h1:6jX4yB+NtcbldT90k7vBSaWJDB3i+zkVJT9BEK8kQkk= +github.com/docker/docker v20.10.22+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= @@ -1002,6 +1003,7 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb h1:e+l77LJOEqXTIQihQJVkA6ZxPOUmfPM5e4H7rcpgtSk= github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mozillazg/go-cos v0.13.0/go.mod h1:Zp6DvvXn0RUOXGJ2chmWt2bLEqRAnJnS3DnAZsJsoaE= github.com/mozillazg/go-httpheader v0.2.1/go.mod h1:jJ8xECTlalr6ValeXYdOF8fFUISeBAdw6E61aqQma60= @@ -1483,8 +1485,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20221211140036-ad323defaf05 
h1:T8EldfGCcveFMewH5xAYxxoX3PSQMrsechlUGVFlQBU= -golang.org/x/exp v0.0.0-20221211140036-ad323defaf05/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/exp v0.0.0-20221217163422-3c43f8badb15 h1:5oN1Pz/eDhCpbMbLstvIPa0b/BEQo6g6nwV3pLjfM6w= +golang.org/x/exp v0.0.0-20221217163422-3c43f8badb15/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -2175,8 +2177,8 @@ k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 h1:KTgPnR10d5zhztWptI952TNtt/4u5 k8s.io/utils v0.0.0-20221128185143-99ec85e7a448/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= kubevirt.io/api v0.58.0 h1:qeNeRtD6AIJ5WVJuRXajmmXtnrO5dYchy+hpCm6QwhE= kubevirt.io/api v0.58.0/go.mod h1:U0CQlZR0JoJCaC+Va0wz4dMOtYDdVywJ98OT1KmOkzI= -kubevirt.io/containerized-data-importer-api v1.55.1 h1:2WJdHrbN7pOTX1KkXKME94PG8i0Shd0DK0/3jP07d/E= -kubevirt.io/containerized-data-importer-api v1.55.1/go.mod h1:92HiQEyzPoeMiCbgfG5Qe10JQVbtWMZOXucy56dKdGg= +kubevirt.io/containerized-data-importer-api v1.55.2 h1:AzYnKIUFkKwO6c0uCQZYlAIxfzbiPkJXP29hFhauaQ8= +kubevirt.io/containerized-data-importer-api v1.55.2/go.mod h1:92HiQEyzPoeMiCbgfG5Qe10JQVbtWMZOXucy56dKdGg= kubevirt.io/controller-lifecycle-operator-sdk/api v0.2.4 h1:fZYvD3/Vnitfkx6IJxjLAk8ugnZQ7CXVYcRfkSKmuZY= kubevirt.io/controller-lifecycle-operator-sdk/api v0.2.4/go.mod h1:018lASpFYBsYN6XwmA2TIrPCx6e0gviTd/ZNtSitKgc= moul.io/http2curl v1.0.0 h1:6XwpyZOYsgZJrU8exnG87ncVkU1FVCcTRpwzOkTDUi8= @@ -2186,8 +2188,8 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.33 h1:LYqFq+6Cj2D0gFfrJvL7iElD4ET6ir3VDdhDdTK7rgc= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.33/go.mod h1:soWkSNf2tZC7aMibXEqVhCd73GOY5fJikn8qbdzemB0= -sigs.k8s.io/controller-runtime v0.0.0-20221211125314-222fb669e109 h1:+tDs4g6sAbPSVahRtUyLLwV+dryt4EOefGR4R5xcELA= -sigs.k8s.io/controller-runtime v0.0.0-20221211125314-222fb669e109/go.mod h1:Sfz/i9onkzbOA3LgIzvVDfl4b3f3CS4ij6hEXQkMqx8= +sigs.k8s.io/controller-runtime v0.14.0 h1:ju2xsov5Ara6FoQuddg+az+rAxsUsTYn2IYyEKCTyDc= +sigs.k8s.io/controller-runtime v0.14.0/go.mod h1:GaRkrY8a7UZF0kqFFbUKG7n9ICiTY5T55P1RiE3UZlU= sigs.k8s.io/controller-tools v0.2.4/go.mod h1:m/ztfQNocGYBgTTCmFdnK94uVvgxeZeE3LtJvd/jIzA= sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= diff --git a/test/e2e-cilium/e2e_cilium_suite_test.go b/test/e2e-cilium/e2e_cilium_suite_test.go deleted file mode 100644 index b1106a87695..00000000000 --- a/test/e2e-cilium/e2e_cilium_suite_test.go +++ /dev/null @@ -1,124 +0,0 @@ -package e2e_cilium - -import ( - "context" - "fmt" - "os" - "os/exec" - "strings" - "testing" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - k8sruntime "k8s.io/apimachinery/pkg/runtime" - kubeadmscheme "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/scheme" - kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3" - kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" - - . 
"github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - - kubeovn "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" - "github.com/kubeovn/kube-ovn/pkg/util" - "github.com/kubeovn/kube-ovn/test/e2e/framework" - - _ "github.com/kubeovn/kube-ovn/test/e2e/ip" - _ "github.com/kubeovn/kube-ovn/test/e2e/service" - _ "github.com/kubeovn/kube-ovn/test/e2e/subnet" -) - -func TestE2eEbpf(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Kube-OVN E2E ebpf Suite") -} - -var _ = SynchronizedAfterSuite(func() {}, func() { - f := framework.NewFramework("init", fmt.Sprintf("%s/.kube/config", os.Getenv("HOME"))) - nss, err := f.KubeClientSet.CoreV1().Namespaces().List(context.Background(), metav1.ListOptions{LabelSelector: "e2e=true"}) - if err != nil { - Fail(err.Error()) - } - if nss != nil { - for _, ns := range nss.Items { - err := f.KubeClientSet.CoreV1().Namespaces().Delete(context.Background(), ns.Name, metav1.DeleteOptions{}) - if err != nil { - Fail(err.Error()) - } - } - } - - err = f.OvnClientSet.KubeovnV1().Subnets().DeleteCollection(context.Background(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "e2e=true"}) - if err != nil { - Fail(err.Error()) - } -}) - -func setExternalRoute(af int, dst, gw string) { - if dst == "" || gw == "" { - return - } - - cmd := exec.Command("docker", "exec", "kube-ovn-e2e", "ip", fmt.Sprintf("-%d", af), "route", "replace", dst, "via", gw) - output, err := cmd.CombinedOutput() - if err != nil { - Fail((fmt.Sprintf(`failed to execute command "%s": %v, output: %s`, cmd.String(), err, strings.TrimSpace(string(output))))) - } -} - -var _ = SynchronizedBeforeSuite(func() []byte { - subnetName := "static-ip" - namespace := "static-ip" - f := framework.NewFramework("init", fmt.Sprintf("%s/.kube/config", os.Getenv("HOME"))) - - _, err := f.KubeClientSet.CoreV1().Namespaces().Create(context.Background(), &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: namespace, - Labels: map[string]string{"e2e": "true"}}}, metav1.CreateOptions{}) - if err != nil { - Fail(err.Error()) - } - - s := kubeovn.Subnet{ - ObjectMeta: metav1.ObjectMeta{ - Name: subnetName, - Labels: map[string]string{"e2e": "true"}, - }, - Spec: kubeovn.SubnetSpec{ - CIDRBlock: "12.10.0.0/16", - Namespaces: []string{namespace}, - Protocol: util.CheckProtocol("12.10.0.0/16"), - }, - } - _, err = f.OvnClientSet.KubeovnV1().Subnets().Create(context.Background(), &s, metav1.CreateOptions{}) - if err != nil { - Fail(err.Error()) - } - err = f.WaitSubnetReady(subnetName) - if err != nil { - Fail(err.Error()) - } - - nodes, err := f.KubeClientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{}) - if err != nil { - Fail(err.Error()) - } - kubeadmConfigMap, err := f.KubeClientSet.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(context.Background(), kubeadmconstants.KubeadmConfigConfigMap, metav1.GetOptions{}) - if err != nil { - Fail(err.Error()) - } - - clusterConfig := &kubeadmapi.ClusterConfiguration{} - if err = k8sruntime.DecodeInto(kubeadmscheme.Codecs.UniversalDecoder(), []byte(kubeadmConfigMap.Data[kubeadmconstants.ClusterConfigurationConfigMapKey]), clusterConfig); err != nil { - Fail(fmt.Sprintf("failed to decode kubeadm cluster configuration from bytes: %v", err)) - } - - nodeIPv4, nodeIPv6 := util.GetNodeInternalIP(nodes.Items[0]) - podSubnetV4, podSubnetV6 := util.SplitStringIP(clusterConfig.Networking.PodSubnet) - svcSubnetV4, svcSubnetV6 := util.SplitStringIP(clusterConfig.Networking.ServiceSubnet) - setExternalRoute(4, podSubnetV4, nodeIPv4) - 
setExternalRoute(4, svcSubnetV4, nodeIPv4) - setExternalRoute(6, podSubnetV6, nodeIPv6) - setExternalRoute(6, svcSubnetV6, nodeIPv6) - - return nil -}, func(data []byte) {}) diff --git a/test/e2e-multus/e2e_multus_suite_test.go b/test/e2e-multus/e2e_multus_suite_test.go deleted file mode 100644 index 41b4d44a263..00000000000 --- a/test/e2e-multus/e2e_multus_suite_test.go +++ /dev/null @@ -1,80 +0,0 @@ -package e2e_multus_test - -import ( - "context" - "fmt" - "os" - "testing" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - - kubeovn "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" - "github.com/kubeovn/kube-ovn/pkg/util" - "github.com/kubeovn/kube-ovn/test/e2e/framework" - - // tests to run - _ "github.com/kubeovn/kube-ovn/test/e2e-multus/lbsvc" -) - -func TestE2eMultus(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Kube-OVN multus E2E Suite") -} - -var _ = SynchronizedAfterSuite(func() {}, func() { - f := framework.NewFramework("init", fmt.Sprintf("%s/.kube/config", os.Getenv("HOME"))) - nss, err := f.KubeClientSet.CoreV1().Namespaces().List(context.Background(), metav1.ListOptions{LabelSelector: "e2e=true"}) - if err != nil { - Fail(err.Error()) - } - if nss != nil { - for _, ns := range nss.Items { - err := f.KubeClientSet.CoreV1().Namespaces().Delete(context.Background(), ns.Name, metav1.DeleteOptions{}) - if err != nil { - Fail(err.Error()) - } - } - } - - err = f.OvnClientSet.KubeovnV1().Subnets().DeleteCollection(context.Background(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "e2e=true"}) - if err != nil { - Fail(err.Error()) - } -}) - -var _ = SynchronizedBeforeSuite(func() []byte { - subnetName := "attach-subnet" - namespace := "lb-test" - f := framework.NewFramework("init", fmt.Sprintf("%s/.kube/config", os.Getenv("HOME"))) - - _, err := f.KubeClientSet.CoreV1().Namespaces().Create(context.Background(), &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: namespace, - Labels: map[string]string{"e2e": "true"}}}, metav1.CreateOptions{}) - if err != nil { - Fail(err.Error()) - } - - s := kubeovn.Subnet{ - ObjectMeta: metav1.ObjectMeta{ - Name: subnetName, - Labels: map[string]string{"e2e": "true"}, - }, - Spec: kubeovn.SubnetSpec{ - CIDRBlock: "172.18.0.0/16", - Protocol: util.CheckProtocol("172.18.0.0/16"), - Provider: "lb-svc-attachment.kube-system", - ExcludeIps: []string{"172.18.0.0..172.18.0.10"}, - }, - } - _, err = f.OvnClientSet.KubeovnV1().Subnets().Create(context.Background(), &s, metav1.CreateOptions{}) - if err != nil { - Fail(err.Error()) - } - - return nil -}, func(data []byte) {}) diff --git a/test/e2e-multus/lbsvc/lbsvc.go b/test/e2e-multus/lbsvc/lbsvc.go deleted file mode 100644 index bee9f00a32c..00000000000 --- a/test/e2e-multus/lbsvc/lbsvc.go +++ /dev/null @@ -1,184 +0,0 @@ -package lbsvc - -import ( - "context" - "fmt" - "os" - "strings" - "time" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" - - "github.com/kubeovn/kube-ovn/pkg/util" - "github.com/kubeovn/kube-ovn/test/e2e/framework" - "k8s.io/apimachinery/pkg/util/intstr" -) - -const ( - ATTACHMENT_NAME = "lb-svc-attachment" - ATTACHMENT_NS = "kube-system" -) - -func genLbSvcDpName(name string) string { - return fmt.Sprintf("lb-svc-%s", name) -} - -var _ = Describe("Lbsvc", func() { - f := framework.NewFramework("lbsvc", fmt.Sprintf("%s/.kube/config", os.Getenv("HOME"))) - - It("dynamic lb svc", func() { - name := "dynamic-service" - namespace := "lb-test" - - var val intstr.IntOrString - val.IntVal = 80 - var port corev1.ServicePort - port.Name = "test" - port.Protocol = corev1.ProtocolTCP - port.Port = 80 - port.TargetPort = val - - By("create service") - svc := corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Labels: map[string]string{"e2e": "true", "app": "dynamic"}, - Annotations: map[string]string{"lb-svc-attachment.kube-system.kubernetes.io/logical_switch": "attach-subnet"}, - }, - Spec: corev1.ServiceSpec{ - Ports: []corev1.ServicePort{port}, - Selector: map[string]string{"app": "dynamic"}, - SessionAffinity: corev1.ServiceAffinityNone, - Type: corev1.ServiceTypeLoadBalancer, - }, - } - _, err := f.KubeClientSet.CoreV1().Services(namespace).Create(context.Background(), &svc, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - time.Sleep(time.Second * 15) - - By("check deployment") - dpName := genLbSvcDpName(name) - deploy, err := f.KubeClientSet.AppsV1().Deployments(namespace).Get(context.Background(), dpName, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - Expect(deploy.Status.AvailableReplicas).To(Equal(int32(1))) - - By("wait pod running") - var pod corev1.Pod - found := false - - pods, _ := f.KubeClientSet.CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{}) - for _, pod = range pods.Items { - if strings.Contains(pod.Name, dpName) { - found = true - break - } - } - Expect(found).To(Equal(true)) - - _, err = f.WaitPodReady(pod.Name, namespace) - Expect(err).NotTo(HaveOccurred()) - - By("check pod annotation") - providerName := fmt.Sprintf("%s.%s", ATTACHMENT_NAME, ATTACHMENT_NS) - allocateAnnotation := fmt.Sprintf(util.AllocatedAnnotationTemplate, providerName) - Expect(pod.Annotations[allocateAnnotation]).To(Equal("true")) - - attachCidrAnnotation := fmt.Sprintf(util.CidrAnnotationTemplate, providerName) - attachIpAnnotation := fmt.Sprintf(util.IpAddressAnnotationTemplate, providerName) - result := util.CIDRContainIP(pod.Annotations[attachCidrAnnotation], pod.Annotations[attachIpAnnotation]) - Expect(result).To(Equal(true)) - - By("check svc externalIP") - checkSvc, err := f.KubeClientSet.CoreV1().Services(namespace).Get(context.Background(), name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - lbIP := checkSvc.Status.LoadBalancer.Ingress[0].IP - Expect(pod.Annotations[attachIpAnnotation]).To(Equal(lbIP)) - - By("Delete svc") - err = f.KubeClientSet.CoreV1().Services(namespace).Delete(context.Background(), svc.Name, metav1.DeleteOptions{}) - Expect(err).NotTo(HaveOccurred()) - }) - - It("static lb svc", func() { - name := "static-service" - namespace := "lb-test" - staticIP := "172.18.0.99" - - var val intstr.IntOrString - val.IntVal = 80 - var port corev1.ServicePort - port.Name = "test" - port.Protocol = corev1.ProtocolTCP - port.Port = 80 - port.TargetPort = val - - By("create service") - svc := corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Labels: 
map[string]string{"e2e": "true", "app": "static"}, - Annotations: map[string]string{"lb-svc-attachment.kube-system.kubernetes.io/logical_switch": "attach-subnet"}, - }, - Spec: corev1.ServiceSpec{ - Ports: []corev1.ServicePort{port}, - Selector: map[string]string{"app": "static"}, - SessionAffinity: corev1.ServiceAffinityNone, - Type: corev1.ServiceTypeLoadBalancer, - LoadBalancerIP: staticIP, - }, - } - - _, err := f.KubeClientSet.CoreV1().Services(namespace).Create(context.Background(), &svc, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - time.Sleep(time.Second * 10) - - By("check deployment") - dpName := genLbSvcDpName(name) - deploy, err := f.KubeClientSet.AppsV1().Deployments(namespace).Get(context.Background(), dpName, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - Expect(deploy.Status.AvailableReplicas).To(Equal(int32(1))) - - By("wait pod running") - var pod corev1.Pod - found := false - - pods, _ := f.KubeClientSet.CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{}) - for _, pod = range pods.Items { - if strings.Contains(pod.Name, dpName) { - found = true - break - } - } - Expect(found).To(Equal(true)) - - _, err = f.WaitPodReady(pod.Name, namespace) - Expect(err).NotTo(HaveOccurred()) - - By("check pod annotation") - providerName := fmt.Sprintf("%s.%s", ATTACHMENT_NAME, ATTACHMENT_NS) - allocateAnnotation := fmt.Sprintf(util.AllocatedAnnotationTemplate, providerName) - Expect(pod.Annotations[allocateAnnotation]).To(Equal("true")) - - attachCidrAnnotation := fmt.Sprintf(util.CidrAnnotationTemplate, providerName) - attachIpAnnotation := fmt.Sprintf(util.IpAddressAnnotationTemplate, providerName) - result := util.CIDRContainIP(pod.Annotations[attachCidrAnnotation], pod.Annotations[attachIpAnnotation]) - Expect(result).To(Equal(true)) - - By("check svc externalIP") - checkSvc, err := f.KubeClientSet.CoreV1().Services(namespace).Get(context.Background(), name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - lbIP := checkSvc.Status.LoadBalancer.Ingress[0].IP - Expect(pod.Annotations[attachIpAnnotation]).To(Equal(lbIP)) - Expect(staticIP).To(Equal(lbIP)) - - By("Delete svc") - err = f.KubeClientSet.CoreV1().Services(namespace).Delete(context.Background(), svc.Name, metav1.DeleteOptions{}) - Expect(err).NotTo(HaveOccurred()) - }) -}) diff --git a/test/e2e-ovn-ic/e2e_suite_test.go b/test/e2e-ovn-ic/e2e_suite_test.go deleted file mode 100644 index 1141d633bea..00000000000 --- a/test/e2e-ovn-ic/e2e_suite_test.go +++ /dev/null @@ -1,128 +0,0 @@ -package e2e_ovn_ic_test - -import ( - "context" - "fmt" - "net" - "os" - "os/exec" - "strings" - "testing" - "time" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/clientcmd" - - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" - - "github.com/kubeovn/kube-ovn/test/e2e/framework" -) - -func TestE2eOvnIc(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Kube-OVN E2E OVN-IC Suite") -} - -var _ = SynchronizedAfterSuite(func() {}, func() { - - output, err := exec.Command("kubectl", "config", "use-context", "kind-kube-ovn").CombinedOutput() - Expect(err).NotTo(HaveOccurred(), string(output)) - f := framework.NewFramework("init", fmt.Sprintf("%s/.kube/config", os.Getenv("HOME"))) - - pods, err := f.KubeClientSet.CoreV1().Pods("kube-system").List(context.Background(), metav1.ListOptions{LabelSelector: "ovn-nb-leader=true"}) - Expect(err).NotTo(HaveOccurred()) - if len(pods.Items) != 1 { - Fail(fmt.Sprintf("pods %s not right", pods)) - } - - cmdLS := "ovn-nbctl --format=csv --data=bare --columns=name --no-heading find logical_switch name=ts" - sout, _, err := f.ExecToPodThroughAPI(cmdLS, "ovn-central", pods.Items[0].Name, pods.Items[0].Namespace, nil) - if err != nil { - Fail(fmt.Sprintf("switch ts does not exist in pod %s for %s", pods.Items[0].Name, err)) - } - if strings.TrimSpace(sout) != "ts" { - Fail(fmt.Sprintf("switch ts is not right as %s", sout)) - } - - checkLSP("ts-az1", pods.Items[0], f) - checkLSP("ts-az0", pods.Items[0], f) - - output, err = exec.Command("kubectl", "-n", "kube-system", "-l", "app=kube-ovn-pinger", "get", "pod", "-o=jsonpath={.items[0].status.podIP}").CombinedOutput() - Expect(err).NotTo(HaveOccurred()) - if net.ParseIP(string(output)) == nil { - Fail(fmt.Sprintf("pinger ip %s not right", output)) - } - ip0 := string(output) - - // To avoid the situation that the wrong kube-config context is loaded in framework, and then the test cloud always - // pass the test. a replacement kube-client solution is introduced to force the correct context pod-list to be read. - // Then if framework read the wrong context, it will get wrong pod which from another cluster. 
- output, err = exec.Command("kubectl", "config", "use-context", "kind-kube-ovn1").CombinedOutput() - Expect(err).NotTo(HaveOccurred(), string(output)) - f = framework.NewFramework("init", fmt.Sprintf("%s/.kube/config", os.Getenv("HOME"))) - kubecfg1, err := buildConfigFromFlags("kind-kube-ovn1", fmt.Sprintf("%s/.kube/config", os.Getenv("HOME"))) - Expect(err).NotTo(HaveOccurred()) - kubeClient1, err := kubernetes.NewForConfig(kubecfg1) - Expect(err).NotTo(HaveOccurred()) - - pods1, err := kubeClient1.CoreV1().Pods("kube-system").List(context.Background(), metav1.ListOptions{LabelSelector: "ovn-nb-leader=true"}) - Expect(err).NotTo(HaveOccurred()) - if len(pods1.Items) != 1 { - Fail(fmt.Sprintf("pods %s length not 1", pods1)) - } - - sout, _, err = f.ExecToPodThroughAPI(cmdLS, "ovn-central", pods1.Items[0].Name, pods1.Items[0].Namespace, nil) - if err != nil { - Fail(fmt.Sprintf("switch ts does not exist in pod %s with %s", pods1.Items[0].Name, err)) - } - if strings.TrimSpace(sout) != "ts" { - Fail(fmt.Sprintf("switch ts is not right as %s", sout)) - } - - checkLSP("ts-az1", pods1.Items[0], f) - checkLSP("ts-az0", pods1.Items[0], f) - - pod, err := exec.Command("kubectl", "-n", "kube-system", "-l", "app=kube-ovn-pinger", "get", "pod", "-o=jsonpath={.items[0].metadata.name}").CombinedOutput() - Expect(err).NotTo(HaveOccurred()) - Expect(pod).ShouldNot(BeEmpty()) - - output, err = exec.Command("kubectl", "-n", "kube-system", "exec", "-i", string(pod), "--", "/usr/bin/ping", ip0, "-c2").CombinedOutput() - Expect(err).NotTo(HaveOccurred()) - Expect(string(output)).Should(ContainSubstring("0% packet loss")) - - output, err = exec.Command("kubectl", "apply", "-f", "/tmp/ovn-ic-1-alter.yaml").CombinedOutput() - Expect(err).NotTo(HaveOccurred()) - Expect(string(output)).Should(ContainSubstring("configured")) - - time.Sleep(time.Second * 10) - - checkLSP("ts-az1111", pods1.Items[0], f) - - output, err = exec.Command("kubectl", "-n", "kube-system", "exec", "-i", string(pod), "--", "/usr/bin/ping", ip0, "-c2").CombinedOutput() - Expect(err).NotTo(HaveOccurred()) - Expect(string(output)).Should(ContainSubstring("0% packet loss")) - -}) - -func buildConfigFromFlags(context, kubeconfigPath string) (*rest.Config, error) { - return clientcmd.NewNonInteractiveDeferredLoadingClientConfig( - &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfigPath}, - &clientcmd.ConfigOverrides{ - CurrentContext: context, - }).ClientConfig() -} - -func checkLSP(lspName string, pod v1.Pod, f *framework.Framework) { - cmd := fmt.Sprintf("ovn-nbctl --format=csv --data=bare --columns=name --no-heading find logical_switch_port name=%s", lspName) - sout, _, err := f.ExecToPodThroughAPI(cmd, "ovn-central", pod.Name, pod.Namespace, nil) - if err != nil { - Fail(fmt.Sprintf("switch port %s ts does not exist", lspName)) - } - if strings.TrimSpace(sout) != lspName { - Fail(fmt.Sprintf("switch port %s is not right as %s", lspName, sout)) - } -} diff --git a/test/e2e-underlay-single-nic/e2e_suite_test.go b/test/e2e-underlay-single-nic/e2e_suite_test.go deleted file mode 100644 index 9c07b4a3158..00000000000 --- a/test/e2e-underlay-single-nic/e2e_suite_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package e2e_underlay_single_nic_test - -import ( - "context" - "fmt" - "os" - "testing" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" - - "github.com/kubeovn/kube-ovn/test/e2e/framework" - - // tests to run - _ "github.com/kubeovn/kube-ovn/test/e2e-underlay-single-nic/kubectl-ko" - _ "github.com/kubeovn/kube-ovn/test/e2e-underlay-single-nic/node" - _ "github.com/kubeovn/kube-ovn/test/e2e-underlay-single-nic/overlay" -) - -func TestE2e(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Kube-OVN Vlan E2E Suite") -} - -var _ = SynchronizedAfterSuite(func() {}, func() { - f := framework.NewFramework("init", fmt.Sprintf("%s/.kube/config", os.Getenv("HOME"))) - nss, err := f.KubeClientSet.CoreV1().Namespaces().List(context.Background(), metav1.ListOptions{LabelSelector: "e2e=true"}) - if err != nil { - Fail(err.Error()) - } - if nss != nil { - for _, ns := range nss.Items { - err := f.KubeClientSet.CoreV1().Namespaces().Delete(context.Background(), ns.Name, metav1.DeleteOptions{}) - if err != nil { - Fail(err.Error()) - } - } - } - - err = f.OvnClientSet.KubeovnV1().Subnets().DeleteCollection(context.Background(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "e2e=true"}) - if err != nil { - Fail(err.Error()) - } -}) diff --git a/test/e2e-underlay-single-nic/kubectl-ko/ko.go b/test/e2e-underlay-single-nic/kubectl-ko/ko.go deleted file mode 100644 index fe2b2f2f845..00000000000 --- a/test/e2e-underlay-single-nic/kubectl-ko/ko.go +++ /dev/null @@ -1,41 +0,0 @@ -package kubectl_ko - -import ( - "context" - "fmt" - "os" - "os/exec" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - - kubeovn "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" - "github.com/kubeovn/kube-ovn/pkg/util" - "github.com/kubeovn/kube-ovn/test/e2e/framework" -) - -var _ = Describe("[kubectl-ko]", func() { - f := framework.NewFramework("kubectl-ko", fmt.Sprintf("%s/.kube/config", os.Getenv("HOME"))) - - It("trace", func() { - pods, err := f.KubeClientSet.CoreV1().Pods("kube-system").List(context.Background(), metav1.ListOptions{LabelSelector: "app=kube-ovn-pinger"}) - Expect(err).NotTo(HaveOccurred()) - - pod := pods.Items[0] - dst := "114.114.114.114" - if util.CheckProtocol(pod.Status.PodIP) == kubeovn.ProtocolIPv6 { - dst = "2400:3200::1" - } - - output, err := exec.Command("kubectl", "ko", "trace", fmt.Sprintf("kube-system/%s", pod.Name), dst, "icmp").CombinedOutput() - Expect(err).NotTo(HaveOccurred(), string(output)) - - output, err = exec.Command("kubectl", "ko", "trace", fmt.Sprintf("kube-system/%s", pod.Name), dst, "tcp", "80").CombinedOutput() - Expect(err).NotTo(HaveOccurred(), string(output)) - - output, err = exec.Command("kubectl", "ko", "trace", fmt.Sprintf("kube-system/%s", pod.Name), dst, "udp", "53").CombinedOutput() - Expect(err).NotTo(HaveOccurred(), string(output)) - }) -}) diff --git a/test/e2e-underlay-single-nic/node/node.go b/test/e2e-underlay-single-nic/node/node.go deleted file mode 100644 index a274357ac96..00000000000 --- a/test/e2e-underlay-single-nic/node/node.go +++ /dev/null @@ -1,228 +0,0 @@ -package node - -import ( - "context" - "encoding/json" - "fmt" - "net" - "os" - "strings" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" - - "github.com/kubeovn/kube-ovn/pkg/util" - "github.com/kubeovn/kube-ovn/test/e2e/framework" -) - -const vlanNic = "eth0" - -var vlanBr = util.ExternalBridgeName("provider") - -var network *nodeNetwork - -type nodeNetwork struct { - Gateway string - IPAddress string - IPPrefixLen int - IPv6Gateway string - GlobalIPv6Address string - GlobalIPv6PrefixLen int - MacAddress string -} - -func init() { - data, err := os.ReadFile("node/network.json") - if err != nil { - panic(err) - } - if err = json.Unmarshal(data, &network); err != nil { - panic(err) - } -} - -var _ = Describe("[Underlay Node]", func() { - f := framework.NewFramework("node", fmt.Sprintf("%s/.kube/config", os.Getenv("HOME"))) - - It("Single NIC", func() { - nodes, err := f.KubeClientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{}) - Expect(err).NotTo(HaveOccurred()) - Expect(nodes).NotTo(BeNil()) - Expect(len(nodes.Items)).NotTo(BeZero()) - - nodeIPs := make([]string, 0, len(nodes.Items)*2) - nodeRoutes := make([]string, 0, len(nodes.Items)*4) - if network != nil { - if network.IPAddress != "" { - addr := fmt.Sprintf("%s/%d", network.IPAddress, network.IPPrefixLen) - nodeIPs = append(nodeIPs, addr) - _, ipNet, err := net.ParseCIDR(addr) - Expect(err).NotTo(HaveOccurred()) - nodeRoutes = append(nodeRoutes, fmt.Sprintf("%s ", ipNet.String())) - } - if network.GlobalIPv6Address != "" { - addr := fmt.Sprintf("%s/%d", network.GlobalIPv6Address, network.GlobalIPv6PrefixLen) - nodeIPs = append(nodeIPs, addr) - _, ipNet, err := net.ParseCIDR(addr) - Expect(err).NotTo(HaveOccurred()) - nodeRoutes = append(nodeRoutes, fmt.Sprintf("%s ", ipNet.String())) - } - if network.Gateway != "" { - nodeRoutes = append(nodeRoutes, fmt.Sprintf("default via %s ", network.Gateway)) - } - if network.IPv6Gateway != "" { - nodeRoutes = append(nodeRoutes, fmt.Sprintf("default via %s ", network.IPv6Gateway)) - } - } else { - for _, node := range nodes.Items { - if node.Name == "kube-ovn-control-plane" { - ipv4, ipv6 := util.GetNodeInternalIP(node) - if ipv4 != "" { - nodeIPs = append(nodeIPs, ipv4+"/") - } - if ipv6 != "" { - nodeIPs = append(nodeIPs, ipv6+"/") - } - break - } - } - } - Expect(nodeIPs).NotTo(BeEmpty()) - - ovsPods, err := f.KubeClientSet.CoreV1().Pods("kube-system").List(context.Background(), metav1.ListOptions{LabelSelector: "app=ovs"}) - Expect(err).NotTo(HaveOccurred()) - Expect(ovsPods).NotTo(BeNil()) - - var ovsPod *corev1.Pod - for _, pod := range ovsPods.Items { - for _, ip := range nodeIPs { - if strings.HasPrefix(ip, pod.Status.HostIP+"/") { - ovsPod = &pod - break - } - } - if ovsPod != nil { - break - } - } - Expect(ovsPod).NotTo(BeNil()) - - stdout, _, err := f.ExecToPodThroughAPI("ovs-vsctl list-ports "+vlanBr, "openvswitch", ovsPod.Name, ovsPod.Namespace, nil) - Expect(err).NotTo(HaveOccurred()) - - var found bool - for _, port := range strings.Split(stdout, "\n") { - if port == vlanNic { - found = true - break - } - } - Expect(found).To(BeTrue()) - - stdout, _, err = f.ExecToPodThroughAPI("ip addr show "+vlanBr, "openvswitch", ovsPod.Name, ovsPod.Namespace, nil) - Expect(err).NotTo(HaveOccurred()) - Expect(stdout).NotTo(BeEmpty()) - - ipFound := make([]bool, len(nodeIPs)) - for i, s := range strings.Split(stdout, "\n") { - if i == 0 { - var linkUp bool - idx1, idx2 := strings.IndexRune(s, '<'), strings.IndexRune(s, '>') - if idx1 > 0 && idx2 > idx1+1 { - for _, state := range strings.Split(s[idx1+1:idx2], ",") { - if state == "UP" { - linkUp = true - break - } - } - } - 
Expect(linkUp).To(BeTrue()) - continue - } - if i == 1 && network != nil && network.MacAddress != "" { - Expect(strings.TrimSpace(s)).To(HavePrefix("link/ether %s ", network.MacAddress)) - continue - } - - s = strings.TrimSpace(s) - for i, ip := range nodeIPs { - if strings.HasPrefix(s, "inet "+ip) || strings.HasPrefix(s, "inet6 "+ip) { - ipFound[i] = true - break - } - } - } - for _, found := range ipFound { - Expect(found).To(BeTrue()) - } - - stdout, _, err = f.ExecToPodThroughAPI("ip addr show "+vlanNic, "openvswitch", ovsPod.Name, ovsPod.Namespace, nil) - Expect(err).NotTo(HaveOccurred()) - Expect(stdout).NotTo(BeEmpty()) - - var hasAddr bool - for _, s := range strings.Split(stdout, "\n") { - if s = strings.TrimSpace(s); strings.HasPrefix(s, "inet ") || strings.HasPrefix(s, "inet6 ") { - ip, _, err := net.ParseCIDR(strings.Fields(s)[1]) - Expect(err).NotTo(HaveOccurred()) - if ip.IsLinkLocalUnicast() { - continue - } - hasAddr = true - break - } - } - Expect(hasAddr).To(BeFalse()) - - stdout, _, err = f.ExecToPodThroughAPI("ip -4 route show dev "+vlanBr, "openvswitch", ovsPod.Name, ovsPod.Namespace, nil) - Expect(err).NotTo(HaveOccurred()) - routes := strings.Split(stdout, "\n") - - stdout, _, err = f.ExecToPodThroughAPI("ip -6 route show dev "+vlanBr, "openvswitch", ovsPod.Name, ovsPod.Namespace, nil) - Expect(err).NotTo(HaveOccurred()) - routes = append(routes, strings.Split(stdout, "\n")...) - - routeFound := make([]bool, len(nodeRoutes)) - for i, prefix := range nodeRoutes { - for _, route := range routes { - if strings.HasPrefix(route, prefix) { - routeFound[i] = true - break - } - } - } - for _, found := range routeFound { - Expect(found).To(BeTrue()) - } - - stdout, _, err = f.ExecToPodThroughAPI("ip -4 route show dev "+vlanNic, "openvswitch", ovsPod.Name, ovsPod.Namespace, nil) - Expect(err).NotTo(HaveOccurred()) - Expect(strings.TrimSpace(stdout)).To(BeEmpty()) - - stdout, _, err = f.ExecToPodThroughAPI("ip -6 route show dev "+vlanNic, "openvswitch", ovsPod.Name, ovsPod.Namespace, nil) - Expect(err).NotTo(HaveOccurred()) - - var hasRoute bool - for _, s := range strings.Split(stdout, "\n") { - if s = strings.TrimSpace(s); s == "" { - continue - } - - if !strings.HasPrefix(s, "default ") { - addr := strings.Split(strings.Fields(s)[0], "/")[0] - ip := net.ParseIP(addr) - Expect(ip).NotTo(BeNil()) - if ip.IsLinkLocalUnicast() { - continue - } - } - - hasRoute = true - break - } - Expect(hasRoute).To(BeFalse()) - }) -}) diff --git a/test/e2e-underlay-single-nic/overlay/overlay.go b/test/e2e-underlay-single-nic/overlay/overlay.go deleted file mode 100644 index 4db66696be3..00000000000 --- a/test/e2e-underlay-single-nic/overlay/overlay.go +++ /dev/null @@ -1,167 +0,0 @@ -package overlay - -import ( - "context" - "fmt" - "os" - "strings" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" - - kubeovnv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" - "github.com/kubeovn/kube-ovn/pkg/util" - "github.com/kubeovn/kube-ovn/test/e2e/framework" -) - -const testImage = "kubeovn/pause:3.2" - -var _ = Describe("[Overlay]", func() { - Context("[Connectivity]", func() { - It("u2o", func() { - f := framework.NewFramework("overlay", fmt.Sprintf("%s/.kube/config", os.Getenv("HOME"))) - - By("get default subnet") - cachedSubnet, err := f.OvnClientSet.KubeovnV1().Subnets().Get(context.Background(), "ovn-default", metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - if cachedSubnet.Spec.Protocol == kubeovnv1.ProtocolIPv6 { - return - } - - By("enable u2oRouting") - if !cachedSubnet.Spec.U2oRouting { - subnet := cachedSubnet.DeepCopy() - subnet.Spec.U2oRouting = true - _, err = f.OvnClientSet.KubeovnV1().Subnets().Update(context.Background(), subnet, metav1.UpdateOptions{}) - Expect(err).NotTo(HaveOccurred()) - } - - By("create overlay namespace") - namespace := "e2e-overlay" - _, err = f.KubeClientSet.CoreV1().Namespaces().Create(context.Background(), &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: namespace, - Labels: map[string]string{"e2e": "true"}}}, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - - By("create overlay subnet") - subnetName := "e2e-overlay" - s := kubeovnv1.Subnet{ - ObjectMeta: metav1.ObjectMeta{ - Name: subnetName, - Labels: map[string]string{"e2e": "true"}, - }, - Spec: kubeovnv1.SubnetSpec{ - CIDRBlock: "12.10.0.0/16", - Namespaces: []string{namespace}, - Protocol: util.CheckProtocol("12.10.0.0/16"), - }, - } - _, err = f.OvnClientSet.KubeovnV1().Subnets().Create(context.Background(), &s, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - err = f.WaitSubnetReady(subnetName) - Expect(err).NotTo(HaveOccurred()) - - By("create underlay pod") - var autoMount bool - upod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: f.GetName(), - Namespace: "default", - Labels: map[string]string{"e2e": "true"}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: f.GetName(), - Image: testImage, - ImagePullPolicy: corev1.PullIfNotPresent, - }, - }, - AutomountServiceAccountToken: &autoMount, - }, - } - _, err = f.KubeClientSet.CoreV1().Pods(upod.Namespace).Create(context.Background(), upod, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - upod, err = f.WaitPodReady(upod.Name, upod.Namespace) - Expect(err).NotTo(HaveOccurred()) - Expect(upod.Spec.NodeName).NotTo(BeEmpty()) - - By("create overlay pod") - opod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: f.GetName(), - Namespace: namespace, - Labels: map[string]string{"e2e": "true"}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: f.GetName(), - Image: testImage, - ImagePullPolicy: corev1.PullIfNotPresent, - }, - }, - AutomountServiceAccountToken: &autoMount, - }, - } - _, err = f.KubeClientSet.CoreV1().Pods(opod.Namespace).Create(context.Background(), opod, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - opod, err = f.WaitPodReady(opod.Name, opod.Namespace) - Expect(err).NotTo(HaveOccurred()) - - By("get kube-ovn-cni pod") - podList, err := f.KubeClientSet.CoreV1().Pods("kube-system").List(context.Background(), metav1.ListOptions{LabelSelector: "app=kube-ovn-cni"}) - Expect(err).NotTo(HaveOccurred()) - Expect(podList).NotTo(BeNil()) - - var cniPod *corev1.Pod - for i, pod := range podList.Items { - if pod.Spec.NodeName == upod.Spec.NodeName { - cniPod = &podList.Items[i] - 
break - } - } - Expect(cniPod).NotTo(BeNil()) - - By("get underlay pod's netns") - cmd := fmt.Sprintf("ovs-vsctl --no-heading --columns=external_ids find interface external-ids:pod_name=%s external-ids:pod_namespace=%s", upod.Name, upod.Namespace) - stdout, _, err := f.ExecToPodThroughAPI(cmd, "cni-server", cniPod.Name, cniPod.Namespace, nil) - Expect(err).NotTo(HaveOccurred()) - var netns string - for _, field := range strings.Fields(stdout) { - if strings.HasPrefix(field, "pod_netns=") { - netns = strings.TrimPrefix(field, "pod_netns=") - netns = strings.Trim(netns[:len(netns)-1], `"`) - break - } - } - Expect(netns).NotTo(BeEmpty()) - - By("ping overlay pod") - cmd = fmt.Sprintf("nsenter --net=%s ping -c1 -W1 %s", netns, opod.Status.PodIP) - stdout, _, err = f.ExecToPodThroughAPI(cmd, "cni-server", cniPod.Name, cniPod.Namespace, nil) - Expect(err).NotTo(HaveOccurred()) - Expect(stdout).To(ContainSubstring(" 0% packet loss")) - - By("delete underlay pod") - err = f.KubeClientSet.CoreV1().Pods(upod.Namespace).Delete(context.Background(), upod.Name, metav1.DeleteOptions{}) - Expect(err).NotTo(HaveOccurred()) - - By("delete overlay pod") - err = f.KubeClientSet.CoreV1().Pods(opod.Namespace).Delete(context.Background(), opod.Name, metav1.DeleteOptions{}) - Expect(err).NotTo(HaveOccurred()) - - By("delete overlay subnet") - err = f.OvnClientSet.KubeovnV1().Subnets().Delete(context.Background(), subnetName, metav1.DeleteOptions{}) - Expect(err).NotTo(HaveOccurred()) - - By("delete overlay namespace") - err = f.KubeClientSet.CoreV1().Namespaces().Delete(context.Background(), opod.Namespace, metav1.DeleteOptions{}) - Expect(err).NotTo(HaveOccurred()) - }) - }) -}) diff --git a/test/cyclonus.yaml b/test/e2e/cyclonus.yaml similarity index 100% rename from test/cyclonus.yaml rename to test/e2e/cyclonus.yaml diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go deleted file mode 100644 index 227025e1479..00000000000 --- a/test/e2e/e2e_suite_test.go +++ /dev/null @@ -1,349 +0,0 @@ -package e2e - -import ( - "context" - "encoding/json" - "fmt" - "os" - "os/exec" - "strconv" - "strings" - "testing" - "time" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - k8sruntime "k8s.io/apimachinery/pkg/runtime" - kubeadmscheme "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/scheme" - kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3" - kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" - - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" - - kubeovn "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" - "github.com/kubeovn/kube-ovn/pkg/util" - "github.com/kubeovn/kube-ovn/test/e2e/framework" - - // tests to run - _ "github.com/kubeovn/kube-ovn/test/e2e/ip" - _ "github.com/kubeovn/kube-ovn/test/e2e/kubectl-ko" - _ "github.com/kubeovn/kube-ovn/test/e2e/node" - _ "github.com/kubeovn/kube-ovn/test/e2e/qos" - _ "github.com/kubeovn/kube-ovn/test/e2e/service" - _ "github.com/kubeovn/kube-ovn/test/e2e/subnet" - "github.com/kubeovn/kube-ovn/test/e2e/underlay" -) - -var nodeNetworks map[string]nodeNetwork - -type nodeNetwork struct { - Gateway string - IPAddress string - IPPrefixLen int - IPv6Gateway string - GlobalIPv6Address string - GlobalIPv6PrefixLen int - MacAddress string -} - -func init() { - data, err := os.ReadFile("network.json") - if err != nil { - panic(err) - } - if err = json.Unmarshal(data, &nodeNetworks); err != nil { - panic(err) - } -} - -func TestE2e(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Kube-OVN E2E Suite") -} - -var _ = SynchronizedAfterSuite(func() {}, func() { - f := framework.NewFramework("init", fmt.Sprintf("%s/.kube/config", os.Getenv("HOME"))) - for { - pods, err := f.KubeClientSet.CoreV1().Pods(corev1.NamespaceAll).List(context.Background(), metav1.ListOptions{LabelSelector: "e2e=true"}) - if err != nil { - Fail(err.Error()) - } - if len(pods.Items) == 0 { - break - } - time.Sleep(time.Second) - } - - for { - nss, err := f.KubeClientSet.CoreV1().Namespaces().List(context.Background(), metav1.ListOptions{LabelSelector: "e2e=true"}) - if err != nil { - Fail(err.Error()) - } - if len(nss.Items) == 0 { - break - } - for _, ns := range nss.Items { - if ns.DeletionTimestamp != nil { - continue - } - err := f.KubeClientSet.CoreV1().Namespaces().Delete(context.Background(), ns.Name, metav1.DeleteOptions{}) - if err != nil { - Fail(err.Error()) - } - } - time.Sleep(time.Second) - } - - err := f.OvnClientSet.KubeovnV1().Subnets().DeleteCollection(context.Background(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "e2e=true"}) - if err != nil { - Fail(err.Error()) - } - for { - subnets, err := f.OvnClientSet.KubeovnV1().Subnets().List(context.Background(), metav1.ListOptions{LabelSelector: "e2e=true"}) - if err != nil { - Fail(err.Error()) - } - if len(subnets.Items) == 0 { - break - } - time.Sleep(time.Second) - } - - err = f.OvnClientSet.KubeovnV1().Vlans().DeleteCollection(context.Background(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "e2e=true"}) - if err != nil { - Fail(err.Error()) - } - for { - vlans, err := f.OvnClientSet.KubeovnV1().Vlans().List(context.Background(), metav1.ListOptions{LabelSelector: "e2e=true"}) - if err != nil { - Fail(err.Error()) - } - if len(vlans.Items) == 0 { - break - } - time.Sleep(time.Second) - } - - err = f.OvnClientSet.KubeovnV1().ProviderNetworks().DeleteCollection(context.Background(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "e2e=true"}) - if err != nil { - Fail(err.Error()) - } - for { - pns, err := f.OvnClientSet.KubeovnV1().ProviderNetworks().List(context.Background(), metav1.ListOptions{LabelSelector: "e2e=true"}) - if err != nil { - Fail(err.Error()) - } - if len(pns.Items) == 0 { - break - } - time.Sleep(time.Second) - } -}) - -func setExternalRoute(af int, dst, gw string) { - if dst == "" || gw == "" { - return - } - - cmd := exec.Command("docker", "exec", "kube-ovn-e2e", "ip", fmt.Sprintf("-%d", af), "route", "replace", dst, "via", gw) - output, err := cmd.CombinedOutput() - if 
err != nil { - Fail((fmt.Sprintf(`failed to execute command "%s": %v, output: %s`, cmd.String(), err, strings.TrimSpace(string(output))))) - } -} - -var _ = SynchronizedBeforeSuite(func() []byte { - subnetName := "static-ip" - namespace := "static-ip" - f := framework.NewFramework("init", fmt.Sprintf("%s/.kube/config", os.Getenv("HOME"))) - - _, err := f.KubeClientSet.CoreV1().Namespaces().Create(context.Background(), &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: namespace, - Labels: map[string]string{"e2e": "true"}}}, metav1.CreateOptions{}) - if err != nil { - Fail(err.Error()) - } - - s := kubeovn.Subnet{ - ObjectMeta: metav1.ObjectMeta{ - Name: subnetName, - Labels: map[string]string{"e2e": "true"}, - }, - Spec: kubeovn.SubnetSpec{ - CIDRBlock: "12.10.0.0/16", - Namespaces: []string{namespace}, - Protocol: util.CheckProtocol("12.10.0.0/16"), - }, - } - _, err = f.OvnClientSet.KubeovnV1().Subnets().Create(context.Background(), &s, metav1.CreateOptions{}) - if err != nil { - Fail(err.Error()) - } - err = f.WaitSubnetReady(subnetName) - if err != nil { - Fail(err.Error()) - } - - nodes, err := f.KubeClientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{}) - if err != nil { - Fail(err.Error()) - } - kubeadmConfigMap, err := f.KubeClientSet.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(context.Background(), kubeadmconstants.KubeadmConfigConfigMap, metav1.GetOptions{}) - if err != nil { - Fail(err.Error()) - } - - clusterConfig := &kubeadmapi.ClusterConfiguration{} - if err = k8sruntime.DecodeInto(kubeadmscheme.Codecs.UniversalDecoder(), []byte(kubeadmConfigMap.Data[kubeadmconstants.ClusterConfigurationConfigMapKey]), clusterConfig); err != nil { - Fail(fmt.Sprintf("failed to decode kubeadm cluster configuration from bytes: %v", err)) - } - - nodeIPv4, nodeIPv6 := util.GetNodeInternalIP(nodes.Items[0]) - podSubnetV4, podSubnetV6 := util.SplitStringIP(clusterConfig.Networking.PodSubnet) - svcSubnetV4, svcSubnetV6 := util.SplitStringIP(clusterConfig.Networking.ServiceSubnet) - setExternalRoute(4, podSubnetV4, nodeIPv4) - setExternalRoute(4, svcSubnetV4, nodeIPv4) - setExternalRoute(6, podSubnetV6, nodeIPv6) - setExternalRoute(6, svcSubnetV6, nodeIPv6) - - // underlay - var vlanID int - providerInterface := underlay.UnderlayInterface - if underlay.VlanID != "" { - if vlanID, err = strconv.Atoi(underlay.VlanID); err != nil || vlanID <= 0 || vlanID > 4095 { - Fail(underlay.VlanID + " is not a valid VLAN ID") - } - } - - var underlayNodeIPs []string - var underlayCIDR, underlayGateway string - for node, network := range nodeNetworks { - underlay.SetNodeMac(node, network.MacAddress) - if network.IPAddress != "" { - underlay.AddNodeIP(network.IPAddress) - underlayNodeIPs = append(underlayNodeIPs, network.IPAddress) - underlay.AddNodeAddrs(node, fmt.Sprintf("%s/%d", network.IPAddress, network.IPPrefixLen)) - if underlayCIDR == "" { - underlayCIDR = fmt.Sprintf("%s/%d", network.IPAddress, network.IPPrefixLen) - } - } - if network.GlobalIPv6Address != "" { - underlay.AddNodeAddrs(node, fmt.Sprintf("%s/%d", network.GlobalIPv6Address, network.GlobalIPv6PrefixLen)) - } - if network.Gateway != "" { - underlay.AddNodeRoutes(node, fmt.Sprintf("default via %s ", network.Gateway)) - if underlayGateway == "" { - underlayGateway = network.Gateway - } - } - if network.IPv6Gateway != "" { - underlay.AddNodeRoutes(node, fmt.Sprintf("default via %s ", network.IPv6Gateway)) - } - } - underlay.SetCIDR(underlayCIDR) - - cniPods, err := 
f.KubeClientSet.CoreV1().Pods("kube-system").List(context.Background(), metav1.ListOptions{LabelSelector: "app=kube-ovn-cni"}) - if err != nil { - Fail(err.Error()) - } - - for i := range nodes.Items { - var cniPod *corev1.Pod - nodeIPv4, nodeIPv6 := util.GetNodeInternalIP(nodes.Items[i]) - for _, pod := range cniPods.Items { - if pod.Status.HostIP == nodeIPv4 || pod.Status.HostIP == nodeIPv6 { - cniPod = &pod - break - } - } - if cniPod == nil { - Fail("failed to get CNI pod on node " + nodes.Items[i].Name) - return nil - } - - // change MTU - mtu := 1500 - (i+1)*5 - cmd := fmt.Sprintf("ip link set %s mtu %d", providerInterface, mtu) - if _, _, err = f.ExecToPodThroughAPI(cmd, "cni-server", cniPod.Name, cniPod.Namespace, nil); err != nil { - Fail(fmt.Sprintf("failed to set MTU of %s on node %s: %v", providerInterface, nodes.Items[i].Name, err)) - } - underlay.SetNodeMTU(nodes.Items[i].Name, mtu) - } - - ns := corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: underlay.Namespace, - Labels: map[string]string{"e2e": "true"}, - }, - } - if _, err = f.KubeClientSet.CoreV1().Namespaces().Create(context.Background(), &ns, metav1.CreateOptions{}); err != nil { - Fail(err.Error()) - } - - // create provider network - pn := &kubeovn.ProviderNetwork{ - ObjectMeta: metav1.ObjectMeta{ - Name: underlay.ProviderNetwork, - Labels: map[string]string{"e2e": "true"}, - }, - Spec: kubeovn.ProviderNetworkSpec{ - DefaultInterface: providerInterface, - ExchangeLinkName: underlay.ExchangeLinkName, - }, - } - if _, err = f.OvnClientSet.KubeovnV1().ProviderNetworks().Create(context.Background(), pn, metav1.CreateOptions{}); err != nil { - Fail("failed to create provider network: " + err.Error()) - } - if err = f.WaitProviderNetworkReady(pn.Name); err != nil { - Fail("provider network failed: " + err.Error()) - } - if pn, err = f.OvnClientSet.KubeovnV1().ProviderNetworks().Get(context.Background(), pn.Name, metav1.GetOptions{}); err != nil { - Fail("failed to get provider network: " + err.Error()) - } - for _, node := range nodes.Items { - if !pn.Status.NodeIsReady(node.Name) { - Fail(fmt.Sprintf("provider network on node %s is not ready", node.Name)) - } - } - - // create vlan - vlan := kubeovn.Vlan{ - ObjectMeta: metav1.ObjectMeta{ - Name: underlay.Vlan, - Labels: map[string]string{"e2e": "true"}, - }, - Spec: kubeovn.VlanSpec{ - ID: vlanID, - Provider: pn.Name, - }, - } - if _, err = f.OvnClientSet.KubeovnV1().Vlans().Create(context.Background(), &vlan, metav1.CreateOptions{}); err != nil { - Fail("failed to create vlan: " + err.Error()) - } - - // create subnet - subnet := kubeovn.Subnet{ - ObjectMeta: metav1.ObjectMeta{ - Name: underlay.Subnet, - Labels: map[string]string{"e2e": "true"}, - }, - Spec: kubeovn.SubnetSpec{ - CIDRBlock: underlayCIDR, - Gateway: underlayGateway, - ExcludeIps: underlayNodeIPs, - Vlan: vlan.Name, - Namespaces: []string{underlay.Namespace}, - Protocol: util.CheckProtocol(underlayCIDR), - }, - } - if _, err = f.OvnClientSet.KubeovnV1().Subnets().Create(context.Background(), &subnet, metav1.CreateOptions{}); err != nil { - Fail("failed to create subnet: " + err.Error()) - } - if err = f.WaitSubnetReady(subnet.Name); err != nil { - Fail("subnet failed: " + err.Error()) - } - - return nil -}, func(data []byte) {}) diff --git a/test/e2e/framework/daemonset.go b/test/e2e/framework/daemonset.go new file mode 100644 index 00000000000..5066fd44d81 --- /dev/null +++ b/test/e2e/framework/daemonset.go @@ -0,0 +1,47 @@ +package framework + +import ( + "context" + "fmt" + + appsv1 
"k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clientset "k8s.io/client-go/kubernetes" +) + +func GetPodsForDaemonSet(cs clientset.Interface, ds *appsv1.DaemonSet) (*corev1.PodList, error) { + podSelector, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector) + if err != nil { + return nil, err + } + podListOptions := metav1.ListOptions{LabelSelector: podSelector.String()} + allPods, err := cs.CoreV1().Pods(ds.Namespace).List(context.TODO(), podListOptions) + if err != nil { + return nil, err + } + + ownedPods := &corev1.PodList{Items: make([]corev1.Pod, 0, len(allPods.Items))} + for i, pod := range allPods.Items { + controllerRef := metav1.GetControllerOf(&allPods.Items[i]) + if controllerRef != nil && controllerRef.UID == ds.UID { + ownedPods.Items = append(ownedPods.Items, pod) + } + } + + return ownedPods, nil +} + +func GetPodOnNodeForDaemonSet(cs clientset.Interface, ds *appsv1.DaemonSet, node string) (*corev1.Pod, error) { + pods, err := GetPodsForDaemonSet(cs, ds) + if err != nil { + return nil, err + } + for _, pod := range pods.Items { + if pod.Spec.NodeName == node { + return pod.DeepCopy(), nil + } + } + + return nil, fmt.Errorf("pod for daemonset %s/%s on node %s not found", ds.Namespace, ds.Name, node) +} diff --git a/test/e2e/framework/docker/container.go b/test/e2e/framework/docker/container.go new file mode 100644 index 00000000000..b1493fd85f1 --- /dev/null +++ b/test/e2e/framework/docker/container.go @@ -0,0 +1,25 @@ +package docker + +import ( + "context" + + "github.com/docker/docker/api/types" + dockerfilters "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/client" +) + +func ListContainers(filters map[string][]string) ([]types.Container, error) { + cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) + if err != nil { + return nil, err + } + defer cli.Close() + + f := dockerfilters.NewArgs() + for k, v := range filters { + for _, v1 := range v { + f.Add(k, v1) + } + } + return cli.ContainerList(context.Background(), types.ContainerListOptions{All: true, Filters: f}) +} diff --git a/test/e2e/framework/docker/exec.go b/test/e2e/framework/docker/exec.go new file mode 100644 index 00000000000..26f8ccda96b --- /dev/null +++ b/test/e2e/framework/docker/exec.go @@ -0,0 +1,71 @@ +package docker + +import ( + "bytes" + "context" + "fmt" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" + "github.com/docker/docker/pkg/stdcopy" + + "github.com/kubeovn/kube-ovn/test/e2e/framework" +) + +type ErrNonZeroExitCode struct { + cmd string + code int +} + +func (e ErrNonZeroExitCode) Error() string { + return fmt.Sprintf("command %q exited with code %d", e.cmd, e.code) +} + +func Exec(id string, env []string, cmd ...string) (stdout, stderr []byte, err error) { + cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) + if err != nil { + return nil, nil, err + } + defer cli.Close() + + framework.Logf("Executing command %q in container %s", strings.Join(cmd, " "), id) + config := types.ExecConfig{ + Privileged: true, + AttachStderr: true, + AttachStdout: true, + Env: env, + Cmd: cmd, + } + createResp, err := cli.ContainerExecCreate(context.Background(), id, config) + if err != nil { + return nil, nil, err + } + + attachResp, err := cli.ContainerExecAttach(context.Background(), createResp.ID, types.ExecStartCheck{}) + if err != nil { + return nil, nil, err + } + defer attachResp.Close() + + var outBuf, 
errBuf bytes.Buffer + if _, err = stdcopy.StdCopy(&outBuf, &errBuf, attachResp.Reader); err != nil { + return nil, nil, err + } + + inspectResp, err := cli.ContainerExecInspect(context.Background(), createResp.ID) + if err != nil { + return nil, nil, err + } + + if inspectResp.ExitCode != 0 { + framework.Logf("command exited with code %d", inspectResp.ExitCode) + err = ErrNonZeroExitCode{cmd: strings.Join(cmd, " "), code: inspectResp.ExitCode} + } + + stdout, stderr = outBuf.Bytes(), errBuf.Bytes() + framework.Logf("stdout: %s", string(stdout)) + framework.Logf("stderr: %s", string(stderr)) + + return +} diff --git a/test/e2e/framework/docker/network.go b/test/e2e/framework/docker/network.go new file mode 100644 index 00000000000..66060c430f8 --- /dev/null +++ b/test/e2e/framework/docker/network.go @@ -0,0 +1,138 @@ +package docker + +import ( + "context" + "crypto/sha1" + "encoding/binary" + "fmt" + "net" + "strconv" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/client" + "github.com/kubeovn/kube-ovn/pkg/util" +) + +const MTU = 1500 + +// https://github.com/kubernetes-sigs/kind/tree/main/pkg/cluster/internal/providers/docker/network.go#L313 +// generateULASubnetFromName generate an IPv6 subnet based on the +// name and Nth probing attempt +func generateULASubnetFromName(name string, attempt int32) string { + ip := make([]byte, 16) + ip[0] = 0xfc + ip[1] = 0x00 + h := sha1.New() + _, _ = h.Write([]byte(name)) + _ = binary.Write(h, binary.LittleEndian, attempt) + bs := h.Sum(nil) + for i := 2; i < 8; i++ { + ip[i] = bs[i] + } + subnet := &net.IPNet{ + IP: net.IP(ip), + Mask: net.CIDRMask(64, 128), + } + return subnet.String() +} + +func getNetwork(name string, ignoreNotFound bool) (*types.NetworkResource, error) { + cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) + if err != nil { + return nil, err + } + defer cli.Close() + + f := filters.NewArgs() + f.Add("name", name) + networks, err := cli.NetworkList(context.Background(), types.NetworkListOptions{Filters: f}) + if err != nil { + return nil, err + } + + if len(networks) == 0 { + if !ignoreNotFound { + return nil, fmt.Errorf("network %s does not exist", name) + } + return nil, nil + } + + network := networks[0] + return &network, nil +} + +func NetworkGet(name string) (*types.NetworkResource, error) { + return getNetwork(name, false) +} + +func NetworkCreate(name string, ipv6, skipIfExists bool) (*types.NetworkResource, error) { + if skipIfExists { + network, err := getNetwork(name, true) + if err != nil { + return nil, err + } + if network != nil { + return network, nil + } + } + + options := types.NetworkCreate{ + CheckDuplicate: true, + Driver: "bridge", + Attachable: true, + IPAM: &network.IPAM{ + Driver: "default", + }, + Options: map[string]string{ + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.driver.mtu": strconv.Itoa(MTU), + }, + } + if ipv6 { + options.EnableIPv6 = true + subnet := generateULASubnetFromName(name, 0) + gateway, err := util.FirstIP(subnet) + if err != nil { + return nil, err + } + config := network.IPAMConfig{ + Subnet: subnet, + Gateway: gateway, + } + options.IPAM.Config = append(options.IPAM.Config, config) + } + + cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) + if err != nil { + return nil, err + } + defer cli.Close() + + if _, err = cli.NetworkCreate(context.Background(), name, 
options); err != nil { + return nil, err + } + + return getNetwork(name, false) +} + +func NetworkConnect(networkID, containerID string) error { + cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) + if err != nil { + return err + } + defer cli.Close() + + return cli.NetworkConnect(context.Background(), networkID, containerID, nil) +} + +func NetworkDisconnect(networkID, containerID string) error { + cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) + if err != nil { + return err + } + defer cli.Close() + + return cli.NetworkDisconnect(context.Background(), networkID, containerID, false) +} diff --git a/test/e2e/framework/expect.go b/test/e2e/framework/expect.go new file mode 100644 index 00000000000..ffb99a0bc4b --- /dev/null +++ b/test/e2e/framework/expect.go @@ -0,0 +1,177 @@ +/* +Copyright 2014 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package framework + +import ( + "fmt" + "regexp" + + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + "github.com/onsi/gomega/format" + + "github.com/kubeovn/kube-ovn/pkg/util" +) + +var ( + macRegex = regexp.MustCompile(`^([0-9A-Fa-f]{2}:){5}([0-9A-Fa-f]{2})$`) + uuidRegex = regexp.MustCompile(`^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$`) +) + +// ExpectEqual expects the specified two are the same, otherwise an exception raises +func ExpectEqual(actual interface{}, extra interface{}, explain ...interface{}) { + gomega.ExpectWithOffset(1, actual).To(gomega.Equal(extra), explain...) +} + +// ExpectNotEqual expects the specified two are not the same, otherwise an exception raises +func ExpectNotEqual(actual interface{}, extra interface{}, explain ...interface{}) { + gomega.ExpectWithOffset(1, actual).NotTo(gomega.Equal(extra), explain...) +} + +// ExpectError expects an error happens, otherwise an exception raises +func ExpectError(err error, explain ...interface{}) { + gomega.ExpectWithOffset(1, err).To(gomega.HaveOccurred(), explain...) +} + +// ExpectNoError checks if "err" is set, and if so, fails assertion while logging the error. +func ExpectNoError(err error, explain ...interface{}) { + ExpectNoErrorWithOffset(1, err, explain...) +} + +// ExpectNoErrorWithOffset checks if "err" is set, and if so, fails assertion while logging the error at "offset" levels above its caller +// (for example, for call chain f -> g -> ExpectNoErrorWithOffset(1, ...) error would be logged for "f"). +func ExpectNoErrorWithOffset(offset int, err error, explain ...interface{}) { + if err == nil { + return + } + + // Errors usually contain unexported fields. We have to use + // a formatter here which can print those. + prefix := "" + if len(explain) > 0 { + if str, ok := explain[0].(string); ok { + prefix = fmt.Sprintf(str, explain[1:]...) + ": " + } else { + prefix = fmt.Sprintf("unexpected explain arguments, need format string: %v", explain) + } + } + + // This intentionally doesn't use gomega.Expect. 
Instead we take + // full control over what information is presented where: + // - The complete error object is logged because it may contain + // additional information that isn't included in its error + // string. + // - It is not included in the failure message because + // it might make the failure message very large and/or + // cause error aggregation to work less well: two + // failures at the same code line might not be matched in + // https://go.k8s.io/triage because the error details are too + // different. + Logf("Unexpected error: %s\n%s", prefix, format.Object(err, 1)) + Fail(prefix+err.Error(), 1+offset) +} + +// ExpectConsistOf expects actual contains precisely the extra elements. +// The ordering of the elements does not matter. +func ExpectConsistOf(actual interface{}, extra interface{}, explain ...interface{}) { + gomega.ExpectWithOffset(1, actual).To(gomega.ConsistOf(extra), explain...) +} + +// ExpectContainElement expects actual contains the extra elements. +func ExpectContainElement(actual interface{}, extra interface{}, explain ...interface{}) { + gomega.ExpectWithOffset(1, actual).To(gomega.ContainElement(extra), explain...) +} + +// ExpectNotContainElement expects actual does not contain the extra elements. +func ExpectNotContainElement(actual interface{}, extra interface{}, explain ...interface{}) { + gomega.ExpectWithOffset(1, actual).NotTo(gomega.ContainElement(extra), explain...) +} + +// ExpectHaveKey expects the actual map has the key in the keyset +func ExpectHaveKey(actual interface{}, key interface{}, explain ...interface{}) { + gomega.ExpectWithOffset(1, actual).To(gomega.HaveKey(key), explain...) +} + +// ExpectHaveKeyWithValue expects the actual map has the passed in key/value pair. +func ExpectHaveKeyWithValue(actual interface{}, key, value interface{}, explain ...interface{}) { + gomega.ExpectWithOffset(1, actual).To(gomega.HaveKeyWithValue(key, value), explain...) +} + +// ExpectNotHaveKey expects the actual map does not have the key in the keyset +func ExpectNotHaveKey(actual interface{}, key interface{}, explain ...interface{}) { + gomega.ExpectWithOffset(1, actual).NotTo(gomega.HaveKey(key), explain...) +} + +// ExpectNil expects actual is nil +func ExpectNil(actual interface{}, explain ...interface{}) { + gomega.ExpectWithOffset(1, actual).To(gomega.BeNil(), explain...) +} + +// ExpectNotNil expects actual is not nil +func ExpectNotNil(actual interface{}, explain ...interface{}) { + gomega.ExpectWithOffset(1, actual).NotTo(gomega.BeNil(), explain...) +} + +// ExpectEmpty expects actual is empty +func ExpectEmpty(actual interface{}, explain ...interface{}) { + gomega.ExpectWithOffset(1, actual).To(gomega.BeEmpty(), explain...) +} + +// ExpectNotEmpty expects actual is not empty +func ExpectNotEmpty(actual interface{}, explain ...interface{}) { + gomega.ExpectWithOffset(1, actual).NotTo(gomega.BeEmpty(), explain...) +} + +// ExpectHaveLen expects actual has the passed-in length +func ExpectHaveLen(actual interface{}, count int, explain ...interface{}) { + gomega.ExpectWithOffset(1, actual).To(gomega.HaveLen(count), explain...) +} + +// ExpectTrue expects actual is true +func ExpectTrue(actual interface{}, explain ...interface{}) { + gomega.ExpectWithOffset(1, actual).To(gomega.BeTrue(), explain...) +} + +// ExpectFalse expects actual is false +func ExpectFalse(actual interface{}, explain ...interface{}) { + gomega.ExpectWithOffset(1, actual).NotTo(gomega.BeTrue(), explain...) 
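	// Note: gomega's BeTrue matcher only accepts boolean values, so this assertion also
	// fails (with a type error from the matcher) when actual is not a bool.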
+} + +// ExpectZero expects actual actual is the zero value for its type or actual is nil. +func ExpectZero(actual interface{}, explain ...interface{}) { + gomega.ExpectWithOffset(1, actual).To(gomega.BeZero(), explain...) +} + +// ExpectNotZero expects actual is not nil nor the zero value for its type. +func ExpectNotZero(actual interface{}, explain ...interface{}) { + gomega.ExpectWithOffset(1, actual).NotTo(gomega.BeZero(), explain...) +} + +// ExpectUUID expects that the given string is an UUID. +func ExpectUUID(s string) { + ginkgo.By("verifying the string " + s + " is an UUID") + ExpectTrue(uuidRegex.MatchString(s)) +} + +// ExpectMAC expects that the given string is a MAC address. +func ExpectMAC(s string) { + ginkgo.By("verifying the string " + s + " is a MAC address") + ExpectTrue(macRegex.MatchString(s)) +} + +// ExpectIPInCIDR expects that the given IP address in within the CIDR. +func ExpectIPInCIDR(ip, cidr string) { + ginkgo.By("verifying IP address " + ip + " is within the CIDR " + cidr) + ExpectTrue(util.CIDRContainIP(cidr, ip)) +} diff --git a/test/e2e/framework/framework.go b/test/e2e/framework/framework.go index bb919ce476b..fbd8253f42c 100644 --- a/test/e2e/framework/framework.go +++ b/test/e2e/framework/framework.go @@ -1,427 +1,127 @@ package framework import ( - "bytes" - "context" - "fmt" - "io" - "os/exec" - "strings" + "os" "time" - corev1 "k8s.io/api/core/v1" - k8serrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" - "k8s.io/client-go/tools/remotecommand" - "k8s.io/klog/v2" + "k8s.io/kubernetes/test/e2e/framework" + admissionapi "k8s.io/pod-security-admission/api" - . "github.com/onsi/ginkgo/v2" + "github.com/onsi/ginkgo/v2" - v1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" - clientset "github.com/kubeovn/kube-ovn/pkg/client/clientset/versioned" + kubeovncs "github.com/kubeovn/kube-ovn/pkg/client/clientset/versioned" ) -type Framework struct { - BaseName string - KubeOvnNamespace string - KubeClientSet kubernetes.Interface - OvnClientSet clientset.Interface - KubeConfig *rest.Config -} +const ( + // poll is how often to Poll resources. + poll = 2 * time.Second -// timeout for waiting resources ready -const waitTimeout = 180 + timeout = 2 * time.Minute +) -func NewFramework(baseName, kubeConfig string) *Framework { - f := &Framework{BaseName: baseName} +type Framework struct { + KubeContext string + *framework.Framework + KubeOVNClientSet kubeovncs.Interface - cfg, err := clientcmd.BuildConfigFromFlags("", kubeConfig) - if err != nil { - panic(err.Error()) - } - f.KubeConfig = cfg + // master/release-1.10/... 
+ ClusterVersion string + // ipv4/ipv6/dual + ClusterIpFamily string + // overlay/underlay/underlay-hairpin + ClusterNetworkMode string +} - cfg.QPS = 1000 - cfg.Burst = 2000 - kubeClient, err := kubernetes.NewForConfig(cfg) - if err != nil { - panic(err.Error()) +func NewDefaultFramework(baseName string) *Framework { + f := &Framework{ + Framework: framework.NewDefaultFramework(baseName), } + f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged + f.ClusterIpFamily = os.Getenv("E2E_IP_FAMILY") + f.ClusterVersion = os.Getenv("E2E_BRANCH") + f.ClusterNetworkMode = os.Getenv("E2E_NETWORK_MODE") - f.KubeClientSet = kubeClient - - kubeOvnClient, err := clientset.NewForConfig(cfg) - if err != nil { - panic(err.Error()) - } + ginkgo.BeforeEach(f.BeforeEach) - f.OvnClientSet = kubeOvnClient return f } -func (f *Framework) GetName() string { - return strings.Replace(CurrentSpecReport().LeafNodeText, " ", "-", -1) -} - -func (f *Framework) WaitProviderNetworkReady(providerNetwork string) error { - deadline := time.Now().Add(time.Second * waitTimeout) - for { - time.Sleep(1 * time.Second) - - pn, err := f.OvnClientSet.KubeovnV1().ProviderNetworks().Get(context.Background(), providerNetwork, metav1.GetOptions{}) - if err != nil { - return err - } - if pn.Status.Ready { - return nil - } - if time.Now().After(deadline) { - return fmt.Errorf("timeout waiting for provider-network %s to be ready", providerNetwork) - } +func (f *Framework) useContext() error { + if f.KubeContext == "" { + return nil } -} -func (f *Framework) WaitSubnetReady(subnet string) error { - deadline := time.Now().Add(time.Second * waitTimeout) - for { - time.Sleep(1 * time.Second) - s, err := f.OvnClientSet.KubeovnV1().Subnets().Get(context.Background(), subnet, metav1.GetOptions{}) - if err != nil { - return err - } - if s.Status.IsReady() { - return nil - } - if s.Status.IsNotValidated() && s.Status.ConditionReason(v1.Validated) != "" { - return fmt.Errorf(s.Status.ConditionReason(v1.Validated)) - } - if time.Now().After(deadline) { - return fmt.Errorf("timeout waiting for subnet %s to be ready", subnet) - } - } -} - -func (f *Framework) WaitPodReady(pod, namespace string) (*corev1.Pod, error) { - deadline := time.Now().Add(time.Second * waitTimeout) - for { - time.Sleep(1 * time.Second) - p, err := f.KubeClientSet.CoreV1().Pods(namespace).Get(context.Background(), pod, metav1.GetOptions{}) - if err != nil { - return nil, err - } - if p.Status.Phase == "Running" && p.Status.Reason != "" { - return p, nil - } - - switch getPodStatus(*p) { - case podCompleted: - return nil, fmt.Errorf("pod already completed") - case podRunning: - return p, nil - case podIniting, podPending, podInitializing, podContainerCreating, podTerminating: - if time.Now().After(deadline) { - return nil, fmt.Errorf("timeout waiting for Pod %s/%s to be ready", namespace, pod) - } - continue - default: - klog.Info(p.String()) - return nil, fmt.Errorf("pod status failed") - } - } -} - -func (f *Framework) WaitPodDeleted(pod, namespace string) error { - deadline := time.Now().Add(time.Second * waitTimeout) - for { - time.Sleep(1 * time.Second) - p, err := f.KubeClientSet.CoreV1().Pods(namespace).Get(context.Background(), pod, metav1.GetOptions{}) - if err != nil { - if k8serrors.IsNotFound(err) { - return nil - } - return err - } + pathOptions := clientcmd.NewDefaultPathOptions() + pathOptions.GlobalFile = framework.TestContext.KubeConfig + pathOptions.EnvVar = "" - if status := getPodStatus(*p); status != podTerminating { - return 
fmt.Errorf("unexpected pod status: %s", status) - } - if time.Now().After(deadline) { - return fmt.Errorf("timeout waiting for pod %s/%s to be deleted", namespace, pod) - } + config, err := pathOptions.GetStartingConfig() + if err != nil { + return err } -} - -func (f *Framework) WaitDeploymentReady(deployment, namespace string) error { - deadline := time.Now().Add(time.Second * waitTimeout) - for { - time.Sleep(1 * time.Second) - deploy, err := f.KubeClientSet.AppsV1().Deployments(namespace).Get(context.Background(), deployment, metav1.GetOptions{}) - if err != nil { - return err - } - if deploy.Status.ReadyReplicas != *deploy.Spec.Replicas { - if time.Now().After(deadline) { - return fmt.Errorf("timeout waiting for deployment %s/%s to be ready", namespace, deployment) - } - continue - } - pods, err := f.KubeClientSet.CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{LabelSelector: labels.SelectorFromSet(deploy.Spec.Template.Labels).String()}) - if err != nil { + if config.CurrentContext != f.KubeContext { + Logf("Switching context to " + f.KubeContext) + config.CurrentContext = f.KubeContext + if err = clientcmd.ModifyConfig(pathOptions, *config, true); err != nil { return err } - - ready := true - for _, pod := range pods.Items { - switch getPodStatus(pod) { - case podCompleted: - return fmt.Errorf("pod already completed") - case podRunning: - continue - case podIniting, podPending, podInitializing, podContainerCreating, podTerminating: - ready = false - default: - klog.Info(pod.String()) - return fmt.Errorf("pod status failed") - } - if !ready { - break - } - } - if ready { - return nil - } - if time.Now().After(deadline) { - return fmt.Errorf("timeout waiting for deployment %s/%s to be ready", namespace, deployment) - } } -} -func (f *Framework) WaitStatefulsetReady(statefulset, namespace string) error { - deadline := time.Now().Add(time.Second * waitTimeout) - for { - time.Sleep(1 * time.Second) - ss, err := f.KubeClientSet.AppsV1().StatefulSets(namespace).Get(context.Background(), statefulset, metav1.GetOptions{}) - if err != nil { - return err - } - if ss.Status.ReadyReplicas != *ss.Spec.Replicas { - if time.Now().After(deadline) { - return fmt.Errorf("timeout waiting for statefulset %s/%s to be ready", namespace, statefulset) - } - continue - } - - pods, err := f.KubeClientSet.CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{LabelSelector: labels.SelectorFromSet(ss.Spec.Template.Labels).String()}) - if err != nil { - return err - } - - ready := true - for _, pod := range pods.Items { - switch getPodStatus(pod) { - case podCompleted: - return fmt.Errorf("pod already completed") - case podRunning: - continue - case podIniting, podPending, podInitializing, podContainerCreating, podTerminating: - ready = false - default: - klog.Info(pod.String()) - return fmt.Errorf("pod status failed") - } - if !ready { - break - } - } - if ready { - return nil - } - if time.Now().After(deadline) { - return fmt.Errorf("timeout waiting for statefulset %s/%s to be ready", namespace, statefulset) - } - } + return nil } -func (f *Framework) ExecToPodThroughAPI(command, containerName, podName, namespace string, stdin io.Reader) (string, string, error) { - req := f.KubeClientSet.CoreV1().RESTClient().Post(). - Resource("pods"). - Name(podName). - Namespace(namespace). 
- SubResource("exec") - scheme := runtime.NewScheme() - if err := corev1.AddToScheme(scheme); err != nil { - return "", "", fmt.Errorf("error adding to scheme: %v", err) - } +func NewFrameworkWithContext(baseName, kubeContext string) *Framework { + f := &Framework{KubeContext: kubeContext} + ginkgo.BeforeEach(f.BeforeEach) - parameterCodec := runtime.NewParameterCodec(scheme) - req.VersionedParams(&corev1.PodExecOptions{ - Command: strings.Fields(command), - Container: containerName, - Stdin: stdin != nil, - Stdout: true, - Stderr: true, - TTY: false, - }, parameterCodec) + f.Framework = framework.NewDefaultFramework(baseName) + f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged + f.ClusterIpFamily = os.Getenv("E2E_IP_FAMILY") + f.ClusterVersion = os.Getenv("E2E_BRANCH") + f.ClusterNetworkMode = os.Getenv("E2E_NETWORK_MODE") - exec, err := remotecommand.NewSPDYExecutor(f.KubeConfig, "POST", req.URL()) - if err != nil { - return "", "", fmt.Errorf("error while creating Executor: %v", err) - } - - var stdout, stderr bytes.Buffer - err = exec.StreamWithContext(context.TODO(), remotecommand.StreamOptions{ - Stdin: stdin, - Stdout: &stdout, - Stderr: &stderr, - Tty: false, + ginkgo.BeforeEach(func() { + framework.TestContext.Host = "" }) - if err != nil { - return "", "", fmt.Errorf("error in Stream: %v", err) - } - return stdout.String(), stderr.String(), nil + return f } -const ( - podRunning = "Running" - podPending = "Pending" - podCompleted = "Completed" - podContainerCreating = "ContainerCreating" - podInitializing = "PodInitializing" - podTerminating = "Terminating" - podIniting = "Initing" -) - -func getPodContainerStatus(pod corev1.Pod, reason string) string { - for i := len(pod.Status.ContainerStatuses) - 1; i >= 0; i-- { - container := pod.Status.ContainerStatuses[i] - - if container.State.Waiting != nil && container.State.Waiting.Reason != "" { - reason = container.State.Waiting.Reason - } else if container.State.Terminated != nil && container.State.Terminated.Reason != "" { - reason = container.State.Terminated.Reason - } else if container.State.Terminated != nil && container.State.Terminated.Reason == "" { - if container.State.Terminated.Signal != 0 { - reason = fmt.Sprintf("Signal:%d", container.State.Terminated.Signal) - } else { - reason = fmt.Sprintf("ExitCode:%d", container.State.Terminated.ExitCode) - } - } - } - return reason +func (f *Framework) IPv6() bool { + return f.ClusterIpFamily == "ipv6" } -func getPodStatus(pod corev1.Pod) string { - reason := string(pod.Status.Phase) - if pod.Status.Reason != "" { - reason = pod.Status.Reason - } - initializing, reason := getPodInitStatus(pod, reason) - if !initializing { - reason = getPodContainerStatus(pod, reason) - } +// BeforeEach gets a kube-ovn client +func (f *Framework) BeforeEach() { + ginkgo.By("Setting kubernetes context") + ExpectNoError(f.useContext()) - if pod.DeletionTimestamp != nil && pod.Status.Reason == "NodeLost" { - reason = "Unknown" - } else if pod.DeletionTimestamp != nil { - reason = "Terminating" - } - return reason -} + if f.KubeOVNClientSet == nil { + ginkgo.By("Creating a Kube-OVN client") + config, err := framework.LoadConfig() + ExpectNoError(err) -func getPodInitStatus(pod corev1.Pod, reason string) (bool, string) { - initializing := false - for i := range pod.Status.InitContainerStatuses { - container := pod.Status.InitContainerStatuses[i] - switch { - case container.State.Terminated != nil && container.State.Terminated.ExitCode == 0: - continue - case container.State.Terminated != 
nil: - // initialization is failed - if len(container.State.Terminated.Reason) == 0 { - if container.State.Terminated.Signal != 0 { - reason = fmt.Sprintf("Init:Signal:%d", container.State.Terminated.Signal) - } else { - reason = fmt.Sprintf("Init:ExitCode:%d", container.State.Terminated.ExitCode) - } - } else { - reason = "Init:" + container.State.Terminated.Reason - } - initializing = true - case container.State.Waiting != nil && len(container.State.Waiting.Reason) > 0 && container.State.Waiting.Reason != "PodInitializing": - reason = "Initing:" + container.State.Waiting.Reason - initializing = true - default: - reason = fmt.Sprintf("Initing:%d/%d", i, len(pod.Spec.InitContainers)) - initializing = true - } - break + config.QPS = f.Options.ClientQPS + config.Burst = f.Options.ClientBurst + f.KubeOVNClientSet, err = kubeovncs.NewForConfig(config) + ExpectNoError(err) } - return initializing, reason -} -type netemQos struct { - Latency string `json:"latency"` - Limit string `json:"limit"` - Loss string `json:"loss"` + framework.TestContext.Host = "" } -func GetPodNetemQosPara(podName, podNamespace string) (netemQos, error) { - var qosVal netemQos - - output, err := exec.Command("kubectl", "ko", "vsctl", "kube-ovn-control-plane", "--no-heading", "--columns=other_config", "--bare", "find", "qos", fmt.Sprintf(`external_ids:pod="%s/%s"`, podNamespace, podName)).CombinedOutput() - if err != nil { - return qosVal, err - } - - values := strings.Fields(string(output)) - for _, val := range values { - temp := strings.Split(val, "=") - if len(temp) != 2 { - continue - } - - switch temp[0] { - case "latency": - qosVal.Latency = temp[1] - case "limit": - qosVal.Limit = temp[1] - case "loss": - qosVal.Loss = temp[1] - } - } - - return qosVal, nil +func Describe(text string, body func()) bool { + return ginkgo.Describe("[CNI:Kube-OVN] "+text, body) } -func GetPodHtbQosPara(podName, podNamespace string) (string, string, error) { - var priority, rate string - - output, err := exec.Command("kubectl", "ko", "vsctl", "kube-ovn-control-plane", "--no-heading", "--columns=other_config", "--bare", "find", "queue", fmt.Sprintf(`external_ids:pod="%s/%s"`, podNamespace, podName)).CombinedOutput() - if err != nil { - return "", "", err - } - - values := strings.Fields(string(output)) - for _, val := range values { - temp := strings.Split(val, "=") - if len(temp) != 2 { - continue - } - - switch temp[0] { - case "max-rate": - rate = temp[1] - case "priority": - priority = temp[1] - } - } +func OrderedDescribe(text string, body func()) bool { + return ginkgo.Describe("[CNI:Kube-OVN] "+text, ginkgo.Ordered, body) +} - return priority, rate, nil +// ConformanceIt is wrapper function for ginkgo It. +// Adds "[Conformance]" tag and makes static analysis easier. 
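// The ginkgo.Offset(1) decorator below makes Ginkgo attribute the spec's code location
// to the caller of ConformanceIt rather than to this wrapper function.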
+func ConformanceIt(text string, body interface{}) bool { + return ginkgo.It(text+" [Conformance]", ginkgo.Offset(1), body) } diff --git a/test/e2e/framework/image.go b/test/e2e/framework/image.go new file mode 100644 index 00000000000..83bdad7e26f --- /dev/null +++ b/test/e2e/framework/image.go @@ -0,0 +1,7 @@ +package framework + +const ( + PauseImage = "kubeovn/pause:3.2" + BusyBoxImage = "busybox:stable" + AgnhostImage = "kubeovn/agnhost:2.40" +) diff --git a/test/e2e/framework/iproute/iproute.go b/test/e2e/framework/iproute/iproute.go new file mode 100644 index 00000000000..34ef7722b95 --- /dev/null +++ b/test/e2e/framework/iproute/iproute.go @@ -0,0 +1,166 @@ +package iproute + +import ( + "encoding/json" + "fmt" + "net" + "reflect" + "strings" + + "github.com/kubeovn/kube-ovn/test/e2e/framework/docker" +) + +type LinkInfo struct { + InfoKind string `json:"info_kind"` +} + +type AddrInfo struct { + Family string `json:"family"` + Local string `json:"local"` + PrefixLen int `json:"prefixlen"` + Broadcast string `json:"broadcast,omitempty"` + Scope string `json:"scope"` + Label string `json:"label,omitempty"` + ValidLifeTime int64 `json:"valid_life_time"` + PreferredLifeTime int64 `json:"preferred_life_time"` + NoDAD bool `json:"nodad,omitempty"` +} + +type Link struct { + IfIndex int `json:"ifindex"` + LinkIndex int `json:"link_index"` + IfName string `json:"ifname"` + Flags []string `json:"flags"` + Mtu int `json:"mtu"` + Qdisc string `json:"qdisc"` + Master string `json:"master"` + OperState string `json:"operstate"` + Group string `json:"group"` + LinkType string `json:"link_type"` + Address string `json:"address"` + Broadcast string `json:"broadcast"` + LinkNetnsID int `json:"link_netnsid"` + Promiscuity int `json:"promiscuity"` + MinMtu int `json:"min_mtu"` + MaxMtu int `json:"max_mtu"` + LinkInfo LinkInfo `json:"linkinfo"` + NumTxQueues int `json:"num_tx_queues"` + NumRxQueues int `json:"num_rx_queues"` + GsoMaxSize int `json:"gso_max_size"` + GsoMaxSegs int `json:"gso_max_segs"` + AddrInfo []AddrInfo `json:"addr_info"` +} + +func (l *Link) NonLinkLocalAddresses() []string { + var result []string + for _, addr := range l.AddrInfo { + if !net.ParseIP(addr.Local).IsLinkLocalUnicast() { + result = append(result, fmt.Sprintf("%s/%d", addr.Local, addr.PrefixLen)) + } + } + return result +} + +type Route struct { + Type string `json:"type"` + Dst string `json:"dst"` + Gateway string `json:"gateway,omitempty"` + Dev string `json:"dev"` + Protocol string `json:"protocol"` + Scope string `json:"scope"` + Metric int `json:"metric"` + Flags []interface{} `json:"flags"` + PrefSrc string `json:"prefsrc,omitempty"` + Pref string `json:"pref"` +} + +type Rule struct { + Priority int `json:"priority"` + Src string `json:"src"` + Table string `json:"table"` + Protocol string `json:"protocol"` + SrcLen int `json:"srclen,omitempty"` +} + +type ExecFunc func(cmd ...string) (stdout, stderr []byte, err error) + +type execer struct { + fn ExecFunc + ignoredErrors []reflect.Type +} + +func (e *execer) exec(cmd string, result interface{}) error { + stdout, stderr, err := e.fn(strings.Fields(cmd)...) 
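	// All commands issued through this helper are JSON-mode invocations ("ip -j ..."),
	// so stdout is unmarshalled into result below; errors whose concrete type appears in
	// ignoredErrors (e.g. a missing routing table in RouteShow) are swallowed and leave
	// result at its zero value.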
+ if err != nil { + t := reflect.TypeOf(err) + for _, err := range e.ignoredErrors { + if t == err { + return nil + } + } + return fmt.Errorf("failed to exec cmd %q: %v\nstdout:\n%s\nstderr:\n%s", cmd, err, stdout, stderr) + } + + if err = json.Unmarshal(stdout, result); err != nil { + return fmt.Errorf("failed to decode json %q: %v", string(stdout), err) + } + + return nil +} + +func devArg(device string) string { + if device == "" { + return "" + } + return " dev " + device +} + +func AddressShow(device string, execFunc ExecFunc) ([]Link, error) { + var links []Link + e := execer{fn: execFunc} + if err := e.exec("ip -d -j address show"+devArg(device), &links); err != nil { + return nil, err + } + + return links, nil +} + +func RouteShow(table, device string, execFunc ExecFunc) ([]Route, error) { + e := execer{fn: execFunc} + var args string + if table != "" { + // ignore the following error: + // Error: ipv4/ipv6: FIB table does not exist. + // Dump terminated + e.ignoredErrors = append(e.ignoredErrors, reflect.TypeOf(docker.ErrNonZeroExitCode{})) + args = " table " + table + } + args += devArg(device) + + var routes []Route + if err := e.exec("ip -d -j route show"+args, &routes); err != nil { + return nil, err + } + + var routes6 []Route + if err := e.exec("ip -d -j -6 route show"+args, &routes6); err != nil { + return nil, err + } + + return append(routes, routes6...), nil +} + +func RuleShow(device string, execFunc ExecFunc) ([]Rule, error) { + e := execer{fn: execFunc} + + var rules []Rule + if err := e.exec("ip -d -j rule show"+devArg(device), &rules); err != nil { + return nil, err + } + + var rules6 []Rule + if err := e.exec("ip -d -j -6 rule show"+devArg(device), &rules6); err != nil { + return nil, err + } + return append(rules, rules6...), nil +} diff --git a/test/e2e/framework/kind/kind.go b/test/e2e/framework/kind/kind.go new file mode 100644 index 00000000000..4dc1e5d37f4 --- /dev/null +++ b/test/e2e/framework/kind/kind.go @@ -0,0 +1,176 @@ +package kind + +import ( + "fmt" + "net" + "net/url" + "strings" + "time" + + "github.com/docker/docker/api/types" + "k8s.io/apimachinery/pkg/util/wait" + + "github.com/kubeovn/kube-ovn/pkg/util" + "github.com/kubeovn/kube-ovn/test/e2e/framework" + "github.com/kubeovn/kube-ovn/test/e2e/framework/docker" + "github.com/kubeovn/kube-ovn/test/e2e/framework/iproute" +) + +const NetworkName = "kind" + +const ( + labelCluster = "io.x-k8s.kind.cluster" + labelRole = "io.x-k8s.kind.role" +) + +type Node struct { + types.Container +} + +func (n *Node) Name() string { + return strings.TrimPrefix(n.Names[0], "/") +} + +func (n *Node) Exec(cmd ...string) (stdout, stderr []byte, err error) { + return docker.Exec(n.ID, nil, cmd...) 
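	// The command runs inside the kind node's container via a privileged docker exec
	// (see ExecConfig in framework/docker/exec.go); stdout and stderr come back
	// demultiplexed by stdcopy.StdCopy.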
+} + +func (n *Node) NetworkConnect(networkID string) error { + for _, settings := range n.NetworkSettings.Networks { + if settings.NetworkID == networkID { + return nil + } + } + return docker.NetworkConnect(networkID, n.ID) +} + +func (n *Node) NetworkDisconnect(networkID string) error { + for _, settings := range n.NetworkSettings.Networks { + if settings.NetworkID == networkID { + return docker.NetworkDisconnect(networkID, n.ID) + } + } + return nil +} + +func (n *Node) ListLinks() ([]iproute.Link, error) { + return iproute.AddressShow("", n.Exec) +} + +func (n *Node) ListRoutes(nonLinkLocalUnicast bool) ([]iproute.Route, error) { + routes, err := iproute.RouteShow("", "", n.Exec) + if err != nil { + return nil, err + } + + if !nonLinkLocalUnicast { + return routes, nil + } + + result := make([]iproute.Route, 0, len(routes)) + for _, route := range routes { + if route.Dst == "default" { + result = append(result, route) + } + if ip := net.ParseIP(strings.Split(route.Dst, "/")[0]); !ip.IsLinkLocalUnicast() { + result = append(result, route) + } + } + return result, nil +} + +func (n *Node) WaitLinkToDisappear(linkName string, interval time.Duration, deadline time.Time) error { + err := wait.PollImmediate(interval, time.Until(deadline), func() (bool, error) { + framework.Logf("Waiting for link %s in node %s to disappear", linkName, n.Name()) + links, err := n.ListLinks() + if err != nil { + return false, err + } + for _, link := range links { + if link.IfName == linkName { + framework.Logf("link %s still exists", linkName) + return false, nil + } + } + framework.Logf("link %s no longer exists", linkName) + return true, nil + }) + if err == nil { + return nil + } + if framework.IsTimeout(err) { + return framework.TimeoutError(fmt.Sprintf("timed out while waiting for link %s in node %s to disappear", linkName, n.Name())) + } + + return err +} + +func ListClusters() ([]string, error) { + filters := map[string][]string{"label": {labelCluster}} + nodeList, err := docker.ListContainers(filters) + if err != nil { + return nil, err + } + + var clusters []string + for _, node := range nodeList { + if cluster := node.Labels[labelCluster]; !util.ContainsString(clusters, cluster) { + clusters = append(clusters, node.Labels[labelCluster]) + } + } + + return clusters, nil +} + +func ListNodes(cluster, role string) ([]Node, error) { + labels := []string{labelCluster + "=" + cluster} + if role != "" { + // control-plane or worker + labels = append(labels, labelRole+"="+role) + } + + filters := map[string][]string{"label": labels} + nodeList, err := docker.ListContainers(filters) + if err != nil { + return nil, err + } + + nodes := make([]Node, 0, len(nodeList)) + for _, node := range nodeList { + nodes = append(nodes, Node{node}) + } + + return nodes, nil +} + +func IsKindProvided(providerID string) (string, bool) { + // kind://docker/kube-ovn/kube-ovn-control-plane + u, err := url.Parse(providerID) + if err != nil || u.Scheme != "kind" || u.Host != "docker" { + return "", false + } + + fields := strings.Split(strings.Trim(u.Path, "/"), "/") + if len(fields) != 2 { + return "", false + } + return fields[0], true +} + +func NetworkConnect(networkID string, nodes []Node) error { + for _, node := range nodes { + if err := node.NetworkConnect(networkID); err != nil { + return err + } + } + return nil +} + +func NetworkDisconnect(networkID string, nodes []Node) error { + for _, node := range nodes { + if err := node.NetworkDisconnect(networkID); err != nil { + return err + } + } + return nil +} diff --git 
a/test/e2e/framework/kube-ovn.go b/test/e2e/framework/kube-ovn.go new file mode 100644 index 00000000000..212557dd7ad --- /dev/null +++ b/test/e2e/framework/kube-ovn.go @@ -0,0 +1,23 @@ +package framework + +import ( + "context" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clientset "k8s.io/client-go/kubernetes" +) + +func GetKubeOvnImage(cs clientset.Interface) string { + ds, err := cs.AppsV1().DaemonSets(KubeOvnNamespace).Get(context.TODO(), DaemonSetOvsOvn, metav1.GetOptions{}) + ExpectNoError(err, "getting daemonset %s/%s", KubeOvnNamespace, DaemonSetOvsOvn) + return ds.Spec.Template.Spec.Containers[0].Image +} + +func GetOvsPodOnNode(cs clientset.Interface, node string) *corev1.Pod { + ds, err := cs.AppsV1().DaemonSets(KubeOvnNamespace).Get(context.TODO(), DaemonSetOvsOvn, metav1.GetOptions{}) + ExpectNoError(err, "getting daemonset %s/%s", KubeOvnNamespace, DaemonSetOvsOvn) + ovsPod, err := GetPodOnNodeForDaemonSet(cs, ds, node) + ExpectNoError(err, "getting daemonset %s/%s running on node %s", KubeOvnNamespace, DaemonSetOvsOvn, node) + return ovsPod +} diff --git a/test/e2e/framework/kubectl.go b/test/e2e/framework/kubectl.go new file mode 100644 index 00000000000..eb7f242d5c4 --- /dev/null +++ b/test/e2e/framework/kubectl.go @@ -0,0 +1,18 @@ +package framework + +import ( + "fmt" + "strings" + + e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" +) + +func KubectlExec(namespace, pod string, cmd ...string) (stdout, stderr []byte, err error) { + c := strings.Join(cmd, " ") + outStr, errStr, err := e2epodoutput.RunHostCmdWithFullOutput(namespace, pod, c) + if err != nil { + return nil, nil, fmt.Errorf("failed to exec cmd %q in pod %s/%s: %v\nstderr:\n%s", c, namespace, pod, err, errStr) + } + + return []byte(outStr), []byte(errStr), nil +} diff --git a/test/e2e/framework/log.go b/test/e2e/framework/log.go new file mode 100644 index 00000000000..51b4260cc7e --- /dev/null +++ b/test/e2e/framework/log.go @@ -0,0 +1,101 @@ +/* +Copyright 2019 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package framework + +import ( + "bytes" + "fmt" + "regexp" + "runtime/debug" + "time" + + "github.com/onsi/ginkgo/v2" +) + +func nowStamp() string { + return time.Now().Format(time.StampMilli) +} + +func log(level string, format string, args ...interface{}) { + fmt.Fprintf(ginkgo.GinkgoWriter, nowStamp()+": "+level+": "+format+"\n", args...) +} + +// Logf logs the info. +func Logf(format string, args ...interface{}) { + log("INFO", format, args...) +} + +// Failf logs the fail info, including a stack trace starts with its direct caller +// (for example, for call chain f -> g -> Failf("foo", ...) error would be logged for "g"). +func Failf(format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) 
+ skip := 1 + log("FAIL", "%s\n\nFull Stack Trace\n%s", msg, PrunedStack(skip)) + ginkgo.Fail(nowStamp()+": "+msg, skip) + panic("unreachable") +} + +// Fail is a replacement for ginkgo.Fail which logs the problem as it occurs +// together with a stack trace and then calls ginkgo.Fail. +func Fail(msg string, callerSkip ...int) { + skip := 1 + if len(callerSkip) > 0 { + skip += callerSkip[0] + } + log("FAIL", "%s\n\nFull Stack Trace\n%s", msg, PrunedStack(skip)) + ginkgo.Fail(nowStamp()+": "+msg, skip) +} + +var codeFilterRE = regexp.MustCompile(`/github.com/onsi/ginkgo/v2/`) + +// PrunedStack is a wrapper around debug.Stack() that removes information +// about the current goroutine and optionally skips some of the initial stack entries. +// With skip == 0, the returned stack will start with the caller of PrunedStack. +// From the remaining entries it automatically filters out useless ones like +// entries coming from Ginkgo. +// +// This is a modified copy of PruneStack in https://github.com/onsi/ginkgo/v2/blob/f90f37d87fa6b1dd9625e2b1e83c23ffae3de228/internal/codelocation/code_location.go#L25: +// - simplified API and thus renamed (calls debug.Stack() instead of taking a parameter) +// - source code filtering updated to filter out Ginkgo frames +// - optimized to use bytes and in-place slice filtering from +// https://github.com/golang/go/wiki/SliceTricks#filter-in-place +func PrunedStack(skip int) []byte { + fullStackTrace := debug.Stack() + stack := bytes.Split(fullStackTrace, []byte("\n")) + // Ensure that the even entries are the method names and + // the odd entries the source code information. + if len(stack) > 0 && bytes.HasPrefix(stack[0], []byte("goroutine ")) { + // Ignore "goroutine 29 [running]:" line. + stack = stack[1:] + } + // The "+2" is for skipping over: + // - runtime/debug.Stack() + // - PrunedStack() + skip += 2 + if len(stack) > 2*skip { + stack = stack[2*skip:] + } + n := 0 + for i := 0; i < len(stack)/2; i++ { + // We filter out based on the source code file name. 
+ if !codeFilterRE.Match([]byte(stack[i*2+1])) { + stack[n] = stack[i*2] + stack[n+1] = stack[i*2+1] + n += 2 + } + } + stack = stack[:n] + + return bytes.Join(stack, []byte("\n")) +} diff --git a/test/e2e/framework/pod.go b/test/e2e/framework/pod.go new file mode 100644 index 00000000000..5778231a12f --- /dev/null +++ b/test/e2e/framework/pod.go @@ -0,0 +1,79 @@ +package framework + +import ( + "context" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + + "github.com/kubeovn/kube-ovn/pkg/util" +) + +type PodClient struct { + *e2epod.PodClient +} + +func (f *Framework) PodClient() *PodClient { + return &PodClient{e2epod.NewPodClient(f.Framework)} +} + +func (c *PodClient) DeleteSync(name string) { + c.PodClient.DeleteSync(name, metav1.DeleteOptions{}, timeout) +} + +func (c *PodClient) PatchPod(original, modified *corev1.Pod) *corev1.Pod { + patch, err := util.GenerateMergePatchPayload(original, modified) + ExpectNoError(err) + + var patchedPod *corev1.Pod + err = wait.PollImmediate(2*time.Second, timeout, func() (bool, error) { + p, err := c.PodInterface.Patch(context.TODO(), original.Name, types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "") + if err != nil { + return handleWaitingAPIError(err, false, "patch pod %s/%s", original.Namespace, original.Name) + } + patchedPod = p + return true, nil + }) + if err == nil { + return patchedPod.DeepCopy() + } + + if IsTimeout(err) { + Failf("timed out while retrying to patch pod %s/%s", original.Namespace, original.Name) + } + ExpectNoError(maybeTimeoutError(err, "patching pod %s/%s", original.Namespace, original.Name)) + + return nil +} + +func MakePod(ns, name string, labels, annotations map[string]string, image string, command, args []string) *corev1.Pod { + if image == "" { + image = PauseImage + } + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: ns, + Labels: labels, + Annotations: annotations, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "container", + Image: image, + ImagePullPolicy: corev1.PullIfNotPresent, + Command: command, + Args: args, + }, + }, + }, + } + + return pod +} diff --git a/test/e2e/framework/provider-network.go b/test/e2e/framework/provider-network.go new file mode 100644 index 00000000000..d2fea662158 --- /dev/null +++ b/test/e2e/framework/provider-network.go @@ -0,0 +1,226 @@ +package framework + +import ( + "context" + "fmt" + "math/big" + "time" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + + "github.com/onsi/gomega" + + apiv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" + v1 "github.com/kubeovn/kube-ovn/pkg/client/clientset/versioned/typed/kubeovn/v1" + "github.com/kubeovn/kube-ovn/pkg/util" +) + +// ProviderNetworkClient is a struct for provider network client. 
+type ProviderNetworkClient struct { + f *Framework + v1.ProviderNetworkInterface +} + +func (f *Framework) ProviderNetworkClient() *ProviderNetworkClient { + return &ProviderNetworkClient{ + f: f, + ProviderNetworkInterface: f.KubeOVNClientSet.KubeovnV1().ProviderNetworks(), + } +} + +func (s *ProviderNetworkClient) Get(name string) *apiv1.ProviderNetwork { + pn, err := s.ProviderNetworkInterface.Get(context.TODO(), name, metav1.GetOptions{}) + ExpectNoError(err) + return pn +} + +// Create creates a new provider network according to the framework specifications +func (c *ProviderNetworkClient) Create(pn *apiv1.ProviderNetwork) *apiv1.ProviderNetwork { + pn, err := c.ProviderNetworkInterface.Create(context.TODO(), pn, metav1.CreateOptions{}) + ExpectNoError(err, "Error creating provider network") + return pn.DeepCopy() +} + +// CreateSync creates a new provider network according to the framework specifications, and waits for it to be ready. +func (c *ProviderNetworkClient) CreateSync(pn *apiv1.ProviderNetwork) *apiv1.ProviderNetwork { + pn = c.Create(pn) + ExpectTrue(c.WaitToBeReady(pn.Name, timeout)) + // Get the newest provider network after it becomes ready + return c.Get(pn.Name).DeepCopy() +} + +// Patch patches the provider network +func (c *ProviderNetworkClient) Patch(original, modified *apiv1.ProviderNetwork) *apiv1.ProviderNetwork { + patch, err := util.GenerateMergePatchPayload(original, modified) + ExpectNoError(err) + + var patchedProviderNetwork *apiv1.ProviderNetwork + err = wait.PollImmediate(2*time.Second, timeout, func() (bool, error) { + pn, err := c.ProviderNetworkInterface.Patch(context.TODO(), original.Name, types.MergePatchType, patch, metav1.PatchOptions{}, "") + if err != nil { + return handleWaitingAPIError(err, false, "patch provider network %q", original.Name) + } + patchedProviderNetwork = pn + return true, nil + }) + if err == nil { + return patchedProviderNetwork.DeepCopy() + } + + if IsTimeout(err) { + Failf("timed out while retrying to patch provider network %s", original.Name) + } + ExpectNoError(maybeTimeoutError(err, "patching provider network %s", original.Name)) + + return nil +} + +// PatchSync patches the provider network and waits for the provider network to be ready for `timeout`. +// If the provider network doesn't become ready before the timeout, it will fail the test. +func (c *ProviderNetworkClient) PatchSync(original, modified *apiv1.ProviderNetwork, requiredNodes []string, timeout time.Duration) *apiv1.ProviderNetwork { + pn := c.Patch(original, modified) + ExpectTrue(c.WaitToBeUpdated(pn, timeout)) + ExpectTrue(c.WaitToBeReady(pn.Name, timeout)) + // Get the newest provider network after it becomes ready + return c.Get(pn.Name).DeepCopy() +} + +// Delete deletes a provider network if the provider network exists +func (c *ProviderNetworkClient) Delete(name string) { + err := c.ProviderNetworkInterface.Delete(context.TODO(), name, metav1.DeleteOptions{}) + if err != nil && !apierrors.IsNotFound(err) { + Failf("Failed to delete provider network %q: %v", name, err) + } +} + +// DeleteSync deletes the provider network and waits for the provider network to disappear for `timeout`. +// If the provider network doesn't disappear before the timeout, it will fail the test. 
+func (c *ProviderNetworkClient) DeleteSync(name string) { + c.Delete(name) + gomega.Expect(c.WaitToDisappear(name, 2*time.Second, timeout)).To(gomega.Succeed(), "wait for provider network %q to disappear", name) +} + +func isProviderNetworkConditionSetAsExpected(pn *apiv1.ProviderNetwork, node string, conditionType apiv1.ConditionType, wantTrue, silent bool) bool { + for _, cond := range pn.Status.Conditions { + if cond.Node == node && cond.Type == conditionType { + if (wantTrue && (cond.Status == corev1.ConditionTrue)) || (!wantTrue && (cond.Status != corev1.ConditionTrue)) { + return true + } + if !silent { + Logf("Condition %s for node %s of provider network %s is %v instead of %t. Reason: %v, message: %v", + conditionType, node, pn.Name, cond.Status == corev1.ConditionTrue, wantTrue, cond.Reason, cond.Message) + } + return false + } + } + if !silent { + Logf("Couldn't find condition %v of node %s on provider network %v", conditionType, node, pn.Name) + } + return false +} + +// IsProviderNetworkConditionSetAsExpected returns a wantTrue value if the provider network has a match to the conditionType, +// otherwise returns an opposite value of the wantTrue with detailed logging. +func IsProviderNetworkConditionSetAsExpected(pn *apiv1.ProviderNetwork, node string, conditionType apiv1.ConditionType, wantTrue bool) bool { + return isProviderNetworkConditionSetAsExpected(pn, node, conditionType, wantTrue, false) +} + +// WaitConditionToBe returns whether provider network "name's" condition state matches wantTrue +// within timeout. If wantTrue is true, it will ensure the provider network condition status is +// ConditionTrue; if it's false, it ensures the provider network condition is in any state other +// than ConditionTrue (e.g. not true or unknown). +func (c *ProviderNetworkClient) WaitConditionToBe(name, node string, conditionType apiv1.ConditionType, wantTrue bool, deadline time.Time) bool { + timeout := time.Until(deadline) + Logf("Waiting up to %v for provider network %s condition %s of node %s to be %t", timeout, name, conditionType, node, wantTrue) + for ; time.Now().Before(deadline); time.Sleep(poll) { + if pn := c.Get(name); IsProviderNetworkConditionSetAsExpected(pn, node, conditionType, wantTrue) { + return true + } + } + Logf("ProviderNetwork %s didn't reach desired %s condition status (%t) within %v", name, conditionType, wantTrue, timeout) + return false +} + +// WaitToBeReady returns whether the provider network is ready within timeout. +func (c *ProviderNetworkClient) WaitToBeReady(name string, timeout time.Duration) bool { + for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { + if c.Get(name).Status.Ready { + return true + } + } + return false +} + +// WaitToBeUpdated returns whether the provider network is updated within timeout. +func (c *ProviderNetworkClient) WaitToBeUpdated(pn *apiv1.ProviderNetwork, timeout time.Duration) bool { + Logf("Waiting up to %v for provider network %s to be updated", timeout, pn.Name) + rv, _ := big.NewInt(0).SetString(pn.ResourceVersion, 10) + for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { + s := c.Get(pn.Name) + if current, _ := big.NewInt(0).SetString(s.ResourceVersion, 10); current.Cmp(rv) > 0 { + return true + } + } + Logf("ProviderNetwork %s was not updated within %v", pn.Name, timeout) + return false +} + +// WaitToDisappear waits the given timeout duration for the specified provider network to disappear. 
+func (c *ProviderNetworkClient) WaitToDisappear(name string, interval, timeout time.Duration) error { + var lastProviderNetwork *apiv1.ProviderNetwork + err := wait.PollImmediate(interval, timeout, func() (bool, error) { + Logf("Waiting for provider network %s to disappear", name) + subnets, err := c.List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return handleWaitingAPIError(err, true, "listing provider networks") + } + found := false + for i, subnet := range subnets.Items { + if subnet.Name == name { + Logf("Provider network %s still exists", name) + found = true + lastProviderNetwork = &(subnets.Items[i]) + break + } + } + if !found { + Logf("Provider network %s no longer exists", name) + return true, nil + } + return false, nil + }) + if err == nil { + return nil + } + if IsTimeout(err) { + return TimeoutError(fmt.Sprintf("timed out while waiting for provider network %s to disappear", name), + lastProviderNetwork, + ) + } + return maybeTimeoutError(err, "waiting for provider network %s to disappear", name) +} + +func MakeProviderNetwork(name string, exchangeLinkName bool, defaultInterface string, customInterfaces map[string][]string, excludeNodes []string) *apiv1.ProviderNetwork { + pn := &apiv1.ProviderNetwork{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: apiv1.ProviderNetworkSpec{ + DefaultInterface: defaultInterface, + ExcludeNodes: excludeNodes, + ExchangeLinkName: exchangeLinkName, + }, + } + for iface, nodes := range customInterfaces { + ci := apiv1.CustomInterface{ + Interface: iface, + Nodes: nodes, + } + pn.Spec.CustomInterfaces = append(pn.Spec.CustomInterfaces, ci) + } + return pn +} diff --git a/test/e2e/framework/service.go b/test/e2e/framework/service.go new file mode 100644 index 00000000000..cff7c2d456b --- /dev/null +++ b/test/e2e/framework/service.go @@ -0,0 +1,131 @@ +package framework + +import ( + "context" + "fmt" + "math/big" + "time" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + v1core "k8s.io/client-go/kubernetes/typed/core/v1" + + "github.com/onsi/gomega" +) + +// ServiceClient is a struct for service client. +type ServiceClient struct { + f *Framework + v1core.ServiceInterface +} + +func (f *Framework) ServiceClient() *ServiceClient { + return &ServiceClient{ + f: f, + ServiceInterface: f.ClientSet.CoreV1().Services(f.Namespace.Name), + } +} + +func (s *ServiceClient) Get(name string) *corev1.Service { + service, err := s.ServiceInterface.Get(context.TODO(), name, metav1.GetOptions{}) + ExpectNoError(err) + return service +} + +// Create creates a new service according to the framework specifications +func (c *ServiceClient) Create(service *corev1.Service) *corev1.Service { + s, err := c.ServiceInterface.Create(context.TODO(), service, metav1.CreateOptions{}) + ExpectNoError(err, "Error creating service") + return s.DeepCopy() +} + +// CreateSync creates a new service according to the framework specifications, and waits for it to be updated. 
+func (c *ServiceClient) CreateSync(service *corev1.Service) *corev1.Service { + s := c.Create(service) + ExpectTrue(c.WaitToBeUpdated(s)) + // Get the newest service + return c.Get(s.Name).DeepCopy() +} + +// Delete deletes a service if the service exists +func (c *ServiceClient) Delete(name string) { + err := c.ServiceInterface.Delete(context.TODO(), name, metav1.DeleteOptions{}) + if err != nil && !apierrors.IsNotFound(err) { + Failf("Failed to delete service %q: %v", name, err) + } +} + +// DeleteSync deletes the service and waits for the service to disappear for `timeout`. +// If the service doesn't disappear before the timeout, it will fail the test. +func (c *ServiceClient) DeleteSync(name string) { + c.Delete(name) + gomega.Expect(c.WaitToDisappear(name, 2*time.Second, timeout)).To(gomega.Succeed(), "wait for service %q to disappear", name) +} + +// WaitToBeUpdated returns whether the service is updated within timeout. +func (c *ServiceClient) WaitToBeUpdated(service *corev1.Service) bool { + Logf("Waiting up to %v for service %s to be updated", timeout, service.Name) + rv, _ := big.NewInt(0).SetString(service.ResourceVersion, 10) + for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { + s := c.Get(service.Name) + if current, _ := big.NewInt(0).SetString(s.ResourceVersion, 10); current.Cmp(rv) > 0 { + return true + } + } + Logf("Service %s was not updated within %v", service.Name, timeout) + return false +} + +// WaitToDisappear waits the given timeout duration for the specified service to disappear. +func (c *ServiceClient) WaitToDisappear(name string, interval, timeout time.Duration) error { + var lastService *corev1.Service + err := wait.PollImmediate(interval, timeout, func() (bool, error) { + Logf("Waiting for service %s to disappear", name) + services, err := c.List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return handleWaitingAPIError(err, true, "listing services") + } + found := false + for i, service := range services.Items { + if service.Name == name { + Logf("Service %s still exists", name) + found = true + lastService = &(services.Items[i]) + break + } + } + if !found { + Logf("Service %s no longer exists", name) + return true, nil + } + return false, nil + }) + if err == nil { + return nil + } + if IsTimeout(err) { + return TimeoutError(fmt.Sprintf("timed out while waiting for service %s to disappear", name), + lastService, + ) + } + return maybeTimeoutError(err, "waiting for service %s to disappear", name) +} + +func MakeService(name string, svcType corev1.ServiceType, annotations, selector map[string]string, ports []corev1.ServicePort, affinity corev1.ServiceAffinity) *corev1.Service { + service := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Annotations: annotations, + }, + Spec: corev1.ServiceSpec{ + Ports: ports, + Selector: selector, + SessionAffinity: affinity, + Type: svcType, + }, + } + + return service +} diff --git a/test/e2e/framework/subnet.go b/test/e2e/framework/subnet.go new file mode 100644 index 00000000000..b0c0f46fb91 --- /dev/null +++ b/test/e2e/framework/subnet.go @@ -0,0 +1,258 @@ +package framework + +import ( + "context" + "fmt" + "math/big" + "strings" + "time" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + + "github.com/onsi/gomega" + + apiv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" + v1 
"github.com/kubeovn/kube-ovn/pkg/client/clientset/versioned/typed/kubeovn/v1" + "github.com/kubeovn/kube-ovn/pkg/util" +) + +// SubnetClient is a struct for subnet client. +type SubnetClient struct { + f *Framework + v1.SubnetInterface +} + +func (f *Framework) SubnetClient() *SubnetClient { + return &SubnetClient{ + f: f, + SubnetInterface: f.KubeOVNClientSet.KubeovnV1().Subnets(), + } +} + +func (s *SubnetClient) Get(name string) *apiv1.Subnet { + subnet, err := s.SubnetInterface.Get(context.TODO(), name, metav1.GetOptions{}) + ExpectNoError(err) + return subnet +} + +// Create creates a new subnet according to the framework specifications +func (c *SubnetClient) Create(subnet *apiv1.Subnet) *apiv1.Subnet { + s, err := c.SubnetInterface.Create(context.TODO(), subnet, metav1.CreateOptions{}) + ExpectNoError(err, "Error creating subnet") + return s.DeepCopy() +} + +// CreateSync creates a new subnet according to the framework specifications, and waits for it to be ready. +func (c *SubnetClient) CreateSync(subnet *apiv1.Subnet) *apiv1.Subnet { + s := c.Create(subnet) + ExpectTrue(c.WaitToBeReady(s.Name, timeout)) + // Get the newest subnet after it becomes ready + return c.Get(s.Name).DeepCopy() +} + +// Update updates the subnet +func (c *SubnetClient) Update(subnet *apiv1.Subnet, options metav1.UpdateOptions, timeout time.Duration) *apiv1.Subnet { + var updatedSubnet *apiv1.Subnet + err := wait.PollImmediate(2*time.Second, timeout, func() (bool, error) { + s, err := c.SubnetInterface.Update(context.TODO(), subnet, options) + if err != nil { + return handleWaitingAPIError(err, false, "update subnet %q", subnet.Name) + } + updatedSubnet = s + return true, nil + }) + if err == nil { + return updatedSubnet.DeepCopy() + } + + if IsTimeout(err) { + Failf("timed out while retrying to update subnet %s", subnet.Name) + } + ExpectNoError(maybeTimeoutError(err, "updating subnet %s", subnet.Name)) + + return nil +} + +// UpdateSync updates the subnet and waits for the subnet to be ready for `timeout`. +// If the subnet doesn't become ready before the timeout, it will fail the test. +func (c *SubnetClient) UpdateSync(subnet *apiv1.Subnet, options metav1.UpdateOptions, timeout time.Duration) *apiv1.Subnet { + s := c.Update(subnet, options, timeout) + ExpectTrue(c.WaitToBeUpdated(s, timeout)) + ExpectTrue(c.WaitToBeReady(s.Name, timeout)) + // Get the newest subnet after it becomes ready + return c.Get(s.Name).DeepCopy() +} + +// Patch patches the subnet +func (c *SubnetClient) Patch(original, modified *apiv1.Subnet, timeout time.Duration) *apiv1.Subnet { + patch, err := util.GenerateMergePatchPayload(original, modified) + ExpectNoError(err) + + var patchedSubnet *apiv1.Subnet + err = wait.PollImmediate(2*time.Second, timeout, func() (bool, error) { + s, err := c.SubnetInterface.Patch(context.TODO(), original.Name, types.MergePatchType, patch, metav1.PatchOptions{}, "") + if err != nil { + return handleWaitingAPIError(err, false, "patch subnet %q", original.Name) + } + patchedSubnet = s + return true, nil + }) + if err == nil { + return patchedSubnet.DeepCopy() + } + + if IsTimeout(err) { + Failf("timed out while retrying to patch subnet %s", original.Name) + } + ExpectNoError(maybeTimeoutError(err, "patching subnet %s", original.Name)) + + return nil +} + +// PatchSync patches the subnet and waits for the subnet to be ready for `timeout`. +// If the subnet doesn't become ready before the timeout, it will fail the test. 
+func (c *SubnetClient) PatchSync(original, modified *apiv1.Subnet) *apiv1.Subnet { + s := c.Patch(original, modified, timeout) + ExpectTrue(c.WaitToBeUpdated(s, timeout)) + ExpectTrue(c.WaitToBeReady(s.Name, timeout)) + // Get the newest subnet after it becomes ready + return c.Get(s.Name).DeepCopy() +} + +// Delete deletes a subnet if the subnet exists +func (c *SubnetClient) Delete(name string) { + err := c.SubnetInterface.Delete(context.TODO(), name, metav1.DeleteOptions{}) + if err != nil && !apierrors.IsNotFound(err) { + Failf("Failed to delete subnet %q: %v", name, err) + } +} + +// DeleteSync deletes the subnet and waits for the subnet to disappear for `timeout`. +// If the subnet doesn't disappear before the timeout, it will fail the test. +func (c *SubnetClient) DeleteSync(name string) { + c.Delete(name) + gomega.Expect(c.WaitToDisappear(name, 2*time.Second, timeout)).To(gomega.Succeed(), "wait for subnet %q to disappear", name) +} + +func isSubnetConditionSetAsExpected(subnet *apiv1.Subnet, conditionType apiv1.ConditionType, wantTrue, silent bool) bool { + for _, cond := range subnet.Status.Conditions { + if cond.Type == conditionType { + if (wantTrue && (cond.Status == corev1.ConditionTrue)) || (!wantTrue && (cond.Status != corev1.ConditionTrue)) { + return true + } + if !silent { + Logf("Condition %s of subnet %s is %v instead of %t. Reason: %v, message: %v", + conditionType, subnet.Name, cond.Status == corev1.ConditionTrue, wantTrue, cond.Reason, cond.Message) + } + return false + } + } + if !silent { + Logf("Couldn't find condition %v on subnet %v", conditionType, subnet.Name) + } + return false +} + +// IsSubnetConditionSetAsExpected returns a wantTrue value if the subnet has a match to the conditionType, +// otherwise returns an opposite value of the wantTrue with detailed logging. +func IsSubnetConditionSetAsExpected(subnet *apiv1.Subnet, conditionType apiv1.ConditionType, wantTrue bool) bool { + return isSubnetConditionSetAsExpected(subnet, conditionType, wantTrue, false) +} + +// WaitConditionToBe returns whether subnet "name's" condition state matches wantTrue +// within timeout. If wantTrue is true, it will ensure the subnet condition status is +// ConditionTrue; if it's false, it ensures the subnet condition is in any state other +// than ConditionTrue (e.g. not true or unknown). +func (c *SubnetClient) WaitConditionToBe(name string, conditionType apiv1.ConditionType, wantTrue bool, timeout time.Duration) bool { + Logf("Waiting up to %v for subnet %s condition %s to be %t", timeout, name, conditionType, wantTrue) + for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { + subnet := c.Get(name) + if IsSubnetConditionSetAsExpected(subnet, conditionType, wantTrue) { + return true + } + } + Logf("Subnet %s didn't reach desired %s condition status (%t) within %v", name, conditionType, wantTrue, timeout) + return false +} + +// WaitToBeReady returns whether the subnet is ready within timeout. +func (c *SubnetClient) WaitToBeReady(name string, timeout time.Duration) bool { + return c.WaitConditionToBe(name, apiv1.Ready, true, timeout) +} + +// WaitToBeUpdated returns whether the subnet is updated within timeout. 
+func (c *SubnetClient) WaitToBeUpdated(subnet *apiv1.Subnet, timeout time.Duration) bool { + Logf("Waiting up to %v for subnet %s to be updated", timeout, subnet.Name) + rv, _ := big.NewInt(0).SetString(subnet.ResourceVersion, 10) + for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { + s := c.Get(subnet.Name) + if current, _ := big.NewInt(0).SetString(s.ResourceVersion, 10); current.Cmp(rv) > 0 { + return true + } + } + Logf("Subnet %s was not updated within %v", subnet.Name, timeout) + return false +} + +// WaitToDisappear waits the given timeout duration for the specified subnet to disappear. +func (c *SubnetClient) WaitToDisappear(name string, interval, timeout time.Duration) error { + var lastSubnet *apiv1.Subnet + err := wait.PollImmediate(interval, timeout, func() (bool, error) { + Logf("Waiting for subnet %s to disappear", name) + subnets, err := c.List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return handleWaitingAPIError(err, true, "listing subnets") + } + found := false + for i, subnet := range subnets.Items { + if subnet.Name == name { + Logf("Subnet %s still exists", name) + found = true + lastSubnet = &(subnets.Items[i]) + break + } + } + if !found { + Logf("Subnet %s no longer exists", name) + return true, nil + } + return false, nil + }) + if err == nil { + return nil + } + if IsTimeout(err) { + return TimeoutError(fmt.Sprintf("timed out while waiting for subnet %s to disappear", name), + lastSubnet, + ) + } + return maybeTimeoutError(err, "waiting for subnet %s to disappear", name) +} + +func MakeSubnet(name, vlan, cidr, gateway string, excludeIPs, gatewayNodes, namespaces []string) *apiv1.Subnet { + subnet := &apiv1.Subnet{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: apiv1.SubnetSpec{ + Vlan: vlan, + CIDRBlock: cidr, + Gateway: gateway, + Protocol: util.CheckProtocol(cidr), + ExcludeIps: excludeIPs, + GatewayNode: strings.Join(gatewayNodes, ","), + Namespaces: namespaces, + }, + } + if len(gatewayNodes) != 0 { + subnet.Spec.GatewayType = apiv1.GWCentralizedType + } else { + subnet.Spec.GatewayType = apiv1.GWDistributedType + } + + return subnet +} diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go new file mode 100644 index 00000000000..27d08c7763e --- /dev/null +++ b/test/e2e/framework/util.go @@ -0,0 +1,137 @@ +package framework + +import ( + "fmt" + "math/big" + "math/rand" + "net" + "sort" + "strings" + "time" + + "github.com/kubeovn/kube-ovn/pkg/util" +) + +const ( + KubeOvnNamespace = "kube-system" + DaemonSetOvsOvn = "ovs-ovn" +) + +func init() { + rand.Seed(time.Now().UnixNano()) +} + +// RandomSuffix provides a random sequence to append to resources. 
+func RandomSuffix() string { + return fmt.Sprintf("%04d", rand.Intn(10000)) +} + +func RandomCIDR(family string) string { + fnIPv4 := func() string { + cidr := net.IPNet{ + IP: net.ParseIP("10.99.0.0").To4(), + Mask: net.CIDRMask(24, 32), + } + cidr.IP[2] = byte(rand.Intn(0xff + 1)) + return cidr.String() + } + + fnIPv6 := func() string { + cidr := net.IPNet{ + IP: net.ParseIP("fc00:10:ff::").To16(), + Mask: net.CIDRMask(96, 128), + } + cidr.IP[10] = byte(rand.Intn(0xff + 1)) + cidr.IP[11] = byte(rand.Intn(0xff + 1)) + return cidr.String() + } + + switch family { + case "ipv4": + return fnIPv4() + case "ipv6": + return fnIPv6() + case "dual": + return fnIPv4() + "," + fnIPv6() + default: + Failf("invalid ip family: %q", family) + return "" + } +} + +func sortIPs(ips []string) { + sort.Slice(ips, func(i, j int) bool { + return util.Ip2BigInt(ips[i]).Cmp(util.Ip2BigInt(ips[j])) < 0 + }) +} + +// ipv4/ipv6 only +func RandomExcludeIPs(cidr string, count int) []string { + if cidr == "" || count == 0 { + return nil + } + + rangeCount := rand.Intn(count + 1) + ips := strings.Split(RandomIPPool(cidr, rangeCount*2+count-rangeCount), ";") + sortIPs(ips) + + var idx int + rangeLeft := rangeCount + ret := make([]string, 0, count) + for i := 0; i < count; i++ { + if rangeLeft != 0 && rand.Intn(count-i) < rangeLeft { + ret = append(ret, fmt.Sprintf("%s..%s", ips[idx], ips[idx+1])) + idx++ + rangeLeft-- + } else { + ret = append(ret, ips[idx]) + } + idx++ + } + + return ret +} + +func RandomIPPool(cidr string, count int) string { + fn := func(cidr string) []string { + if cidr == "" { + return nil + } + + firstIP, _ := util.FirstIP(cidr) + _, ipnet, _ := net.ParseCIDR(cidr) + + base := util.Ip2BigInt(firstIP) + base.Add(base, big.NewInt(1)) + prefix, size := ipnet.Mask.Size() + rnd := rand.New(rand.NewSource(time.Now().UnixNano())) + max := big.NewInt(0).Exp(big.NewInt(2), big.NewInt(int64(size-prefix)), nil) + max.Sub(max, big.NewInt(3)) + + ips := make([]string, 0, count) + for len(ips) != count { + n := big.NewInt(0).Rand(rnd, max) + if ip := util.BigInt2Ip(n.Add(n, base)); !util.ContainsString(ips, ip) { + ips = append(ips, ip) + } + } + return ips + } + + cidrV4, cidrV6 := util.SplitStringIP(cidr) + ipsV4, ipsV6 := fn(cidrV4), fn(cidrV6) + + dual := make([]string, 0, count) + for i := 0; i < count; i++ { + var ips []string + if i < len(ipsV4) { + ips = append(ips, ipsV4[i]) + } + if i < len(ipsV6) { + ips = append(ips, ipsV6[i]) + } + dual = append(dual, strings.Join(ips, ",")) + } + + return strings.Join(dual, ";") +} diff --git a/test/e2e/framework/vlan.go b/test/e2e/framework/vlan.go new file mode 100644 index 00000000000..2c96802549c --- /dev/null +++ b/test/e2e/framework/vlan.go @@ -0,0 +1,88 @@ +package framework + +import ( + "context" + "time" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + + apiv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" + v1 "github.com/kubeovn/kube-ovn/pkg/client/clientset/versioned/typed/kubeovn/v1" + "github.com/kubeovn/kube-ovn/pkg/util" +) + +// VlanClient is a struct for vlan client. 
+type VlanClient struct { + f *Framework + v1.VlanInterface +} + +func (f *Framework) VlanClient() *VlanClient { + return &VlanClient{ + f: f, + VlanInterface: f.KubeOVNClientSet.KubeovnV1().Vlans(), + } +} + +func (s *VlanClient) Get(name string) *apiv1.Vlan { + vlan, err := s.VlanInterface.Get(context.TODO(), name, metav1.GetOptions{}) + ExpectNoError(err) + return vlan +} + +// Create creates a new vlan according to the framework specifications +func (c *VlanClient) Create(pn *apiv1.Vlan) *apiv1.Vlan { + vlan, err := c.VlanInterface.Create(context.TODO(), pn, metav1.CreateOptions{}) + ExpectNoError(err, "Error creating vlan") + return vlan.DeepCopy() +} + +// Patch patches the vlan +func (c *VlanClient) Patch(original, modified *apiv1.Vlan, timeout time.Duration) *apiv1.Vlan { + patch, err := util.GenerateMergePatchPayload(original, modified) + ExpectNoError(err) + + var patchedVlan *apiv1.Vlan + err = wait.PollImmediate(2*time.Second, timeout, func() (bool, error) { + pn, err := c.VlanInterface.Patch(context.TODO(), original.Name, types.MergePatchType, patch, metav1.PatchOptions{}, "") + if err != nil { + return handleWaitingAPIError(err, false, "patch vlan %q", original.Name) + } + patchedVlan = pn + return true, nil + }) + if err == nil { + return patchedVlan.DeepCopy() + } + + if IsTimeout(err) { + Failf("timed out while retrying to patch vlan %s", original.Name) + } + ExpectNoError(maybeTimeoutError(err, "patching vlan %s", original.Name)) + + return nil +} + +// Delete deletes a vlan if the vlan exists +func (c *VlanClient) Delete(name string, options metav1.DeleteOptions) { + err := c.VlanInterface.Delete(context.TODO(), name, options) + if err != nil && !apierrors.IsNotFound(err) { + Failf("Failed to delete vlan %q: %v", name, err) + } +} + +func MakeVlan(name, provider string, id int) *apiv1.Vlan { + vlan := &apiv1.Vlan{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: apiv1.VlanSpec{ + Provider: provider, + ID: id, + }, + } + return vlan +} diff --git a/test/e2e/framework/wait.go b/test/e2e/framework/wait.go new file mode 100644 index 00000000000..e40985d8f7b --- /dev/null +++ b/test/e2e/framework/wait.go @@ -0,0 +1,99 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package framework + +import ( + "fmt" + "time" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/util/wait" +) + +type timeoutError struct { + msg string + observedObjects []interface{} +} + +func (e *timeoutError) Error() string { + return e.msg +} + +func TimeoutError(msg string, observedObjects ...interface{}) *timeoutError { + return &timeoutError{ + msg: msg, + observedObjects: observedObjects, + } +} + +// maybeTimeoutError returns a TimeoutError if err is a timeout. Otherwise, wrap err. +// taskFormat and taskArgs should be the task being performed when the error occurred, +// e.g. "waiting for pod to be running". 
+func maybeTimeoutError(err error, taskFormat string, taskArgs ...interface{}) error { + if IsTimeout(err) { + return TimeoutError(fmt.Sprintf("timed out while "+taskFormat, taskArgs...)) + } else if err != nil { + return fmt.Errorf("error while %s: %w", fmt.Sprintf(taskFormat, taskArgs...), err) + } else { + return nil + } +} + +func IsTimeout(err error) bool { + if err == wait.ErrWaitTimeout { + return true + } + if _, ok := err.(*timeoutError); ok { + return true + } + return false +} + +// handleWaitingAPIError handles an error from an API request in the context of a Wait function. +// If the error is retryable, sleep the recommended delay and ignore the error. +// If the error is terminal, return it. +func handleWaitingAPIError(err error, retryNotFound bool, taskFormat string, taskArgs ...interface{}) (bool, error) { + taskDescription := fmt.Sprintf(taskFormat, taskArgs...) + if retryNotFound && apierrors.IsNotFound(err) { + Logf("Ignoring NotFound error while " + taskDescription) + return false, nil + } + if retry, delay := shouldRetry(err); retry { + Logf("Retryable error while %s, retrying after %v: %v", taskDescription, delay, err) + if delay > 0 { + time.Sleep(delay) + } + return false, nil + } + Logf("Encountered non-retryable error while %s: %v", taskDescription, err) + return false, err +} + +// Decide whether to retry an API request. Optionally include a delay to retry after. +func shouldRetry(err error) (retry bool, retryAfter time.Duration) { + // if the error sends the Retry-After header, we respect it as an explicit confirmation we should retry. + if delay, shouldRetry := apierrors.SuggestsClientDelay(err); shouldRetry { + return shouldRetry, time.Duration(delay) * time.Second + } + + // these errors indicate a transient error that should be retried. + if apierrors.IsTimeout(err) || apierrors.IsTooManyRequests(err) { + return true, 0 + } + + return false, 0 +} diff --git a/test/e2e/ip/static_ip.go b/test/e2e/ip/static_ip.go deleted file mode 100644 index a58f6f6e859..00000000000 --- a/test/e2e/ip/static_ip.go +++ /dev/null @@ -1,350 +0,0 @@ -package ip - -import ( - "context" - "fmt" - "os" - "sort" - "strings" - "time" - - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" - - "github.com/kubeovn/kube-ovn/pkg/util" - "github.com/kubeovn/kube-ovn/test/e2e/framework" -) - -const testImage = "kubeovn/pause:3.2" - -var _ = Describe("[IP Allocation]", func() { - namespace := "static-ip" - f := framework.NewFramework("ip allocation", fmt.Sprintf("%s/.kube/config", os.Getenv("HOME"))) - - Describe("static pod ip", func() { - It("normal ip", func() { - name := f.GetName() - ip, mac := "12.10.0.10", "00:00:00:53:6B:B6" - autoMount := false - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Labels: map[string]string{"e2e": "true"}, - Annotations: map[string]string{ - util.IpAddressAnnotation: ip, - util.MacAddressAnnotation: mac, - }, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: name, - Image: testImage, - ImagePullPolicy: corev1.PullIfNotPresent, - }, - }, - AutomountServiceAccountToken: &autoMount, - }, - } - - By("Create pod") - _, err := f.KubeClientSet.CoreV1().Pods(namespace).Create(context.Background(), pod, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - - _, err = f.WaitPodReady(name, namespace) - Expect(err).NotTo(HaveOccurred()) - - pod, err = f.KubeClientSet.CoreV1().Pods(namespace).Get(context.Background(), name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - Expect(pod.Annotations[util.AllocatedAnnotation]).To(Equal("true")) - Expect(pod.Annotations[util.RoutedAnnotation]).To(Equal("true")) - - time.Sleep(1 * time.Second) - podIP, err := f.OvnClientSet.KubeovnV1().IPs().Get(context.Background(), fmt.Sprintf("%s.%s", name, namespace), metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - Expect(podIP.Spec.V4IPAddress).To(Equal(ip)) - Expect(podIP.Spec.MacAddress).To(Equal(mac)) - - By("Delete pod") - err = f.KubeClientSet.CoreV1().Pods(namespace).Delete(context.Background(), pod.Name, metav1.DeleteOptions{}) - Expect(err).NotTo(HaveOccurred()) - }) - - It("deployment with ippool", func() { - name := f.GetName() - var replicas int32 = 3 - ips := []string{"12.10.0.20", "12.10.0.21", "12.10.0.22"} - autoMount := false - deployment := appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Labels: map[string]string{"e2e": "true"}, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: &replicas, - Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"apps": name}}, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "apps": name, - "e2e": "true", - }, - Annotations: map[string]string{ - util.IpPoolAnnotation: strings.Join(ips, ";"), - }, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: name, - Image: testImage, - ImagePullPolicy: corev1.PullIfNotPresent, - }, - }, - AutomountServiceAccountToken: &autoMount, - }, - }, - }, - } - - By("Create deployment") - _, err := f.KubeClientSet.AppsV1().Deployments(namespace).Create(context.Background(), &deployment, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - - err = f.WaitDeploymentReady(name, namespace) - Expect(err).NotTo(HaveOccurred()) - - pods, err := f.KubeClientSet.CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{LabelSelector: labels.SelectorFromSet(deployment.Spec.Template.Labels).String()}) - Expect(err).NotTo(HaveOccurred()) - Expect(pods.Items).To(HaveLen(int(replicas))) - - podIPs := make([]string, replicas) - for i := range pods.Items { - podIPs[i] = pods.Items[i].Status.PodIP - } - sort.Strings(podIPs) - 
Expect(podIPs).To(Equal(ips)) - - By("Delete pods and recreate") - err = f.KubeClientSet.CoreV1().Pods(namespace).DeleteCollection(context.Background(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(deployment.Spec.Template.Labels).String()}) - Expect(err).NotTo(HaveOccurred()) - - err = f.WaitDeploymentReady(name, namespace) - Expect(err).NotTo(HaveOccurred()) - - pods, err = f.KubeClientSet.CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{LabelSelector: labels.SelectorFromSet(deployment.Spec.Template.Labels).String()}) - Expect(err).NotTo(HaveOccurred()) - Expect(pods.Items).To(HaveLen(int(replicas))) - - for i := range pods.Items { - podIPs[i] = pods.Items[i].Status.PodIP - } - sort.Strings(podIPs) - Expect(podIPs).To(Equal(ips)) - - By("Delete deployment") - err = f.KubeClientSet.AppsV1().Deployments(namespace).Delete(context.Background(), deployment.Name, metav1.DeleteOptions{}) - Expect(err).NotTo(HaveOccurred()) - }) - - It("statefulset with ippool", func() { - name := f.GetName() - var replicas int32 = 3 - ips := []string{"12.10.0.31", "12.10.0.32", "12.10.0.30"} - autoMount := false - sts := appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Labels: map[string]string{"e2e": "true"}, - }, - Spec: appsv1.StatefulSetSpec{ - Replicas: &replicas, - Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"apps": name}}, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "apps": name, - "e2e": "true", - }, - Annotations: map[string]string{ - util.IpPoolAnnotation: strings.Join(ips, ";"), - }, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: name, - Image: testImage, - ImagePullPolicy: corev1.PullIfNotPresent, - }, - }, - AutomountServiceAccountToken: &autoMount, - }, - }, - }, - } - - By("Create statefulset") - _, err := f.KubeClientSet.AppsV1().StatefulSets(namespace).Create(context.Background(), &sts, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - - err = f.WaitStatefulsetReady(name, namespace) - Expect(err).NotTo(HaveOccurred()) - - for i := range ips { - pod, err := f.KubeClientSet.CoreV1().Pods(namespace).Get(context.Background(), fmt.Sprintf("%s-%d", name, i), metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - Expect(pod.Status.PodIP).To(Equal(ips[i])) - } - - By("Delete statefulset") - err = f.KubeClientSet.AppsV1().StatefulSets(namespace).Delete(context.Background(), sts.Name, metav1.DeleteOptions{}) - Expect(err).NotTo(HaveOccurred()) - }) - - It("statefulset without ippool", func() { - name := f.GetName() - var replicas int32 = 3 - autoMount := false - sts := appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Labels: map[string]string{"e2e": "true"}, - }, - Spec: appsv1.StatefulSetSpec{ - Replicas: &replicas, - Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"apps": name}}, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "apps": name, - "e2e": "true", - }, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: name, - Image: testImage, - ImagePullPolicy: corev1.PullIfNotPresent, - }, - }, - AutomountServiceAccountToken: &autoMount, - }, - }, - }, - } - - By("Create statefulset") - _, err := f.KubeClientSet.AppsV1().StatefulSets(namespace).Create(context.Background(), &sts, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - - err = 
f.WaitStatefulsetReady(name, namespace) - Expect(err).NotTo(HaveOccurred()) - - ips := make([]string, replicas) - for i := range ips { - pod, err := f.KubeClientSet.CoreV1().Pods(namespace).Get(context.Background(), fmt.Sprintf("%s-%d", name, i), metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - ips[i] = pod.Status.PodIP - } - - err = f.KubeClientSet.CoreV1().Pods(namespace).DeleteCollection(context.Background(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(sts.Spec.Template.Labels).String()}) - Expect(err).NotTo(HaveOccurred()) - - err = f.WaitStatefulsetReady(name, namespace) - Expect(err).NotTo(HaveOccurred()) - for i := range ips { - pod, err := f.KubeClientSet.CoreV1().Pods(namespace).Get(context.Background(), fmt.Sprintf("%s-%d", name, i), metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - Expect(pod.Status.PodIP).To(Equal(ips[i])) - } - - By("Delete statefulset") - err = f.KubeClientSet.AppsV1().StatefulSets(namespace).Delete(context.Background(), sts.Name, metav1.DeleteOptions{}) - Expect(err).NotTo(HaveOccurred()) - }) - - It("force delete statefulset pod", func() { - name := f.GetName() - var replicas int32 = 3 - autoMount := false - sts := appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Labels: map[string]string{"e2e": "true"}, - }, - Spec: appsv1.StatefulSetSpec{ - Replicas: &replicas, - Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"apps": name}}, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "apps": name, - "e2e": "true", - }, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: name, - Image: testImage, - ImagePullPolicy: corev1.PullIfNotPresent, - }, - }, - AutomountServiceAccountToken: &autoMount, - }, - }, - }, - } - - By("Create statefulset") - _, err := f.KubeClientSet.AppsV1().StatefulSets(namespace).Create(context.Background(), &sts, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - - err = f.WaitStatefulsetReady(name, namespace) - Expect(err).NotTo(HaveOccurred()) - - ips := make([]string, replicas) - for i := range ips { - pod, err := f.KubeClientSet.CoreV1().Pods(namespace).Get(context.Background(), fmt.Sprintf("%s-%d", name, i), metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - ips[i] = pod.Status.PodIP - } - - err = f.KubeClientSet.CoreV1().Pods(namespace).DeleteCollection(context.Background(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(sts.Spec.Template.Labels).String()}) - Expect(err).NotTo(HaveOccurred()) - - err = f.WaitStatefulsetReady(name, namespace) - Expect(err).NotTo(HaveOccurred()) - for i := range ips { - pod, err := f.KubeClientSet.CoreV1().Pods(namespace).Get(context.Background(), fmt.Sprintf("%s-%d", name, i), metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - Expect(pod.Status.PodIP).To(Equal(ips[i])) - } - - By("Force delete statefulset Pod") - var gracePeriodSeconds int64 - for i := 0; i < int(replicas); i++ { - err = f.KubeClientSet.CoreV1().Pods(namespace).Delete(context.Background(), fmt.Sprintf("%s-%d", name, i), metav1.DeleteOptions{GracePeriodSeconds: &gracePeriodSeconds}) - Expect(err).NotTo(HaveOccurred()) - } - err = f.WaitStatefulsetReady(name, namespace) - Expect(err).NotTo(HaveOccurred()) - - By("Delete statefulset") - err = f.KubeClientSet.AppsV1().StatefulSets(namespace).Delete(context.Background(), sts.Name, metav1.DeleteOptions{}) - Expect(err).NotTo(HaveOccurred()) - }) - }) 
-}) diff --git a/test/k8s-network/e2e_test.go b/test/e2e/k8s-network/e2e_test.go similarity index 80% rename from test/k8s-network/e2e_test.go rename to test/e2e/k8s-network/e2e_test.go index 46d776c15cf..4ea81101dd0 100644 --- a/test/k8s-network/e2e_test.go +++ b/test/e2e/k8s-network/e2e_test.go @@ -2,14 +2,17 @@ package k8s_network import ( "flag" + "os" + "path/filepath" "testing" - "github.com/onsi/ginkgo/v2" "k8s.io/klog/v2" "k8s.io/kubernetes/test/e2e" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework/config" + "github.com/onsi/ginkgo/v2" + // Import tests. _ "k8s.io/kubernetes/test/e2e/network" ) @@ -24,6 +27,9 @@ func init() { // Parse all the flags flag.Parse() + if framework.TestContext.KubeConfig == "" { + framework.TestContext.KubeConfig = filepath.Join(os.Getenv("HOME"), ".kube", "config") + } framework.AfterReadingAllFlags(&framework.TestContext) } diff --git a/test/e2e/kube-ovn/e2e_test.go b/test/e2e/kube-ovn/e2e_test.go new file mode 100644 index 00000000000..225cf18bb83 --- /dev/null +++ b/test/e2e/kube-ovn/e2e_test.go @@ -0,0 +1,43 @@ +package kube_ovn + +import ( + "flag" + "os" + "path/filepath" + "testing" + + "k8s.io/klog/v2" + "k8s.io/kubernetes/test/e2e" + "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/framework/config" + + "github.com/onsi/ginkgo/v2" + + // Import tests. + _ "github.com/kubeovn/kube-ovn/test/e2e/kube-ovn/ipam" + _ "github.com/kubeovn/kube-ovn/test/e2e/kube-ovn/kubectl-ko" + _ "github.com/kubeovn/kube-ovn/test/e2e/kube-ovn/node" + _ "github.com/kubeovn/kube-ovn/test/e2e/kube-ovn/qos" + _ "github.com/kubeovn/kube-ovn/test/e2e/kube-ovn/subnet" + _ "github.com/kubeovn/kube-ovn/test/e2e/kube-ovn/underlay" +) + +func init() { + klog.SetOutput(ginkgo.GinkgoWriter) + + // Register flags. 
+ config.CopyFlags(config.Flags, flag.CommandLine) + framework.RegisterCommonFlags(flag.CommandLine) + framework.RegisterClusterFlags(flag.CommandLine) + + // Parse all the flags + flag.Parse() + if framework.TestContext.KubeConfig == "" { + framework.TestContext.KubeConfig = filepath.Join(os.Getenv("HOME"), ".kube", "config") + } + framework.AfterReadingAllFlags(&framework.TestContext) +} + +func TestE2E(t *testing.T) { + e2e.RunE2ETests(t) +} diff --git a/test/e2e/kube-ovn/ipam/ipam.go b/test/e2e/kube-ovn/ipam/ipam.go new file mode 100644 index 00000000000..a42acc3ee98 --- /dev/null +++ b/test/e2e/kube-ovn/ipam/ipam.go @@ -0,0 +1,286 @@ +package ipam + +import ( + "context" + "strings" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/framework/deployment" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + "k8s.io/kubernetes/test/e2e/framework/statefulset" + + "github.com/onsi/ginkgo/v2" + + apiv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" + "github.com/kubeovn/kube-ovn/pkg/util" + "github.com/kubeovn/kube-ovn/test/e2e/framework" +) + +var _ = framework.Describe("[group:ipam]", func() { + f := framework.NewDefaultFramework("ipam") + + var cs clientset.Interface + var podClient *framework.PodClient + var subnetClient *framework.SubnetClient + var namespaceName, subnetName string + var subnet *apiv1.Subnet + var cidr string + + ginkgo.BeforeEach(func() { + cs = f.ClientSet + podClient = f.PodClient() + subnetClient = f.SubnetClient() + namespaceName = f.Namespace.Name + subnetName = namespaceName + cidr = framework.RandomCIDR(f.ClusterIpFamily) + + ginkgo.By("Creating subnet " + subnetName) + subnet = framework.MakeSubnet(subnetName, "", cidr, "", nil, nil, []string{namespaceName}) + subnet = subnetClient.CreateSync(subnet) + }) + ginkgo.AfterEach(func() { + ginkgo.By("Deleting subnet " + subnetName) + subnetClient.DeleteSync(subnetName) + }) + + framework.ConformanceIt("should allocate static ipv4 and mac for pod", func() { + name := "pod-" + framework.RandomSuffix() + mac := util.GenerateMac() + ip := framework.RandomIPPool(cidr, 1) + + ginkgo.By("Creating pod " + name + " with ip " + ip + " and mac " + mac) + annotations := map[string]string{ + util.IpAddressAnnotation: ip, + util.MacAddressAnnotation: mac, + } + pod := framework.MakePod(namespaceName, name, nil, annotations, "", nil, nil) + pod = podClient.CreateSync(pod) + + framework.ExpectHaveKeyWithValue(pod.Annotations, util.AllocatedAnnotation, "true") + framework.ExpectHaveKeyWithValue(pod.Annotations, util.CidrAnnotation, subnet.Spec.CIDRBlock) + framework.ExpectHaveKeyWithValue(pod.Annotations, util.GatewayAnnotation, subnet.Spec.Gateway) + framework.ExpectHaveKeyWithValue(pod.Annotations, util.IpAddressAnnotation, ip) + framework.ExpectHaveKeyWithValue(pod.Annotations, util.LogicalSwitchAnnotation, subnet.Name) + framework.ExpectHaveKeyWithValue(pod.Annotations, util.MacAddressAnnotation, mac) + framework.ExpectHaveKeyWithValue(pod.Annotations, util.RoutedAnnotation, "true") + + podIPs := make([]string, 0, len(pod.Status.PodIPs)) + for _, podIP := range pod.Status.PodIPs { + podIPs = append(podIPs, podIP.IP) + } + framework.ExpectConsistOf(podIPs, strings.Split(ip, ",")) + + ginkgo.By("Deleting pod " + name) + podClient.DeleteSync(pod.Name) + }) + + framework.ConformanceIt("should allocate static ipv4 for deployment with ippool", func() { + replicas := 3 + name := "deployment-" + framework.RandomSuffix() + ippool := 
framework.RandomIPPool(cidr, replicas) + labels := map[string]string{"app": name} + + ginkgo.By("Creating deployment " + name + " with ippool " + ippool) + deploy := deployment.NewDeployment(name, int32(replicas), labels, "pause", framework.PauseImage, "") + deploy.Spec.Template.Annotations = map[string]string{util.IpPoolAnnotation: ippool} + deploy, err := cs.AppsV1().Deployments(namespaceName).Create(context.TODO(), deploy, metav1.CreateOptions{}) + framework.ExpectNoError(err, "failed to create deployment") + err = deployment.WaitForDeploymentComplete(cs, deploy) + framework.ExpectNoError(err, "deployment failed to complete") + + ginkgo.By("Getting pods for deployment " + name) + pods, err := deployment.GetPodsForDeployment(cs, deploy) + framework.ExpectNoError(err, "failed to get pods for deployment "+name) + framework.ExpectHaveLen(pods.Items, replicas) + + ips := strings.Split(ippool, ";") + for _, pod := range pods.Items { + framework.ExpectHaveKeyWithValue(pod.Annotations, util.AllocatedAnnotation, "true") + framework.ExpectHaveKeyWithValue(pod.Annotations, util.CidrAnnotation, subnet.Spec.CIDRBlock) + framework.ExpectHaveKeyWithValue(pod.Annotations, util.GatewayAnnotation, subnet.Spec.Gateway) + framework.ExpectHaveKeyWithValue(pod.Annotations, util.IpPoolAnnotation, ippool) + framework.ExpectContainElement(ips, pod.Annotations[util.IpAddressAnnotation]) + framework.ExpectHaveKeyWithValue(pod.Annotations, util.LogicalSwitchAnnotation, subnet.Name) + framework.ExpectMAC(pod.Annotations[util.MacAddressAnnotation]) + framework.ExpectHaveKeyWithValue(pod.Annotations, util.RoutedAnnotation, "true") + + podIPs := make([]string, 0, len(pod.Status.PodIPs)) + for _, podIP := range pod.Status.PodIPs { + podIPs = append(podIPs, podIP.IP) + } + framework.ExpectConsistOf(podIPs, strings.Split(pod.Annotations[util.IpAddressAnnotation], ",")) + } + + ginkgo.By("Deleting pods for deployment " + name) + for _, pod := range pods.Items { + err = podClient.Delete(context.TODO(), pod.Name, metav1.DeleteOptions{}) + framework.ExpectNoError(err, "failed to delete pod "+pod.Name) + } + err = deployment.WaitForDeploymentComplete(cs, deploy) + framework.ExpectNoError(err, "deployment failed to complete") + + ginkgo.By("Waiting for new pods to be ready") + err = e2epod.WaitForPodsRunningReady(cs, namespaceName, *deploy.Spec.Replicas, 0, time.Minute, nil) + framework.ExpectNoError(err, "timed out waiting for pods to be ready") + + ginkgo.By("Getting pods for deployment " + name + " after deletion") + pods, err = deployment.GetPodsForDeployment(cs, deploy) + framework.ExpectNoError(err, "failed to get pods for deployment "+name) + framework.ExpectHaveLen(pods.Items, replicas) + for _, pod := range pods.Items { + framework.ExpectHaveKeyWithValue(pod.Annotations, util.AllocatedAnnotation, "true") + framework.ExpectHaveKeyWithValue(pod.Annotations, util.CidrAnnotation, subnet.Spec.CIDRBlock) + framework.ExpectHaveKeyWithValue(pod.Annotations, util.GatewayAnnotation, subnet.Spec.Gateway) + framework.ExpectHaveKeyWithValue(pod.Annotations, util.IpPoolAnnotation, ippool) + framework.ExpectContainElement(ips, pod.Annotations[util.IpAddressAnnotation]) + framework.ExpectHaveKeyWithValue(pod.Annotations, util.LogicalSwitchAnnotation, subnet.Name) + framework.ExpectMAC(pod.Annotations[util.MacAddressAnnotation]) + framework.ExpectHaveKeyWithValue(pod.Annotations, util.RoutedAnnotation, "true") + + podIPs := make([]string, 0, len(pod.Status.PodIPs)) + for _, podIP := range pod.Status.PodIPs { + podIPs = 
append(podIPs, podIP.IP) + } + framework.ExpectConsistOf(podIPs, strings.Split(pod.Annotations[util.IpAddressAnnotation], ",")) + } + + ginkgo.By("Deleting deployment " + name) + err = cs.AppsV1().Deployments(namespaceName).Delete(context.TODO(), name, metav1.DeleteOptions{}) + framework.ExpectNoError(err, "failed to delete deployment "+name) + }) + + framework.ConformanceIt("should allocate static ipv4 for statefulset", func() { + replicas := 3 + name := "statefulset-" + framework.RandomSuffix() + labels := map[string]string{"app": name} + + ginkgo.By("Creating statefulset " + name) + sts := statefulset.NewStatefulSet(name, namespaceName, name, int32(replicas), nil, nil, labels) + sts.Spec.Template.Spec.Containers[0].Image = framework.PauseImage + sts, err := cs.AppsV1().StatefulSets(namespaceName).Create(context.TODO(), sts, metav1.CreateOptions{}) + framework.ExpectNoError(err, "failed to create statefulset") + statefulset.WaitForRunningAndReady(cs, int32(replicas), sts) + + ginkgo.By("Getting pods for statefulset " + name) + pods := statefulset.GetPodList(cs, sts) + framework.ExpectHaveLen(pods.Items, replicas) + statefulset.SortStatefulPods(pods) + + ips := make([]string, 0, replicas) + for _, pod := range pods.Items { + framework.ExpectHaveKeyWithValue(pod.Annotations, util.AllocatedAnnotation, "true") + framework.ExpectHaveKeyWithValue(pod.Annotations, util.CidrAnnotation, subnet.Spec.CIDRBlock) + framework.ExpectHaveKeyWithValue(pod.Annotations, util.GatewayAnnotation, subnet.Spec.Gateway) + framework.ExpectHaveKeyWithValue(pod.Annotations, util.LogicalSwitchAnnotation, subnet.Name) + framework.ExpectMAC(pod.Annotations[util.MacAddressAnnotation]) + framework.ExpectHaveKeyWithValue(pod.Annotations, util.RoutedAnnotation, "true") + + podIPs := make([]string, 0, len(pod.Status.PodIPs)) + for _, podIP := range pod.Status.PodIPs { + podIPs = append(podIPs, podIP.IP) + } + framework.ExpectConsistOf(podIPs, strings.Split(pod.Annotations[util.IpAddressAnnotation], ",")) + ips = append(ips, pod.Annotations[util.IpAddressAnnotation]) + } + + ginkgo.By("Deleting pods for statefulset " + name) + for _, pod := range pods.Items { + err = podClient.Delete(context.TODO(), pod.Name, metav1.DeleteOptions{}) + framework.ExpectNoError(err, "failed to delete pod "+pod.Name) + } + statefulset.WaitForRunningAndReady(cs, int32(replicas), sts) + + ginkgo.By("Getting pods for statefulset " + name) + pods = statefulset.GetPodList(cs, sts) + framework.ExpectHaveLen(pods.Items, replicas) + statefulset.SortStatefulPods(pods) + + for i, pod := range pods.Items { + framework.ExpectHaveKeyWithValue(pod.Annotations, util.AllocatedAnnotation, "true") + framework.ExpectHaveKeyWithValue(pod.Annotations, util.CidrAnnotation, subnet.Spec.CIDRBlock) + framework.ExpectHaveKeyWithValue(pod.Annotations, util.GatewayAnnotation, subnet.Spec.Gateway) + framework.ExpectHaveKeyWithValue(pod.Annotations, util.IpAddressAnnotation, ips[i]) + framework.ExpectHaveKeyWithValue(pod.Annotations, util.LogicalSwitchAnnotation, subnet.Name) + framework.ExpectMAC(pod.Annotations[util.MacAddressAnnotation]) + framework.ExpectHaveKeyWithValue(pod.Annotations, util.RoutedAnnotation, "true") + } + + ginkgo.By("Deleting statefulset " + name) + err = cs.AppsV1().StatefulSets(namespaceName).Delete(context.TODO(), name, metav1.DeleteOptions{}) + framework.ExpectNoError(err, "failed to delete statefulset "+name) + }) + + framework.ConformanceIt("should allocate static ipv4 for statefulset with ippool", func() { + replicas := 3 + name := 
"statefulset-" + framework.RandomSuffix() + ippool := framework.RandomIPPool(cidr, replicas) + labels := map[string]string{"app": name} + + ginkgo.By("Creating statefulset " + name + " with ippool " + ippool) + sts := statefulset.NewStatefulSet(name, namespaceName, name, int32(replicas), nil, nil, labels) + sts.Spec.Template.Spec.Containers[0].Image = framework.PauseImage + sts.Spec.Template.Annotations = map[string]string{util.IpPoolAnnotation: ippool} + sts, err := cs.AppsV1().StatefulSets(namespaceName).Create(context.TODO(), sts, metav1.CreateOptions{}) + framework.ExpectNoError(err, "failed to create statefulset") + statefulset.WaitForRunningAndReady(cs, int32(replicas), sts) + + ginkgo.By("Getting pods for statefulset " + name) + pods := statefulset.GetPodList(cs, sts) + framework.ExpectHaveLen(pods.Items, replicas) + statefulset.SortStatefulPods(pods) + + ips := make([]string, 0, replicas) + for _, pod := range pods.Items { + framework.ExpectHaveKeyWithValue(pod.Annotations, util.AllocatedAnnotation, "true") + framework.ExpectHaveKeyWithValue(pod.Annotations, util.CidrAnnotation, subnet.Spec.CIDRBlock) + framework.ExpectHaveKeyWithValue(pod.Annotations, util.GatewayAnnotation, subnet.Spec.Gateway) + framework.ExpectHaveKeyWithValue(pod.Annotations, util.IpPoolAnnotation, ippool) + framework.ExpectHaveKeyWithValue(pod.Annotations, util.LogicalSwitchAnnotation, subnet.Name) + framework.ExpectMAC(pod.Annotations[util.MacAddressAnnotation]) + framework.ExpectHaveKeyWithValue(pod.Annotations, util.RoutedAnnotation, "true") + + podIPs := make([]string, 0, len(pod.Status.PodIPs)) + for _, podIP := range pod.Status.PodIPs { + podIPs = append(podIPs, podIP.IP) + } + framework.ExpectConsistOf(podIPs, strings.Split(pod.Annotations[util.IpAddressAnnotation], ",")) + ips = append(ips, pod.Annotations[util.IpAddressAnnotation]) + } + framework.ExpectConsistOf(ips, strings.Split(ippool, ";")) + + ginkgo.By("Deleting pods for statefulset " + name) + for _, pod := range pods.Items { + err = podClient.Delete(context.TODO(), pod.Name, metav1.DeleteOptions{}) + framework.ExpectNoError(err, "failed to delete pod "+pod.Name) + } + statefulset.WaitForRunningAndReady(cs, int32(replicas), sts) + + ginkgo.By("Getting pods for statefulset " + name) + pods = statefulset.GetPodList(cs, sts) + framework.ExpectHaveLen(pods.Items, replicas) + statefulset.SortStatefulPods(pods) + + for i, pod := range pods.Items { + framework.ExpectHaveKeyWithValue(pod.Annotations, util.AllocatedAnnotation, "true") + framework.ExpectHaveKeyWithValue(pod.Annotations, util.CidrAnnotation, subnet.Spec.CIDRBlock) + framework.ExpectHaveKeyWithValue(pod.Annotations, util.GatewayAnnotation, subnet.Spec.Gateway) + framework.ExpectHaveKeyWithValue(pod.Annotations, util.IpPoolAnnotation, ippool) + framework.ExpectHaveKeyWithValue(pod.Annotations, util.IpAddressAnnotation, ips[i]) + framework.ExpectHaveKeyWithValue(pod.Annotations, util.LogicalSwitchAnnotation, subnet.Name) + framework.ExpectMAC(pod.Annotations[util.MacAddressAnnotation]) + framework.ExpectHaveKeyWithValue(pod.Annotations, util.RoutedAnnotation, "true") + + podIPs := make([]string, 0, len(pod.Status.PodIPs)) + for _, podIP := range pod.Status.PodIPs { + podIPs = append(podIPs, podIP.IP) + } + framework.ExpectConsistOf(podIPs, strings.Split(pod.Annotations[util.IpAddressAnnotation], ",")) + } + + ginkgo.By("Deleting statefulset " + name) + err = cs.AppsV1().StatefulSets(namespaceName).Delete(context.TODO(), name, metav1.DeleteOptions{}) + framework.ExpectNoError(err, 
"failed to delete statefulset "+name) + }) +}) diff --git a/test/e2e/kube-ovn/kubectl-ko/kubectl-ko.go b/test/e2e/kube-ovn/kubectl-ko/kubectl-ko.go new file mode 100644 index 00000000000..f306caa44d1 --- /dev/null +++ b/test/e2e/kube-ovn/kubectl-ko/kubectl-ko.go @@ -0,0 +1,145 @@ +package kubectl_ko + +import ( + "fmt" + "strings" + + clientset "k8s.io/client-go/kubernetes" + k8sframework "k8s.io/kubernetes/test/e2e/framework" + e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" + e2enode "k8s.io/kubernetes/test/e2e/framework/node" + + "github.com/onsi/ginkgo/v2" + + apiv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" + "github.com/kubeovn/kube-ovn/pkg/util" + "github.com/kubeovn/kube-ovn/test/e2e/framework" +) + +const ( + targetIPv4 = "8.8.8.8" + targetIPv6 = "2001:4860:4860::8888" +) + +func execOrDie(cmd string) { + ginkgo.By(`Executing "kubectl ` + cmd + `"`) + e2ekubectl.NewKubectlCommand("", strings.Fields(cmd)...).ExecOrDie("") +} + +var _ = framework.Describe("[group:kubectl-ko]", func() { + f := framework.NewDefaultFramework("kubectl-ko") + + var cs clientset.Interface + var podClient *framework.PodClient + var namespaceName, kubectlConfig string + ginkgo.BeforeEach(func() { + cs = f.ClientSet + podClient = f.PodClient() + namespaceName = f.Namespace.Name + kubectlConfig = k8sframework.TestContext.KubeConfig + k8sframework.TestContext.KubeConfig = "" + }) + ginkgo.AfterEach(func() { + k8sframework.TestContext.KubeConfig = kubectlConfig + }) + + framework.ConformanceIt(`should succeed to execute "kubectl ko nbctl show"`, func() { + execOrDie("ko nbctl show") + }) + + framework.ConformanceIt(`should succeed to execute "kubectl ko sbctl show"`, func() { + execOrDie("ko sbctl show") + }) + + framework.ConformanceIt(`should succeed to execute "kubectl ko vsctl show"`, func() { + ginkgo.By("Getting nodes") + nodeList, err := e2enode.GetReadySchedulableNodes(cs) + framework.ExpectNoError(err) + + for _, node := range nodeList.Items { + execOrDie(fmt.Sprintf("ko vsctl %s show", node.Name)) + } + }) + + framework.ConformanceIt(`should succeed to execute "kubectl ko ofctl show br-int"`, func() { + ginkgo.By("Getting nodes") + nodeList, err := e2enode.GetReadySchedulableNodes(cs) + framework.ExpectNoError(err) + + for _, node := range nodeList.Items { + execOrDie(fmt.Sprintf("ko ofctl %s show br-int", node.Name)) + } + }) + + framework.ConformanceIt(`should succeed to execute "kubectl ko dpctl show"`, func() { + ginkgo.By("Getting nodes") + nodeList, err := e2enode.GetReadySchedulableNodes(cs) + framework.ExpectNoError(err) + + for _, node := range nodeList.Items { + execOrDie(fmt.Sprintf("ko dpctl %s show", node.Name)) + } + }) + + framework.ConformanceIt(`should succeed to execute "kubectl ko appctl list-commands"`, func() { + ginkgo.By("Getting nodes") + nodeList, err := e2enode.GetReadySchedulableNodes(cs) + framework.ExpectNoError(err) + + for _, node := range nodeList.Items { + execOrDie(fmt.Sprintf("ko appctl %s list-commands", node.Name)) + } + }) + + framework.ConformanceIt(`should succeed to execute "kubectl ko nb/sb status/backup"`, func() { + databases := [...]string{"nb", "sb"} + actions := [...]string{"status", "backup"} + for _, db := range databases { + for _, action := range actions { + execOrDie(fmt.Sprintf("ko %s %s", db, action)) + // TODO: verify backup files are present + } + } + }) + + framework.ConformanceIt(`should succeed to execute "kubectl ko tcpdump -c1"`, func() { + name := "pod-" + framework.RandomSuffix() + ginkgo.By("Creating pod " + name) + ping, 
target := "ping", targetIPv4 + if f.IPv6() { + ping, target = "ping6", targetIPv6 + } + + cmd := []string{"sh", "-c", fmt.Sprintf(`while true; do %s -c1 -w1 %s; sleep 1; done`, ping, target)} + pod := framework.MakePod(namespaceName, name, nil, nil, framework.BusyBoxImage, cmd, nil) + pod.Spec.TerminationGracePeriodSeconds = new(int64) + pod = podClient.CreateSync(pod) + + execOrDie(fmt.Sprintf("ko tcpdump %s/%s -c1", pod.Namespace, pod.Name)) + + ginkgo.By("Deleting pod " + name) + podClient.DeleteSync(pod.Name) + }) + + framework.ConformanceIt(`should succeed to execute "kubectl ko trace "`, func() { + name := "pod-" + framework.RandomSuffix() + ginkgo.By("Creating pod " + name) + pod := framework.MakePod(namespaceName, name, nil, nil, "", nil, nil) + pod = podClient.CreateSync(pod) + + for _, ip := range pod.Status.PodIPs { + target := targetIPv4 + if util.CheckProtocol(ip.IP) == apiv1.ProtocolIPv6 { + target = targetIPv6 + } + + prefix := fmt.Sprintf("ko trace %s/%s %s", pod.Namespace, pod.Name, target) + execOrDie(fmt.Sprintf("%s icmp", prefix)) + execOrDie(fmt.Sprintf("%s tcp 80", prefix)) + execOrDie(fmt.Sprintf("%s udp 53", prefix)) + } + + ginkgo.By("Deleting pod " + name) + podClient.DeleteSync(pod.Name) + }) +}) diff --git a/test/e2e/kube-ovn/node/node.go b/test/e2e/kube-ovn/node/node.go new file mode 100644 index 00000000000..d3df826caab --- /dev/null +++ b/test/e2e/kube-ovn/node/node.go @@ -0,0 +1,46 @@ +package node + +import ( + clientset "k8s.io/client-go/kubernetes" + e2enode "k8s.io/kubernetes/test/e2e/framework/node" + + "github.com/onsi/ginkgo/v2" + + "github.com/kubeovn/kube-ovn/pkg/util" + "github.com/kubeovn/kube-ovn/test/e2e/framework" +) + +var _ = framework.Describe("[group:node]", func() { + f := framework.NewDefaultFramework("node") + f.SkipNamespaceCreation = true + + var cs clientset.Interface + var subnetClient *framework.SubnetClient + ginkgo.BeforeEach(func() { + cs = f.ClientSet + subnetClient = f.SubnetClient() + }) + + framework.ConformanceIt("should allocate ip in join subnet to node", func() { + ginkgo.By("Getting join subnet") + join := subnetClient.Get("join") + + ginkgo.By("Getting nodes") + nodeList, err := e2enode.GetReadySchedulableNodes(cs) + framework.ExpectNoError(err) + + ginkgo.By("Validating node annotations") + for _, node := range nodeList.Items { + framework.ExpectHaveKeyWithValue(node.Annotations, util.AllocatedAnnotation, "true") + framework.ExpectUUID(node.Annotations[util.ChassisAnnotation]) + framework.ExpectHaveKeyWithValue(node.Annotations, util.CidrAnnotation, join.Spec.CIDRBlock) + framework.ExpectHaveKeyWithValue(node.Annotations, util.GatewayAnnotation, join.Spec.Gateway) + framework.ExpectIPInCIDR(node.Annotations[util.IpAddressAnnotation], join.Spec.CIDRBlock) + framework.ExpectHaveKeyWithValue(node.Annotations, util.LogicalSwitchAnnotation, join.Name) + framework.ExpectMAC(node.Annotations[util.MacAddressAnnotation]) + framework.ExpectHaveKeyWithValue(node.Annotations, util.PortNameAnnotation, "node-"+node.Name) + + // TODO: check IP/route on ovn0 + } + }) +}) diff --git a/test/e2e/kube-ovn/qos/qos.go b/test/e2e/kube-ovn/qos/qos.go new file mode 100644 index 00000000000..b5aeb7c702e --- /dev/null +++ b/test/e2e/kube-ovn/qos/qos.go @@ -0,0 +1,244 @@ +package qos + +import ( + "fmt" + "strconv" + "strings" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/wait" + clientset "k8s.io/client-go/kubernetes" + e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" + + "github.com/onsi/ginkgo/v2" + + 
"github.com/kubeovn/kube-ovn/pkg/util" + "github.com/kubeovn/kube-ovn/test/e2e/framework" +) + +func parseConfig(table, config string) map[string]string { + kvs := make(map[string]string, 3) + for _, s := range strings.Fields(config) { + kv := strings.Split(s, "=") + if len(kv) != 2 { + framework.Logf("ignore %s config %s", table, s) + continue + } + kvs[kv[0]] = kv[1] + } + + return kvs +} + +func getOvsQosForPod(cs clientset.Interface, table string, pod *corev1.Pod) map[string]string { + ovsPod := framework.GetOvsPodOnNode(cs, pod.Spec.NodeName) + cmd := fmt.Sprintf(`ovs-vsctl --no-heading --columns=other_config --bare find %s external_ids:pod="%s/%s"`, table, pod.Namespace, pod.Name) + output := e2epodoutput.RunHostCmdOrDie(ovsPod.Namespace, ovsPod.Name, cmd) + return parseConfig(table, output) +} + +func getOvsQosForPodRetry(cs clientset.Interface, table string, pod *corev1.Pod, expected map[string]string) map[string]string { + ovsPod := framework.GetOvsPodOnNode(cs, pod.Spec.NodeName) + cmd := fmt.Sprintf(`ovs-vsctl --no-heading --columns=other_config --bare find %s external_ids:pod="%s/%s"`, table, pod.Namespace, pod.Name) + + var config map[string]string + err := wait.PollImmediate(2*time.Second, 2*time.Minute, func() (bool, error) { + output, err := e2epodoutput.RunHostCmd(ovsPod.Namespace, ovsPod.Name, cmd) + if err != nil { + return false, err + } + if output == "" { + return false, nil + } + kvs := parseConfig(table, output) + for k, v := range expected { + if kvs[k] != v { + return false, nil + } + } + + config = kvs + return true, nil + }) + framework.ExpectNoError(err, "timed out getting ovs %s config for pod %s/%s", table, pod.Namespace, pod.Name) + + return config +} + +var _ = framework.Describe("[group:qos]", func() { + f := framework.NewDefaultFramework("qos") + + var subnetName, namespaceName string + var cs clientset.Interface + var podClient *framework.PodClient + var subnetClient *framework.SubnetClient + + ginkgo.BeforeEach(func() { + cs = f.ClientSet + podClient = f.PodClient() + subnetClient = f.SubnetClient() + namespaceName = f.Namespace.Name + }) + ginkgo.AfterEach(func() { + if subnetName != "" { + ginkgo.By("Deleting subnet " + subnetName) + subnetClient.DeleteSync(subnetName) + } + }) + + framework.ConformanceIt(`should support netem QoS"`, func() { + name := "pod-" + framework.RandomSuffix() + ginkgo.By("Creating pod " + name) + latency, limit, loss := 600, 2000, 10 + annotations := map[string]string{ + util.NetemQosLatencyAnnotation: strconv.Itoa(latency), + util.NetemQosLimitAnnotation: strconv.Itoa(limit), + util.NetemQosLossAnnotation: strconv.Itoa(loss), + } + pod := framework.MakePod(namespaceName, name, nil, annotations, "", nil, nil) + pod = podClient.CreateSync(pod) + + ginkgo.By("Validating pod annotations") + framework.ExpectHaveKeyWithValue(pod.Annotations, util.AllocatedAnnotation, "true") + framework.ExpectHaveKeyWithValue(pod.Annotations, util.RoutedAnnotation, "true") + framework.ExpectHaveKeyWithValue(pod.Annotations, util.NetemQosLatencyAnnotation, strconv.Itoa(latency)) + framework.ExpectHaveKeyWithValue(pod.Annotations, util.NetemQosLimitAnnotation, strconv.Itoa(limit)) + framework.ExpectHaveKeyWithValue(pod.Annotations, util.NetemQosLossAnnotation, strconv.Itoa(loss)) + + ginkgo.By("Validating OVS QoS") + qos := getOvsQosForPod(cs, "qos", pod) + framework.ExpectHaveKeyWithValue(qos, "latency", strconv.Itoa(latency*1000)) + framework.ExpectHaveKeyWithValue(qos, "limit", strconv.Itoa(limit)) + framework.ExpectHaveKeyWithValue(qos, "loss", 
strconv.Itoa(loss)) + + ginkgo.By("Deleting pod " + name) + podClient.DeleteSync(pod.Name) + }) + + framework.ConformanceIt(`should be able to update netem QoS`, func() { + name := "pod-" + framework.RandomSuffix() + ginkgo.By("Creating pod " + name + " without QoS") + pod := framework.MakePod(namespaceName, name, nil, nil, "", nil, nil) + pod = podClient.CreateSync(pod) + + ginkgo.By("Validating pod annotations") + framework.ExpectHaveKeyWithValue(pod.Annotations, util.AllocatedAnnotation, "true") + framework.ExpectHaveKeyWithValue(pod.Annotations, util.RoutedAnnotation, "true") + framework.ExpectNotHaveKey(pod.Annotations, util.NetemQosLatencyAnnotation) + framework.ExpectNotHaveKey(pod.Annotations, util.NetemQosLimitAnnotation) + framework.ExpectNotHaveKey(pod.Annotations, util.NetemQosLossAnnotation) + + ginkgo.By("Adding netem QoS to pod annotations") + latency, limit, loss := 600, 2000, 10 + modifiedPod := pod.DeepCopy() + modifiedPod.Annotations[util.NetemQosLatencyAnnotation] = strconv.Itoa(latency) + modifiedPod.Annotations[util.NetemQosLimitAnnotation] = strconv.Itoa(limit) + modifiedPod.Annotations[util.NetemQosLossAnnotation] = strconv.Itoa(loss) + pod = podClient.PatchPod(pod, modifiedPod) + + ginkgo.By("Validating pod annotations") + framework.ExpectHaveKeyWithValue(pod.Annotations, util.AllocatedAnnotation, "true") + framework.ExpectHaveKeyWithValue(pod.Annotations, util.RoutedAnnotation, "true") + framework.ExpectHaveKeyWithValue(pod.Annotations, util.NetemQosLatencyAnnotation, strconv.Itoa(latency)) + framework.ExpectHaveKeyWithValue(pod.Annotations, util.NetemQosLimitAnnotation, strconv.Itoa(limit)) + framework.ExpectHaveKeyWithValue(pod.Annotations, util.NetemQosLossAnnotation, strconv.Itoa(loss)) + + ginkgo.By("Validating OVS QoS") + qos := getOvsQosForPodRetry(cs, "qos", pod, nil) + framework.ExpectHaveKeyWithValue(qos, "latency", strconv.Itoa(latency*1000)) + framework.ExpectHaveKeyWithValue(qos, "limit", strconv.Itoa(limit)) + framework.ExpectHaveKeyWithValue(qos, "loss", strconv.Itoa(loss)) + + ginkgo.By("Deleting pod " + name) + podClient.DeleteSync(pod.Name) + }) + + framework.ConformanceIt(`should support htb QoS`, func() { + name := "pod-" + framework.RandomSuffix() + ginkgo.By("Creating pod " + name) + priority, ingressRate := 50, 300 + annotations := map[string]string{ + util.PriorityAnnotation: strconv.Itoa(priority), + util.IngressRateAnnotation: strconv.Itoa(ingressRate), + } + pod := framework.MakePod(namespaceName, name, nil, annotations, "", nil, nil) + pod = podClient.CreateSync(pod) + + ginkgo.By("Validating pod annotations") + framework.ExpectHaveKeyWithValue(pod.Annotations, util.AllocatedAnnotation, "true") + framework.ExpectHaveKeyWithValue(pod.Annotations, util.RoutedAnnotation, "true") + framework.ExpectHaveKeyWithValue(pod.Annotations, util.PriorityAnnotation, strconv.Itoa(priority)) + framework.ExpectHaveKeyWithValue(pod.Annotations, util.IngressRateAnnotation, strconv.Itoa(ingressRate)) + + ginkgo.By("Validating OVS Queue") + queue := getOvsQosForPod(cs, "queue", pod) + framework.ExpectHaveKeyWithValue(queue, "max-rate", strconv.Itoa(ingressRate*1000*1000)) + framework.ExpectHaveKeyWithValue(queue, "priority", strconv.Itoa(priority)) + + ginkgo.By("Deleting pod " + name) + podClient.DeleteSync(pod.Name) + }) + + framework.ConformanceIt(`should be able to update htb QoS`, func() { + subnetName = f.Namespace.Name + ginkgo.By("Creating subnet " + subnetName + " with htb QoS") + cidr := framework.RandomCIDR(f.ClusterIpFamily) + subnet := 
framework.MakeSubnet(subnetName, "", cidr, "", nil, nil, []string{namespaceName}) + subnet.Spec.HtbQos = util.HtbQosLow + subnetClient.CreateSync(subnet) + + ginkgo.By("Validating subnet .spec.htbqos field") + framework.ExpectEqual(subnet.Spec.HtbQos, util.HtbQosLow) + + name := "pod-" + framework.RandomSuffix() + ginkgo.By("Creating pod " + name) + pod := framework.MakePod(namespaceName, name, nil, nil, "", nil, nil) + pod = podClient.CreateSync(pod) + + ginkgo.By("Validating pod annotations") + framework.ExpectHaveKeyWithValue(pod.Annotations, util.AllocatedAnnotation, "true") + framework.ExpectHaveKeyWithValue(pod.Annotations, util.RoutedAnnotation, "true") + framework.ExpectNotHaveKey(pod.Annotations, util.PriorityAnnotation) + framework.ExpectNotHaveKey(pod.Annotations, util.IngressRateAnnotation) + + ginkgo.By("Validating OVS Queue") + defaultPriority := 5 + queue := getOvsQosForPod(cs, "queue", pod) + framework.ExpectHaveKeyWithValue(queue, "priority", strconv.Itoa(defaultPriority)) + + ginkgo.By("Update htb priority by adding pod annotation") + priority := 2 + modifiedPod := pod.DeepCopy() + modifiedPod.Annotations[util.PriorityAnnotation] = strconv.Itoa(priority) + pod = podClient.PatchPod(pod, modifiedPod) + + ginkgo.By("Validating pod annotations") + framework.ExpectHaveKeyWithValue(pod.Annotations, util.AllocatedAnnotation, "true") + framework.ExpectHaveKeyWithValue(pod.Annotations, util.RoutedAnnotation, "true") + framework.ExpectHaveKeyWithValue(pod.Annotations, util.PriorityAnnotation, strconv.Itoa(priority)) + framework.ExpectNotHaveKey(pod.Annotations, util.IngressRateAnnotation) + + ginkgo.By("Validating OVS Queue") + expected := map[string]string{"priority": strconv.Itoa(priority)} + _ = getOvsQosForPodRetry(cs, "queue", pod, expected) + + ginkgo.By("Update htb priority by deleting pod annotation") + modifiedPod = pod.DeepCopy() + delete(modifiedPod.Annotations, util.PriorityAnnotation) + pod = podClient.PatchPod(pod, modifiedPod) + + ginkgo.By("Validating pod annotations") + framework.ExpectHaveKeyWithValue(pod.Annotations, util.AllocatedAnnotation, "true") + framework.ExpectHaveKeyWithValue(pod.Annotations, util.RoutedAnnotation, "true") + framework.ExpectNotHaveKey(pod.Annotations, util.PriorityAnnotation) + framework.ExpectNotHaveKey(pod.Annotations, util.IngressRateAnnotation) + + ginkgo.By("Validating OVS Queue") + expected = map[string]string{"priority": strconv.Itoa(defaultPriority)} + _ = getOvsQosForPodRetry(cs, "queue", pod, expected) + + ginkgo.By("Deleting pod " + name) + podClient.DeleteSync(pod.Name) + }) +}) diff --git a/test/e2e/kube-ovn/subnet/subnet.go b/test/e2e/kube-ovn/subnet/subnet.go new file mode 100644 index 00000000000..d92614d66ea --- /dev/null +++ b/test/e2e/kube-ovn/subnet/subnet.go @@ -0,0 +1,525 @@ +package subnet + +import ( + "fmt" + "math/rand" + "net" + "strconv" + "strings" + + clientset "k8s.io/client-go/kubernetes" + e2enode "k8s.io/kubernetes/test/e2e/framework/node" + + "github.com/onsi/ginkgo/v2" + + apiv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" + "github.com/kubeovn/kube-ovn/pkg/util" + "github.com/kubeovn/kube-ovn/test/e2e/framework" + "github.com/kubeovn/kube-ovn/test/e2e/framework/docker" + "github.com/kubeovn/kube-ovn/test/e2e/framework/iproute" + "github.com/kubeovn/kube-ovn/test/e2e/framework/kind" +) + +var _ = framework.Describe("[group:subnet]", func() { + f := framework.NewDefaultFramework("subnet") + + var subnet *apiv1.Subnet + var cs clientset.Interface + var podClient *framework.PodClient + var 
subnetClient *framework.SubnetClient + var namespaceName, subnetName string + var cidr, cidrV4, cidrV6, firstIPv4, firstIPv6 string + var gateways []string + + ginkgo.BeforeEach(func() { + cs = f.ClientSet + podClient = f.PodClient() + subnetClient = f.SubnetClient() + namespaceName = f.Namespace.Name + subnetName = "subnet-" + framework.RandomSuffix() + cidr = framework.RandomCIDR(f.ClusterIpFamily) + cidrV4, cidrV6 = util.SplitStringIP(cidr) + gateways = nil + if cidrV4 == "" { + firstIPv4 = "" + } else { + firstIPv4, _ = util.FirstIP(cidrV4) + gateways = append(gateways, firstIPv4) + } + if cidrV6 == "" { + firstIPv6 = "" + } else { + firstIPv6, _ = util.FirstIP(cidrV6) + gateways = append(gateways, firstIPv6) + } + }) + ginkgo.AfterEach(func() { + ginkgo.By("Deleting subnet " + subnetName) + subnetClient.DeleteSync(subnetName) + }) + + framework.ConformanceIt("should create subnet with only cidr provided", func() { + ginkgo.By("Creating subnet " + subnetName) + subnet = framework.MakeSubnet(subnetName, "", cidr, "", nil, nil, nil) + subnet = subnetClient.CreateSync(subnet) + + ginkgo.By("Validating subnet finalizers") + framework.ExpectContainElement(subnet.Finalizers, util.ControllerName) + + ginkgo.By("Validating subnet spec fields") + framework.ExpectFalse(subnet.Spec.Default) + framework.ExpectEqual(subnet.Spec.Protocol, util.CheckProtocol(cidr)) + framework.ExpectEmpty(subnet.Spec.Namespaces) + framework.ExpectConsistOf(subnet.Spec.ExcludeIps, gateways) + framework.ExpectEqual(subnet.Spec.Gateway, strings.Join(gateways, ",")) + framework.ExpectEqual(subnet.Spec.GatewayType, apiv1.GWDistributedType) + framework.ExpectEmpty(subnet.Spec.GatewayNode) + framework.ExpectFalse(subnet.Spec.NatOutgoing) + framework.ExpectFalse(subnet.Spec.Private) + framework.ExpectEmpty(subnet.Spec.AllowSubnets) + + ginkgo.By("Validating subnet status fields") + framework.ExpectEmpty(subnet.Status.ActivateGateway) + framework.ExpectZero(subnet.Status.V4UsingIPs) + framework.ExpectZero(subnet.Status.V6UsingIPs) + + if cidrV4 == "" { + framework.ExpectZero(subnet.Status.V4AvailableIPs) + } else { + _, ipnet, _ := net.ParseCIDR(cidrV4) + framework.ExpectEqual(subnet.Status.V4AvailableIPs, util.AddressCount(ipnet)-1) + } + if cidrV6 == "" { + framework.ExpectZero(subnet.Status.V6AvailableIPs) + } else { + _, ipnet, _ := net.ParseCIDR(cidrV6) + framework.ExpectEqual(subnet.Status.V6AvailableIPs, util.AddressCount(ipnet)-1) + } + + // TODO: check routes on ovn0 + }) + + framework.ConformanceIt("should format subnet cidr", func() { + fn := func(cidr string) string { + if cidr == "" { + return "" + } + _, ipnet, _ := net.ParseCIDR(cidr) + ipnet.IP = net.ParseIP(framework.RandomIPPool(cidr, 1)) + return ipnet.String() + } + + s := make([]string, 0, 2) + if c := fn(cidrV4); c != "" { + s = append(s, c) + } + if c := fn(cidrV6); c != "" { + s = append(s, c) + } + + subnet = framework.MakeSubnet(subnetName, "", strings.Join(s, ","), "", nil, nil, nil) + ginkgo.By("Creating subnet " + subnetName + " with cidr " + subnet.Spec.CIDRBlock) + subnet = subnetClient.CreateSync(subnet) + + ginkgo.By("Validating subnet finalizers") + framework.ExpectContainElement(subnet.ObjectMeta.Finalizers, util.ControllerName) + + ginkgo.By("Validating subnet spec fields") + framework.ExpectFalse(subnet.Spec.Default) + framework.ExpectEqual(subnet.Spec.Protocol, util.CheckProtocol(cidr)) + framework.ExpectEmpty(subnet.Spec.Namespaces) + framework.ExpectConsistOf(subnet.Spec.ExcludeIps, gateways) + framework.ExpectEqual(subnet.Spec.Gateway, 
strings.Join(gateways, ",")) + framework.ExpectEqual(subnet.Spec.GatewayType, apiv1.GWDistributedType) + framework.ExpectEmpty(subnet.Spec.GatewayNode) + framework.ExpectFalse(subnet.Spec.NatOutgoing) + framework.ExpectFalse(subnet.Spec.Private) + framework.ExpectEmpty(subnet.Spec.AllowSubnets) + + ginkgo.By("Validating subnet status fields") + framework.ExpectEmpty(subnet.Status.ActivateGateway) + framework.ExpectZero(subnet.Status.V4UsingIPs) + framework.ExpectZero(subnet.Status.V6UsingIPs) + + if cidrV4 == "" { + framework.ExpectZero(subnet.Status.V4AvailableIPs) + } else { + _, ipnet, _ := net.ParseCIDR(cidrV4) + framework.ExpectEqual(subnet.Status.V4AvailableIPs, util.AddressCount(ipnet)-1) + } + if cidrV6 == "" { + framework.ExpectZero(subnet.Status.V6AvailableIPs) + } else { + _, ipnet, _ := net.ParseCIDR(cidrV6) + framework.ExpectEqual(subnet.Status.V6AvailableIPs, util.AddressCount(ipnet)-1) + } + + // TODO: check routes on ovn0 + }) + + framework.ConformanceIt("should create subnet with exclude ips", func() { + excludeIPv4 := framework.RandomExcludeIPs(cidrV4, rand.Intn(10)+1) + excludeIPv6 := framework.RandomExcludeIPs(cidrV6, rand.Intn(10)+1) + excludeIPs := append(excludeIPv4, excludeIPv6...) + + ginkgo.By(fmt.Sprintf("Creating subnet %s with exclude ips %v", subnetName, excludeIPs)) + subnet = framework.MakeSubnet(subnetName, "", cidr, "", excludeIPs, nil, nil) + subnet = subnetClient.CreateSync(subnet) + + ginkgo.By("Validating subnet finalizers") + framework.ExpectContainElement(subnet.ObjectMeta.Finalizers, util.ControllerName) + + ginkgo.By("Validating subnet spec fields") + framework.ExpectFalse(subnet.Spec.Default) + framework.ExpectEqual(subnet.Spec.Protocol, util.CheckProtocol(cidr)) + framework.ExpectEmpty(subnet.Spec.Namespaces) + framework.ExpectConsistOf(subnet.Spec.ExcludeIps, append(excludeIPs, gateways...)) + framework.ExpectEqual(subnet.Spec.Gateway, strings.Join(gateways, ",")) + framework.ExpectEqual(subnet.Spec.GatewayType, apiv1.GWDistributedType) + framework.ExpectEmpty(subnet.Spec.GatewayNode) + framework.ExpectFalse(subnet.Spec.NatOutgoing) + framework.ExpectFalse(subnet.Spec.Private) + framework.ExpectEmpty(subnet.Spec.AllowSubnets) + + ginkgo.By("Validating subnet status fields") + framework.ExpectEmpty(subnet.Status.ActivateGateway) + framework.ExpectZero(subnet.Status.V4UsingIPs) + framework.ExpectZero(subnet.Status.V6UsingIPs) + + if cidrV4 == "" { + framework.ExpectZero(subnet.Status.V4AvailableIPs) + } else { + _, ipnet, _ := net.ParseCIDR(cidrV4) + expected := util.AddressCount(ipnet) - util.CountIpNums(excludeIPv4) - 1 + framework.ExpectEqual(subnet.Status.V4AvailableIPs, expected) + } + if cidrV6 == "" { + framework.ExpectZero(subnet.Status.V6AvailableIPs) + } else { + _, ipnet, _ := net.ParseCIDR(cidrV6) + expected := util.AddressCount(ipnet) - util.CountIpNums(excludeIPv6) - 1 + framework.ExpectEqual(subnet.Status.V6AvailableIPs, expected) + } + }) + + framework.ConformanceIt("should create subnet with centralized gateway", func() { + ginkgo.By("Getting nodes") + nodes, err := e2enode.GetReadySchedulableNodes(cs) + framework.ExpectNoError(err) + framework.ExpectNotEmpty(nodes.Items) + + ginkgo.By("Creating subnet " + subnetName) + gatewayNodes := make([]string, 0, len(nodes.Items)) + for i := 0; i < 3 && i < len(nodes.Items); i++ { + gatewayNodes = append(gatewayNodes, nodes.Items[i].Name) + } + subnet = framework.MakeSubnet(subnetName, "", cidr, "", nil, gatewayNodes, nil) + subnet = subnetClient.CreateSync(subnet) + + ginkgo.By("Validating 
subnet finalizers") + framework.ExpectContainElement(subnet.Finalizers, util.ControllerName) + + ginkgo.By("Validating subnet spec fields") + framework.ExpectFalse(subnet.Spec.Default) + framework.ExpectEqual(subnet.Spec.Protocol, util.CheckProtocol(cidr)) + framework.ExpectEmpty(subnet.Spec.Namespaces) + framework.ExpectConsistOf(subnet.Spec.ExcludeIps, gateways) + framework.ExpectEqual(subnet.Spec.Gateway, strings.Join(gateways, ",")) + framework.ExpectEqual(subnet.Spec.GatewayType, apiv1.GWCentralizedType) + framework.ExpectConsistOf(strings.Split(subnet.Spec.GatewayNode, ","), gatewayNodes) + framework.ExpectFalse(subnet.Spec.NatOutgoing) + framework.ExpectFalse(subnet.Spec.Private) + framework.ExpectEmpty(subnet.Spec.AllowSubnets) + + ginkgo.By("Validating subnet status fields") + framework.ExpectContainElement(gatewayNodes, subnet.Status.ActivateGateway) + framework.ExpectZero(subnet.Status.V4UsingIPs) + framework.ExpectZero(subnet.Status.V6UsingIPs) + + if cidrV4 == "" { + framework.ExpectZero(subnet.Status.V4AvailableIPs) + } else { + _, ipnet, _ := net.ParseCIDR(cidrV4) + framework.ExpectEqual(subnet.Status.V4AvailableIPs, util.AddressCount(ipnet)-1) + } + if cidrV6 == "" { + framework.ExpectZero(subnet.Status.V6AvailableIPs) + } else { + _, ipnet, _ := net.ParseCIDR(cidrV6) + framework.ExpectEqual(subnet.Status.V6AvailableIPs, util.AddressCount(ipnet)-1) + } + }) + + framework.ConformanceIt("should be able to switch gateway mode to centralized", func() { + ginkgo.By("Getting nodes") + nodes, err := e2enode.GetReadySchedulableNodes(cs) + framework.ExpectNoError(err) + framework.ExpectNotEmpty(nodes.Items) + + ginkgo.By("Creating subnet " + subnetName) + subnet = framework.MakeSubnet(subnetName, "", cidr, "", nil, nil, nil) + subnet = subnetClient.CreateSync(subnet) + + ginkgo.By("Validating subnet finalizers") + framework.ExpectContainElement(subnet.Finalizers, util.ControllerName) + + ginkgo.By("Validating subnet spec fields") + framework.ExpectFalse(subnet.Spec.Default) + framework.ExpectEqual(subnet.Spec.Protocol, util.CheckProtocol(cidr)) + framework.ExpectEmpty(subnet.Spec.Namespaces) + framework.ExpectConsistOf(subnet.Spec.ExcludeIps, gateways) + framework.ExpectEqual(subnet.Spec.Gateway, strings.Join(gateways, ",")) + framework.ExpectEqual(subnet.Spec.GatewayType, apiv1.GWDistributedType) + framework.ExpectEmpty(subnet.Spec.GatewayNode) + framework.ExpectFalse(subnet.Spec.NatOutgoing) + framework.ExpectFalse(subnet.Spec.Private) + framework.ExpectEmpty(subnet.Spec.AllowSubnets) + + ginkgo.By("Validating subnet status fields") + framework.ExpectEmpty(subnet.Status.ActivateGateway) + framework.ExpectZero(subnet.Status.V4UsingIPs) + framework.ExpectZero(subnet.Status.V6UsingIPs) + + if cidrV4 == "" { + framework.ExpectZero(subnet.Status.V4AvailableIPs) + } else { + _, ipnet, _ := net.ParseCIDR(cidrV4) + framework.ExpectEqual(subnet.Status.V4AvailableIPs, util.AddressCount(ipnet)-1) + } + if cidrV6 == "" { + framework.ExpectZero(subnet.Status.V6AvailableIPs) + } else { + _, ipnet, _ := net.ParseCIDR(cidrV6) + framework.ExpectEqual(subnet.Status.V6AvailableIPs, util.AddressCount(ipnet)-1) + } + + ginkgo.By("Converting gateway mode to centralized") + gatewayNodes := make([]string, 0, len(nodes.Items)) + for i := 0; i < 3 && i < len(nodes.Items); i++ { + gatewayNodes = append(gatewayNodes, nodes.Items[i].Name) + } + modifiedSubnet := subnet.DeepCopy() + modifiedSubnet.Spec.GatewayNode = strings.Join(gatewayNodes, ",") + modifiedSubnet.Spec.GatewayType = apiv1.GWCentralizedType + 
subnet = subnetClient.PatchSync(subnet, modifiedSubnet) + + ginkgo.By("Validating subnet finalizers") + framework.ExpectContainElement(subnet.ObjectMeta.Finalizers, util.ControllerName) + + ginkgo.By("Validating subnet spec fields") + framework.ExpectFalse(subnet.Spec.Default) + framework.ExpectEqual(subnet.Spec.Protocol, util.CheckProtocol(cidr)) + framework.ExpectEmpty(subnet.Spec.Namespaces) + framework.ExpectConsistOf(subnet.Spec.ExcludeIps, gateways) + framework.ExpectEqual(subnet.Spec.Gateway, strings.Join(gateways, ",")) + framework.ExpectEqual(subnet.Spec.GatewayType, apiv1.GWCentralizedType) + framework.ExpectConsistOf(strings.Split(subnet.Spec.GatewayNode, ","), gatewayNodes) + framework.ExpectFalse(subnet.Spec.NatOutgoing) + framework.ExpectFalse(subnet.Spec.Private) + framework.ExpectEmpty(subnet.Spec.AllowSubnets) + + ginkgo.By("Validating subnet status fields") + framework.ExpectContainElement(gatewayNodes, subnet.Status.ActivateGateway) + framework.ExpectZero(subnet.Status.V4UsingIPs) + framework.ExpectZero(subnet.Status.V6UsingIPs) + + if cidrV4 == "" { + framework.ExpectZero(subnet.Status.V4AvailableIPs) + } else { + _, ipnet, _ := net.ParseCIDR(cidrV4) + framework.ExpectEqual(subnet.Status.V4AvailableIPs, util.AddressCount(ipnet)-1) + } + if cidrV6 == "" { + framework.ExpectZero(subnet.Status.V6AvailableIPs) + } else { + _, ipnet, _ := net.ParseCIDR(cidrV6) + framework.ExpectEqual(subnet.Status.V6AvailableIPs, util.AddressCount(ipnet)-1) + } + }) + + framework.ConformanceIt("should support distributed external egress gateway", func() { + ginkgo.By("Getting nodes") + nodes, err := e2enode.GetReadySchedulableNodes(cs) + framework.ExpectNoError(err) + framework.ExpectNotEmpty(nodes.Items) + + clusterName, ok := kind.IsKindProvided(nodes.Items[0].Spec.ProviderID) + if !ok { + ginkgo.Skip("external egress gateway spec only runs in clusters created by kind") + } + + ginkgo.By("Getting docker network used by kind") + network, err := docker.NetworkGet(kind.NetworkName) + framework.ExpectNoError(err) + + ginkgo.By("Determine external egress gateway addresses") + gateways := make([]string, 0, 2) + for _, config := range network.IPAM.Config { + if config.Subnet != "" { + switch util.CheckProtocol(config.Subnet) { + case apiv1.ProtocolIPv4: + if cidrV4 != "" { + gateway, err := util.LastIP(config.Subnet) + framework.ExpectNoError(err) + gateways = append(gateways, gateway) + } + case apiv1.ProtocolIPv6: + if cidrV6 != "" { + gateway, err := util.LastIP(config.Subnet) + framework.ExpectNoError(err) + gateways = append(gateways, gateway) + } + } + } + } + + ginkgo.By("Creating subnet " + subnetName) + prPriority := 1000 + rand.Intn(1000) + prTable := 1000 + rand.Intn(1000) + subnet = framework.MakeSubnet(subnetName, "", cidr, "", nil, nil, nil) + subnet.Spec.ExternalEgressGateway = strings.Join(gateways, ",") + subnet.Spec.PolicyRoutingPriority = uint32(prPriority) + subnet.Spec.PolicyRoutingTableID = uint32(prTable) + subnet = subnetClient.CreateSync(subnet) + + ginkgo.By("Creating pod") + podName := "pod-" + framework.RandomSuffix() + pod := framework.MakePod(namespaceName, podName, nil, nil, "", nil, nil) + pod = podClient.CreateSync(pod) + + ginkgo.By("Getting kind nodes") + kindNodes, err := kind.ListNodes(clusterName, "") + framework.ExpectNoError(err) + framework.ExpectNotEmpty(kindNodes) + + for _, node := range kindNodes { + ginkgo.By("Getting ip rules in node " + node.Name()) + rules, err := iproute.RuleShow("", node.Exec) + framework.ExpectNoError(err) + + 
ginkgo.By("Checking ip rules in node " + node.Name()) + podIPs := make([]string, 0, len(pod.Status.PodIPs)) + for _, podIP := range pod.Status.PodIPs { + podIPs = append(podIPs, podIP.IP) + } + for _, rule := range rules { + if rule.Priority == prPriority && + rule.Table == strconv.Itoa(prTable) { + framework.ExpectEqual(pod.Spec.NodeName, node.Name()) + framework.ExpectContainElement(podIPs, rule.Src) + framework.ExpectEqual(rule.SrcLen, 0) + } + } + + if pod.Spec.NodeName != node.Name() { + continue + } + + ginkgo.By("Getting ip routes in node " + node.Name()) + routes, err := iproute.RouteShow(strconv.Itoa(prTable), "", node.Exec) + framework.ExpectNoError(err) + + ginkgo.By("Checking ip routes in node " + node.Name()) + framework.ExpectHaveLen(routes, len(gateways)) + nexthops := make([]string, 0, 2) + for _, route := range routes { + framework.ExpectEqual(route.Dst, "default") + nexthops = append(nexthops, route.Gateway) + } + framework.ExpectConsistOf(nexthops, gateways) + } + }) + + framework.ConformanceIt("should support centralized external egress gateway", func() { + ginkgo.By("Getting nodes") + nodes, err := e2enode.GetReadySchedulableNodes(cs) + framework.ExpectNoError(err) + framework.ExpectNotEmpty(nodes.Items) + + clusterName, ok := kind.IsKindProvided(nodes.Items[0].Spec.ProviderID) + if !ok { + ginkgo.Skip("external egress gateway spec only runs in clusters created by kind") + } + + ginkgo.By("Getting docker network used by kind") + network, err := docker.NetworkGet(kind.NetworkName) + framework.ExpectNoError(err) + + ginkgo.By("Determine external egress gateway addresses") + cidrs := make([]string, 0, 2) + gateways := make([]string, 0, 2) + for _, config := range network.IPAM.Config { + if config.Subnet != "" { + switch util.CheckProtocol(config.Subnet) { + case apiv1.ProtocolIPv4: + if cidrV4 != "" { + gateway, err := util.LastIP(config.Subnet) + framework.ExpectNoError(err) + cidrs = append(cidrs, cidrV4) + gateways = append(gateways, gateway) + } + case apiv1.ProtocolIPv6: + if cidrV6 != "" { + gateway, err := util.LastIP(config.Subnet) + framework.ExpectNoError(err) + cidrs = append(cidrs, cidrV6) + gateways = append(gateways, gateway) + } + } + } + } + + ginkgo.By("Creating subnet " + subnetName) + gatewayNodes := make([]string, 0, len(nodes.Items)) + for i := 0; i < 3 && i < len(nodes.Items); i++ { + gatewayNodes = append(gatewayNodes, nodes.Items[i].Name) + } + prPriority := 1000 + rand.Intn(1000) + prTable := 1000 + rand.Intn(1000) + subnet = framework.MakeSubnet(subnetName, "", cidr, "", nil, gatewayNodes, nil) + subnet.Spec.ExternalEgressGateway = strings.Join(gateways, ",") + subnet.Spec.PolicyRoutingPriority = uint32(prPriority) + subnet.Spec.PolicyRoutingTableID = uint32(prTable) + subnet = subnetClient.CreateSync(subnet) + + ginkgo.By("Getting kind nodes") + kindNodes, err := kind.ListNodes(clusterName, "") + framework.ExpectNoError(err) + framework.ExpectNotEmpty(kindNodes) + + for _, node := range kindNodes { + shouldHavePolicyRoute := util.ContainsString(gatewayNodes, node.Name()) + ginkgo.By("Getting ip rules in node " + node.Name()) + rules, err := iproute.RuleShow("", node.Exec) + framework.ExpectNoError(err) + + ginkgo.By("Checking ip rules in node " + node.Name()) + var found int + for _, rule := range rules { + if rule.Priority == prPriority && + rule.Table == strconv.Itoa(prTable) { + framework.ExpectContainElement(cidrs, fmt.Sprintf("%s/%d", rule.Src, rule.SrcLen)) + found++ + } + } + if !shouldHavePolicyRoute { + framework.ExpectZero(found) + 
continue + } + framework.ExpectEqual(found, len(gateways)) + + ginkgo.By("Getting ip routes in node " + node.Name()) + routes, err := iproute.RouteShow(strconv.Itoa(prTable), "", node.Exec) + framework.ExpectNoError(err) + + ginkgo.By("Checking ip routes in node " + node.Name()) + framework.ExpectHaveLen(routes, len(gateways)) + nexthops := make([]string, 0, 2) + for _, route := range routes { + framework.ExpectEqual(route.Dst, "default") + nexthops = append(nexthops, route.Gateway) + } + framework.ExpectConsistOf(nexthops, gateways) + } + }) +}) diff --git a/test/e2e/kube-ovn/underlay/underlay.go b/test/e2e/kube-ovn/underlay/underlay.go new file mode 100644 index 00000000000..0554b728325 --- /dev/null +++ b/test/e2e/kube-ovn/underlay/underlay.go @@ -0,0 +1,330 @@ +package underlay + +import ( + "fmt" + "strconv" + "strings" + "time" + + dockertypes "github.com/docker/docker/api/types" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clientset "k8s.io/client-go/kubernetes" + e2enode "k8s.io/kubernetes/test/e2e/framework/node" + + "github.com/onsi/ginkgo/v2" + + apiv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" + "github.com/kubeovn/kube-ovn/pkg/util" + "github.com/kubeovn/kube-ovn/test/e2e/framework" + "github.com/kubeovn/kube-ovn/test/e2e/framework/docker" + "github.com/kubeovn/kube-ovn/test/e2e/framework/iproute" + "github.com/kubeovn/kube-ovn/test/e2e/framework/kind" +) + +const dockerNetworkName = "kube-ovn-vlan" + +func makeProviderNetwork(providerNetworkName string, exchangeLinkName bool, linkMap map[string]*iproute.Link) *apiv1.ProviderNetwork { + var defaultInterface string + customInterfaces := make(map[string][]string, 0) + for node, link := range linkMap { + if !strings.ContainsRune(node, '-') { + continue + } + + if defaultInterface == "" { + defaultInterface = link.IfName + } else if link.IfName != defaultInterface { + customInterfaces[link.IfName] = append(customInterfaces[link.IfName], node) + } + } + + return framework.MakeProviderNetwork(providerNetworkName, exchangeLinkName, defaultInterface, customInterfaces, nil) +} + +var _ = framework.Describe("[group:underlay]", func() { + f := framework.NewDefaultFramework("provider-network") + + var skip bool + var itFn func(bool) + var cs clientset.Interface + var nodeNames []string + var clusterName, providerNetworkName, vlanName, subnetName, namespaceName string + var linkMap map[string]*iproute.Link + var routeMap map[string][]iproute.Route + var podClient *framework.PodClient + var subnetClient *framework.SubnetClient + var vlanClient *framework.VlanClient + var providerNetworkClient *framework.ProviderNetworkClient + var dockerNetwork *dockertypes.NetworkResource + + ginkgo.BeforeEach(func() { + cs = f.ClientSet + podClient = f.PodClient() + subnetClient = f.SubnetClient() + vlanClient = f.VlanClient() + providerNetworkClient = f.ProviderNetworkClient() + namespaceName = f.Namespace.Name + subnetName = "subnet-" + framework.RandomSuffix() + vlanName = "vlan-" + framework.RandomSuffix() + providerNetworkName = "pn-" + framework.RandomSuffix() + + if skip { + ginkgo.Skip("underlay spec only runs on kind clusters") + } + + if clusterName == "" { + ginkgo.By("Getting k8s nodes") + k8sNodes, err := e2enode.GetReadySchedulableNodes(cs) + framework.ExpectNoError(err) + + cluster, ok := kind.IsKindProvided(k8sNodes.Items[0].Spec.ProviderID) + if !ok { + skip = true + ginkgo.Skip("underlay spec only runs on kind clusters") + } + clusterName = cluster + } + + if dockerNetwork == nil { + ginkgo.By("Ensuring docker network " + 
dockerNetworkName + " exists") + network, err := docker.NetworkCreate(dockerNetworkName, true, true) + framework.ExpectNoError(err, "creating docker network "+dockerNetworkName) + dockerNetwork = network + } + + ginkgo.By("Getting kind nodes") + nodes, err := kind.ListNodes(clusterName, "") + framework.ExpectNoError(err, "getting nodes in kind cluster") + framework.ExpectNotEmpty(nodes) + + ginkgo.By("Connecting nodes to the docker network") + err = kind.NetworkConnect(dockerNetwork.ID, nodes) + framework.ExpectNoError(err, "connecting nodes to network "+dockerNetworkName) + + ginkgo.By("Getting node links that belong to the docker network") + nodes, err = kind.ListNodes(clusterName, "") + framework.ExpectNoError(err, "getting nodes in kind cluster") + linkMap = make(map[string]*iproute.Link, len(nodes)) + routeMap = make(map[string][]iproute.Route, len(nodes)) + nodeNames = make([]string, 0, len(nodes)) + for _, node := range nodes { + links, err := node.ListLinks() + framework.ExpectNoError(err, "failed to list links on node %s: %v", node.Name(), err) + + routes, err := node.ListRoutes(true) + framework.ExpectNoError(err, "failed to list routes on node %s: %v", node.Name(), err) + + for _, link := range links { + if link.Address == node.NetworkSettings.Networks[dockerNetworkName].MacAddress { + linkMap[node.ID] = &link + break + } + } + framework.ExpectHaveKey(linkMap, node.ID) + + link := linkMap[node.ID] + for _, route := range routes { + if route.Dev == link.IfName { + r := iproute.Route{ + Dst: route.Dst, + Gateway: route.Gateway, + Dev: route.Dev, + Flags: route.Flags, + } + routeMap[node.ID] = append(routeMap[node.ID], r) + } + } + framework.ExpectHaveKey(linkMap, node.ID) + + linkMap[node.Name()] = linkMap[node.ID] + routeMap[node.Name()] = routeMap[node.ID] + nodeNames = append(nodeNames, node.Name()) + } + + itFn = func(exchangeLinkName bool) { + ginkgo.By("Creating provider network") + pn := makeProviderNetwork(providerNetworkName, exchangeLinkName, linkMap) + pn = providerNetworkClient.CreateSync(pn) + + ginkgo.By("Getting k8s nodes") + k8sNodes, err := e2enode.GetReadySchedulableNodes(cs) + framework.ExpectNoError(err) + + ginkgo.By("Validating node labels") + for _, node := range k8sNodes.Items { + link := linkMap[node.Name] + framework.ExpectHaveKeyWithValue(node.Labels, fmt.Sprintf(util.ProviderNetworkInterfaceTemplate, providerNetworkName), link.IfName) + framework.ExpectHaveKeyWithValue(node.Labels, fmt.Sprintf(util.ProviderNetworkReadyTemplate, providerNetworkName), "true") + framework.ExpectHaveKeyWithValue(node.Labels, fmt.Sprintf(util.ProviderNetworkMtuTemplate, providerNetworkName), strconv.Itoa(link.Mtu)) + framework.ExpectNotHaveKey(node.Labels, fmt.Sprintf(util.ProviderNetworkExcludeTemplate, providerNetworkName)) + } + + ginkgo.By("Validating provider network status") + framework.ExpectEqual(pn.Status.Ready, true, "field .status.ready should be true") + framework.ExpectConsistOf(pn.Status.ReadyNodes, nodeNames) + framework.ExpectEmpty(pn.Status.Vlans) + + ginkgo.By("Getting kind nodes") + kindNodes, err := kind.ListNodes(clusterName, "") + framework.ExpectNoError(err) + + ginkgo.By("Validating node links") + linkNameMap := make(map[string]string, len(kindNodes)) + bridgeName := util.ExternalBridgeName(providerNetworkName) + for _, node := range kindNodes { + if exchangeLinkName { + bridgeName = linkMap[node.ID].IfName + } + + links, err := node.ListLinks() + framework.ExpectNoError(err, "failed to list links on node %s: %v", node.Name(), err) + + var port, 
bridge *iproute.Link + for i, link := range links { + if link.IfIndex == linkMap[node.ID].IfIndex { + port = &links[i] + } else if link.IfName == bridgeName { + bridge = &links[i] + } + if port != nil && bridge != nil { + break + } + } + framework.ExpectNotNil(port) + framework.ExpectEqual(port.Address, linkMap[node.ID].Address) + framework.ExpectEqual(port.Mtu, linkMap[node.ID].Mtu) + framework.ExpectEqual(port.Master, "ovs-system") + framework.ExpectEqual(port.OperState, "UP") + if exchangeLinkName { + framework.ExpectEqual(port.IfName, util.ExternalBridgeName(providerNetworkName)) + } + + framework.ExpectNotNil(bridge) + framework.ExpectEqual(bridge.LinkInfo.InfoKind, "openvswitch") + framework.ExpectEqual(bridge.Address, port.Address) + framework.ExpectEqual(bridge.Mtu, port.Mtu) + framework.ExpectEqual(bridge.OperState, "UNKNOWN") + framework.ExpectContainElement(bridge.Flags, "UP") + + framework.ExpectEmpty(port.NonLinkLocalAddresses()) + framework.ExpectConsistOf(bridge.NonLinkLocalAddresses(), linkMap[node.ID].NonLinkLocalAddresses()) + + linkNameMap[node.ID] = port.IfName + } + + ginkgo.By("Validating node routes") + for _, node := range kindNodes { + if exchangeLinkName { + bridgeName = linkMap[node.ID].IfName + } + + routes, err := node.ListRoutes(true) + framework.ExpectNoError(err, "failed to list routes on node %s: %v", node.Name(), err) + + var portRoutes, bridgeRoutes []iproute.Route + for _, route := range routes { + r := iproute.Route{ + Dst: route.Dst, + Gateway: route.Gateway, + Dev: route.Dev, + Flags: route.Flags, + } + if route.Dev == linkNameMap[node.ID] { + portRoutes = append(portRoutes, r) + } else if route.Dev == bridgeName { + r.Dev = linkMap[node.ID].IfName + bridgeRoutes = append(bridgeRoutes, r) + } + } + + framework.ExpectEmpty(portRoutes, "no routes should exists on provider link") + framework.ExpectConsistOf(bridgeRoutes, routeMap[node.ID]) + } + } + }) + ginkgo.AfterEach(func() { + ginkgo.By("Deleting subnet " + subnetName) + subnetClient.DeleteSync(subnetName) + + ginkgo.By("Deleting vlan " + vlanName) + vlanClient.Delete(vlanName, metav1.DeleteOptions{}) + + ginkgo.By("Deleting provider network") + providerNetworkClient.DeleteSync(providerNetworkName) + + ginkgo.By("Getting nodes") + nodes, err := kind.ListNodes(clusterName, "") + framework.ExpectNoError(err, "getting nodes in cluster") + + ginkgo.By("Waiting for ovs bridge to disappear") + deadline := time.Now().Add(time.Minute) + for _, node := range nodes { + err = node.WaitLinkToDisappear(util.ExternalBridgeName(providerNetworkName), 2*time.Second, deadline) + framework.ExpectNoError(err, "timed out waiting for ovs bridge to disappear in node %s", node.Name()) + } + + if dockerNetwork != nil { + ginkgo.By("Disconnecting nodes from the docker network") + err = kind.NetworkDisconnect(dockerNetwork.ID, nodes) + framework.ExpectNoError(err, "disconnecting nodes from network "+dockerNetworkName) + } + }) + + framework.ConformanceIt(`should be able to create provider network`, func() { + itFn(false) + }) + + framework.ConformanceIt(`should exchange link names`, func() { + itFn(true) + }) + + framework.ConformanceIt("should keep pod mtu the same with node interface", func() { + ginkgo.By("Creating provider network") + pn := makeProviderNetwork(providerNetworkName, false, linkMap) + _ = providerNetworkClient.CreateSync(pn) + + ginkgo.By("Getting docker network " + dockerNetworkName) + network, err := docker.NetworkGet(dockerNetworkName) + framework.ExpectNoError(err, "getting docker network 
"+dockerNetworkName) + + ginkgo.By("Creating vlan " + vlanName) + vlan := framework.MakeVlan(vlanName, providerNetworkName, 0) + _ = vlanClient.Create(vlan) + + ginkgo.By("Creating subnet " + subnetName) + cidr := make([]string, 0, 2) + gateway := make([]string, 0, 2) + for _, config := range dockerNetwork.IPAM.Config { + cidr = append(cidr, config.Subnet) + gateway = append(gateway, config.Gateway) + } + excludeIPs := make([]string, 0, len(network.Containers)*2) + for _, container := range network.Containers { + if container.IPv4Address != "" { + excludeIPs = append(excludeIPs, container.IPv4Address) + } + if container.IPv6Address != "" { + excludeIPs = append(excludeIPs, container.IPv6Address) + } + } + subnet := framework.MakeSubnet(subnetName, vlanName, strings.Join(cidr, ","), strings.Join(gateway, ","), excludeIPs, nil, []string{namespaceName}) + _ = subnetClient.CreateSync(subnet) + + podName := "pod-" + framework.RandomSuffix() + ginkgo.By("Creating pod " + podName) + cmd := []string{"sh", "-c", "sleep 600"} + pod := framework.MakePod(namespaceName, podName, nil, nil, framework.GetKubeOvnImage(cs), cmd, nil) + _ = podClient.CreateSync(pod) + + ginkgo.By("Validating pod MTU") + links, err := iproute.AddressShow("eth0", func(cmd ...string) ([]byte, []byte, error) { + return framework.KubectlExec(namespaceName, podName, cmd...) + }) + framework.ExpectNoError(err) + framework.ExpectHaveLen(links, 1, "should get eth0 information") + framework.ExpectEqual(links[0].Mtu, docker.MTU) + + ginkgo.By("Deleting pod " + podName) + podClient.DeleteSync(podName) + }) +}) diff --git a/test/e2e/kubectl-ko/ko.go b/test/e2e/kubectl-ko/ko.go deleted file mode 100644 index ef4078978c6..00000000000 --- a/test/e2e/kubectl-ko/ko.go +++ /dev/null @@ -1,108 +0,0 @@ -package kubectl_ko - -import ( - "context" - "fmt" - "os" - "os/exec" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" - - kubeovn "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" - "github.com/kubeovn/kube-ovn/pkg/util" - "github.com/kubeovn/kube-ovn/test/e2e/framework" -) - -var _ = Describe("[kubectl-ko]", func() { - f := framework.NewFramework("kubectl-ko", fmt.Sprintf("%s/.kube/config", os.Getenv("HOME"))) - It("nb show", func() { - output, err := exec.Command("kubectl", "ko", "nbctl", "show").CombinedOutput() - Expect(err).NotTo(HaveOccurred(), string(output)) - }) - - It("sb show", func() { - output, err := exec.Command("kubectl", "ko", "sbctl", "show").CombinedOutput() - Expect(err).NotTo(HaveOccurred(), string(output)) - }) - - It("vsctl show", func() { - nodes, err := f.KubeClientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{}) - Expect(err).NotTo(HaveOccurred()) - for _, node := range nodes.Items { - output, err := exec.Command("kubectl", "ko", "vsctl", node.Name, "show").CombinedOutput() - Expect(err).NotTo(HaveOccurred(), string(output)) - } - }) - - It("ofctl show", func() { - nodes, err := f.KubeClientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{}) - Expect(err).NotTo(HaveOccurred()) - for _, node := range nodes.Items { - output, err := exec.Command("kubectl", "ko", "ofctl", node.Name, "show", "br-int").CombinedOutput() - Expect(err).NotTo(HaveOccurred(), string(output)) - } - }) - - It("dpctl show", func() { - nodes, err := f.KubeClientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{}) - Expect(err).NotTo(HaveOccurred()) - for _, node := range nodes.Items { - output, err := exec.Command("kubectl", "ko", "dpctl", node.Name, "show").CombinedOutput() - Expect(err).NotTo(HaveOccurred(), string(output)) - } - }) - - It("appctl list-commands", func() { - nodes, err := f.KubeClientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{}) - Expect(err).NotTo(HaveOccurred()) - for _, node := range nodes.Items { - output, err := exec.Command("kubectl", "ko", "appctl", node.Name, "list-commands").CombinedOutput() - Expect(err).NotTo(HaveOccurred(), string(output)) - } - }) - - It("tcpdump", func() { - pods, err := f.KubeClientSet.CoreV1().Pods("kube-system").List(context.Background(), metav1.ListOptions{LabelSelector: " app=kube-ovn-pinger"}) - Expect(err).NotTo(HaveOccurred()) - pod := pods.Items[0] - output, err := exec.Command("kubectl", "ko", "tcpdump", fmt.Sprintf("kube-system/%s", pod.Name), "-c", "1").CombinedOutput() - Expect(err).NotTo(HaveOccurred(), string(output)) - }) - - It("trace", func() { - pods, err := f.KubeClientSet.CoreV1().Pods("kube-system").List(context.Background(), metav1.ListOptions{LabelSelector: " app=kube-ovn-pinger"}) - Expect(err).NotTo(HaveOccurred()) - - pod := pods.Items[0] - dst := "114.114.114.114" - if util.CheckProtocol(pod.Status.PodIP) == kubeovn.ProtocolIPv6 { - dst = "2400:3200::1" - } - - output, err := exec.Command("kubectl", "ko", "trace", fmt.Sprintf("kube-system/%s", pod.Name), dst, "icmp").CombinedOutput() - Expect(err).NotTo(HaveOccurred(), string(output)) - - output, err = exec.Command("kubectl", "ko", "trace", fmt.Sprintf("kube-system/%s", pod.Name), dst, "tcp", "80").CombinedOutput() - Expect(err).NotTo(HaveOccurred(), string(output)) - - output, err = exec.Command("kubectl", "ko", "trace", fmt.Sprintf("kube-system/%s", pod.Name), dst, "udp", "53").CombinedOutput() - Expect(err).NotTo(HaveOccurred(), string(output)) - }) - - It("nb/sb operation", func() { - output, err := exec.Command("kubectl", "ko", "nb", "status").CombinedOutput() - 
Expect(err).NotTo(HaveOccurred(), string(output)) - - output, err = exec.Command("kubectl", "ko", "sb", "status").CombinedOutput() - Expect(err).NotTo(HaveOccurred(), string(output)) - - output, err = exec.Command("kubectl", "ko", "nb", "backup").CombinedOutput() - Expect(err).NotTo(HaveOccurred(), string(output)) - - output, err = exec.Command("kubectl", "ko", "sb", "backup").CombinedOutput() - Expect(err).NotTo(HaveOccurred(), string(output)) - }) -}) diff --git a/test/e2e/lb-svc/e2e_test.go b/test/e2e/lb-svc/e2e_test.go new file mode 100644 index 00000000000..4ef944846b8 --- /dev/null +++ b/test/e2e/lb-svc/e2e_test.go @@ -0,0 +1,227 @@ +package lb_svc + +import ( + "context" + "flag" + "fmt" + "math/big" + "os" + "path/filepath" + "strings" + "testing" + "time" + + dockertypes "github.com/docker/docker/api/types" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/klog/v2" + "k8s.io/kubernetes/test/e2e" + k8sframework "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/framework/config" + "k8s.io/kubernetes/test/e2e/framework/deployment" + e2enode "k8s.io/kubernetes/test/e2e/framework/node" + + "github.com/onsi/ginkgo/v2" + + "github.com/kubeovn/kube-ovn/pkg/util" + "github.com/kubeovn/kube-ovn/test/e2e/framework" + "github.com/kubeovn/kube-ovn/test/e2e/framework/docker" + "github.com/kubeovn/kube-ovn/test/e2e/framework/kind" +) + +const subnetProvider = "lb-svc-attachment.kube-system" + +func init() { + klog.SetOutput(ginkgo.GinkgoWriter) + + // Register flags. + config.CopyFlags(config.Flags, flag.CommandLine) + k8sframework.RegisterCommonFlags(flag.CommandLine) + k8sframework.RegisterClusterFlags(flag.CommandLine) + + // Parse all the flags + flag.Parse() + if k8sframework.TestContext.KubeConfig == "" { + k8sframework.TestContext.KubeConfig = filepath.Join(os.Getenv("HOME"), ".kube", "config") + } + k8sframework.AfterReadingAllFlags(&k8sframework.TestContext) +} + +func TestE2E(t *testing.T) { + e2e.RunE2ETests(t) +} + +func lbSvcDeploymentName(serviceName string) string { + return "lb-svc-" + serviceName +} + +var _ = framework.Describe("[group:lb-svc]", func() { + f := framework.NewDefaultFramework("lb-svc") + + var skip bool + var cs clientset.Interface + var subnetClient *framework.SubnetClient + var serviceClient *framework.ServiceClient + var clusterName, subnetName, namespaceName, serviceName string + var dockerNetwork *dockertypes.NetworkResource + var cidr, gateway string + ginkgo.BeforeEach(func() { + cs = f.ClientSet + subnetClient = f.SubnetClient() + serviceClient = f.ServiceClient() + namespaceName = f.Namespace.Name + subnetName = "subnet-" + framework.RandomSuffix() + serviceName = "service-" + framework.RandomSuffix() + + if skip { + ginkgo.Skip("underlay spec only runs on kind clusters") + } + + if clusterName == "" { + ginkgo.By("Getting k8s nodes") + k8sNodes, err := e2enode.GetReadySchedulableNodes(cs) + framework.ExpectNoError(err) + + cluster, ok := kind.IsKindProvided(k8sNodes.Items[0].Spec.ProviderID) + if !ok { + skip = true + ginkgo.Skip("underlay spec only runs on kind clusters") + } + clusterName = cluster + } + + if dockerNetwork == nil { + ginkgo.By("Getting docker network " + kind.NetworkName) + network, err := docker.NetworkGet(kind.NetworkName) + framework.ExpectNoError(err, "getting docker network "+kind.NetworkName) + dockerNetwork = network + } + + ginkgo.By("Creating subnet " + subnetName) + for _, config := range 
dockerNetwork.IPAM.Config { + if !strings.ContainsRune(config.Subnet, ':') { + cidr = config.Subnet + gateway = config.Gateway + break + } + } + excludeIPs := make([]string, 0, len(dockerNetwork.Containers)) + for _, container := range dockerNetwork.Containers { + if container.IPv4Address != "" { + excludeIPs = append(excludeIPs, container.IPv4Address) + } + } + subnet := framework.MakeSubnet(subnetName, "", cidr, gateway, excludeIPs, nil, []string{namespaceName}) + subnet.Spec.Provider = subnetProvider + _ = subnetClient.Create(subnet) + }) + ginkgo.AfterEach(func() { + ginkgo.By("Deleting service " + serviceName) + serviceClient.DeleteSync(serviceName) + + ginkgo.By("Deleting subnet " + subnetName) + subnetClient.DeleteSync(subnetName) + }) + + framework.ConformanceIt("should allocate dynamic external IP for service", func() { + ginkgo.By("Creating service " + serviceName) + ports := []corev1.ServicePort{{ + Name: "tcp", + Protocol: corev1.ProtocolTCP, + Port: 80, + TargetPort: intstr.FromInt(80), + }} + annotations := map[string]string{ + subnetProvider + ".kubernetes.io/logical_switch": subnetName, + } + selector := map[string]string{"app": "lb-svc-dynamic"} + service := framework.MakeService(serviceName, corev1.ServiceTypeLoadBalancer, annotations, selector, ports, corev1.ServiceAffinityNone) + _ = serviceClient.CreateSync(service) + + ginkgo.By("Waiting for 5 seconds") + time.Sleep(5 * time.Second) + + deploymentName := lbSvcDeploymentName(serviceName) + ginkgo.By("Getting deployment " + deploymentName) + deploy, err := cs.AppsV1().Deployments(namespaceName).Get(context.Background(), deploymentName, metav1.GetOptions{}) + framework.ExpectNoError(err, "failed to get deployment") + framework.ExpectEqual(deploy.Status.AvailableReplicas, int32(1)) + + ginkgo.By("Waiting for deployment " + deploymentName + " to be ready") + err = deployment.WaitForDeploymentComplete(cs, deploy) + framework.ExpectNoError(err, "deployment failed to complete") + + ginkgo.By("Getting pods for deployment " + deploymentName) + pods, err := deployment.GetPodsForDeployment(cs, deploy) + framework.ExpectNoError(err) + framework.ExpectHaveLen(pods.Items, 1) + + ginkgo.By("Checking pod annotations") + key := fmt.Sprintf(util.AllocatedAnnotationTemplate, subnetProvider) + framework.ExpectHaveKeyWithValue(pods.Items[0].Annotations, key, "true") + cidrKey := fmt.Sprintf(util.CidrAnnotationTemplate, subnetProvider) + ipKey := fmt.Sprintf(util.IpAddressAnnotationTemplate, subnetProvider) + framework.ExpectHaveKey(pods.Items[0].Annotations, cidrKey) + framework.ExpectHaveKey(pods.Items[0].Annotations, ipKey) + cidr := pods.Items[0].Annotations[cidrKey] + ip := pods.Items[0].Annotations[ipKey] + framework.ExpectTrue(util.CIDRContainIP(cidr, ip)) + + ginkgo.By("Checking service external IP") + service = serviceClient.Get(serviceName) + framework.ExpectNotEmpty(service.Status.LoadBalancer.Ingress) + framework.ExpectEqual(service.Status.LoadBalancer.Ingress[0].IP, ip) + }) + + framework.ConformanceIt("should allocate static external IP for service", func() { + ginkgo.By("Creating service " + serviceName) + base := util.Ip2BigInt(gateway) + lbIP := util.BigInt2Ip(base.Add(base, big.NewInt(100))) + ports := []corev1.ServicePort{{ + Name: "tcp", + Protocol: corev1.ProtocolTCP, + Port: 80, + TargetPort: intstr.FromInt(80), + }} + annotations := map[string]string{ + subnetProvider + ".kubernetes.io/logical_switch": subnetName, + } + selector := map[string]string{"app": "lb-svc-static"} + service := 
framework.MakeService(serviceName, corev1.ServiceTypeLoadBalancer, annotations, selector, ports, corev1.ServiceAffinityNone) + service.Spec.LoadBalancerIP = lbIP + _ = serviceClient.Create(service) + + ginkgo.By("Waiting for 10 seconds") + time.Sleep(10 * time.Second) + + deploymentName := lbSvcDeploymentName(serviceName) + ginkgo.By("Getting deployment " + deploymentName) + deploy, err := cs.AppsV1().Deployments(namespaceName).Get(context.Background(), deploymentName, metav1.GetOptions{}) + framework.ExpectNoError(err, "failed to get deployment") + framework.ExpectEqual(deploy.Status.AvailableReplicas, int32(1)) + + ginkgo.By("Waiting for deployment " + deploymentName + " to be ready") + err = deployment.WaitForDeploymentComplete(cs, deploy) + framework.ExpectNoError(err, "deployment failed to complete") + + ginkgo.By("Getting pods for deployment " + deploymentName) + pods, err := deployment.GetPodsForDeployment(cs, deploy) + framework.ExpectNoError(err) + framework.ExpectHaveLen(pods.Items, 1) + + ginkgo.By("Checking pod annotations") + key := fmt.Sprintf(util.AllocatedAnnotationTemplate, subnetProvider) + framework.ExpectHaveKeyWithValue(pods.Items[0].Annotations, key, "true") + ipKey := fmt.Sprintf(util.IpAddressAnnotationTemplate, subnetProvider) + framework.ExpectHaveKeyWithValue(pods.Items[0].Annotations, ipKey, lbIP) + cidr := pods.Items[0].Annotations[fmt.Sprintf(util.CidrAnnotationTemplate, subnetProvider)] + framework.ExpectTrue(util.CIDRContainIP(cidr, lbIP)) + + ginkgo.By("Checking service external IP") + service = serviceClient.Get(serviceName) + framework.ExpectNotEmpty(service.Status.LoadBalancer.Ingress) + framework.ExpectEqual(service.Status.LoadBalancer.Ingress[0].IP, lbIP) + }) +}) diff --git a/test/e2e/node/node.go b/test/e2e/node/node.go deleted file mode 100644 index 9eb46b2ff10..00000000000 --- a/test/e2e/node/node.go +++ /dev/null @@ -1,37 +0,0 @@ -package node - -import ( - "context" - "fmt" - "os" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" - - "github.com/kubeovn/kube-ovn/pkg/util" - "github.com/kubeovn/kube-ovn/test/e2e/framework" -) - -var _ = Describe("[Node Init]", func() { - f := framework.NewFramework("ip allocation", fmt.Sprintf("%s/.kube/config", os.Getenv("HOME"))) - - It("node annotations", func() { - nodes, err := f.KubeClientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{}) - Expect(err).NotTo(HaveOccurred()) - subnet, err := f.OvnClientSet.KubeovnV1().Subnets().Get(context.Background(), "join", metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - for _, no := range nodes.Items { - annotations := no.Annotations - Expect(annotations[util.AllocatedAnnotation]).To(Equal("true")) - Expect(annotations[util.CidrAnnotation]).To(Equal(subnet.Spec.CIDRBlock)) - Expect(annotations[util.GatewayAnnotation]).To(Equal(subnet.Spec.Gateway)) - Expect(annotations[util.IpAddressAnnotation]).NotTo(BeEmpty()) - Expect(util.CIDRContainIP(annotations[util.CidrAnnotation], annotations[util.IpAddressAnnotation])).To(BeTrue()) - Expect(annotations[util.MacAddressAnnotation]).NotTo(BeEmpty()) - Expect(annotations[util.PortNameAnnotation]).NotTo(BeEmpty()) - Expect(annotations[util.LogicalSwitchAnnotation]).To(Equal(subnet.Name)) - } - }) -}) diff --git a/test/e2e/ovn-ic/e2e_test.go b/test/e2e/ovn-ic/e2e_test.go new file mode 100644 index 00000000000..030737f434b --- /dev/null +++ b/test/e2e/ovn-ic/e2e_test.go @@ -0,0 +1,207 @@ +package ovn_ic + +import ( + "context" + "encoding/json" + "flag" + "fmt" + "math/rand" + "net" + "os" + "path/filepath" + "strconv" + "strings" + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + k8stypes "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/klog/v2" + "k8s.io/kubernetes/test/e2e" + k8sframework "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/framework/config" + e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" + e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" + + "github.com/onsi/ginkgo/v2" + + "github.com/kubeovn/kube-ovn/pkg/util" + "github.com/kubeovn/kube-ovn/test/e2e/framework" + "github.com/kubeovn/kube-ovn/test/e2e/framework/kind" +) + +var clusters []string + +func init() { + klog.SetOutput(ginkgo.GinkgoWriter) + + // Register flags. 
+ config.CopyFlags(config.Flags, flag.CommandLine) + k8sframework.RegisterCommonFlags(flag.CommandLine) + k8sframework.RegisterClusterFlags(flag.CommandLine) + + // Parse all the flags + flag.Parse() + if k8sframework.TestContext.KubeConfig == "" { + k8sframework.TestContext.KubeConfig = filepath.Join(os.Getenv("HOME"), ".kube", "config") + } + k8sframework.AfterReadingAllFlags(&k8sframework.TestContext) + + var err error + if clusters, err = kind.ListClusters(); err != nil { + panic(fmt.Sprintf("failed to list kind clusters: %v", err)) + } + if len(clusters) < 2 { + panic("not enough kind clusters to run ovn-ic e2e testing") + } +} + +func TestE2E(t *testing.T) { + e2e.RunE2ETests(t) +} + +func execOrDie(kubeContext, cmd string) string { + ginkgo.By(`Switching context to ` + kubeContext) + e2ekubectl.NewKubectlCommand("", "config", "use-context", kubeContext).ExecOrDie("") + + ginkgo.By(`Executing "kubectl ` + cmd + `"`) + return e2ekubectl.NewKubectlCommand("", strings.Fields(cmd)...).ExecOrDie("") +} + +func execPodOrDie(kubeContext, namespace, pod, cmd string) string { + ginkgo.By(`Switching context to ` + kubeContext) + e2ekubectl.NewKubectlCommand("", "config", "use-context", kubeContext).ExecOrDie("") + + ginkgo.By(fmt.Sprintf(`Executing %q in pod %s/%s`, cmd, namespace, pod)) + return e2epodoutput.RunHostCmdOrDie(namespace, pod, cmd) +} + +var _ = framework.OrderedDescribe("[group:ovn-ic]", func() { + frameworks := make([]*framework.Framework, len(clusters)) + for i := range clusters { + frameworks[i] = framework.NewFrameworkWithContext("ovn-ic", "kind-"+clusters[i]) + } + + clientSets := make([]clientset.Interface, len(clusters)) + podClients := make([]*framework.PodClient, len(clusters)) + namespaceNames := make([]string, len(clusters)) + var kubectlConfig string + ginkgo.BeforeEach(func() { + for i := range clusters { + clientSets[i] = frameworks[i].ClientSet + podClients[i] = frameworks[i].PodClient() + namespaceNames[i] = frameworks[i].Namespace.Name + } + kubectlConfig = k8sframework.TestContext.KubeConfig + k8sframework.TestContext.KubeConfig = "" + }) + ginkgo.AfterEach(func() { + k8sframework.TestContext.KubeConfig = kubectlConfig + }) + + fnCheckPodHTTP := func() { + podNames := make([]string, len(clusters)) + pods := make([]*corev1.Pod, len(clusters)) + ports := make([]string, len(clusters)) + for i := range clusters { + podNames[i] = "pod-" + framework.RandomSuffix() + ginkgo.By("Creating pod " + podNames[i] + " in cluster " + clusters[i]) + port := 8000 + rand.Intn(1000) + ports[i] = strconv.Itoa(port) + args := []string{"netexec", "--http-port", ports[i]} + pods[i] = framework.MakePod(namespaceNames[i], podNames[i], nil, nil, framework.AgnhostImage, nil, args) + pods[i].Spec.Containers[0].ReadinessProbe = &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(port), + }, + }, + } + pods[i] = podClients[i].CreateSync(pods[i]) + } + + for i := range clusters { + sourceIPs := make([]string, 0, len(pods[i].Status.PodIPs)) + for _, podIP := range pods[i].Status.PodIPs { + sourceIPs = append(sourceIPs, podIP.IP) + } + + for j := range clusters { + if j == i { + continue + } + + for _, podIP := range pods[j].Status.PodIPs { + ip := podIP.IP + protocol := strings.ToLower(util.CheckProtocol(ip)) + ginkgo.By("Checking connection from cluster " + clusters[i] + " to cluster " + clusters[j] + " via " + protocol) + cmd := fmt.Sprintf("curl -q -s --connect-timeout 5 %s/clientip", net.JoinHostPort(ip, ports[j])) + output :=
execPodOrDie(frameworks[i].KubeContext, pods[i].Namespace, pods[i].Name, cmd) + client, _, err := net.SplitHostPort(strings.TrimSpace(output)) + framework.ExpectNoError(err) + framework.ExpectContainElement(sourceIPs, client) + } + } + } + } + + framework.ConformanceIt("should create logical switch ts", func() { + azNames := make([]string, len(clusters)) + for i := range clusters { + ginkgo.By("Fetching the ConfigMap in cluster " + clusters[i]) + cm, err := clientSets[i].CoreV1().ConfigMaps(framework.KubeOvnNamespace).Get(context.TODO(), util.InterconnectionConfig, metav1.GetOptions{}) + framework.ExpectNoError(err, "failed to get ConfigMap") + azNames[i] = cm.Data["az-name"] + } + + for i := range clusters { + ginkgo.By("Ensuring logical switch ts exists in cluster " + clusters[i]) + output := execOrDie(frameworks[i].KubeContext, "ko nbctl show ts") + for _, az := range azNames { + framework.ExpectTrue(strings.Contains(output, "ts-"+az), "should have lsp ts-"+az) + } + } + }) + + framework.ConformanceIt("should be able to communicate between clusters", func() { + fnCheckPodHTTP() + }) + + framework.ConformanceIt("should be able to update az name", func() { + azNames := make([]string, len(clusters)) + for i := range clusters { + ginkgo.By("Fetching the ConfigMap in cluster " + clusters[i]) + cm, err := clientSets[i].CoreV1().ConfigMaps(framework.KubeOvnNamespace).Get(context.TODO(), util.InterconnectionConfig, metav1.GetOptions{}) + framework.ExpectNoError(err, "failed to get ConfigMap") + azNames[i] = cm.Data["az-name"] + } + + azNames[0] = fmt.Sprintf("az%04d", rand.Intn(10000)) + configMapPatchPayload, err := json.Marshal(corev1.ConfigMap{ + Data: map[string]string{ + "az-name": azNames[0], + }, + }) + framework.ExpectNoError(err, "failed to marshal patch data") + + ginkgo.By("Patching the ConfigMap in cluster " + clusters[0]) + _, err = clientSets[0].CoreV1().ConfigMaps(framework.KubeOvnNamespace).Patch(context.TODO(), util.InterconnectionConfig, k8stypes.StrategicMergePatchType, []byte(configMapPatchPayload), metav1.PatchOptions{}) + framework.ExpectNoError(err, "failed to patch ConfigMap") + + ginkgo.By("Waiting for new az names to be applied") + time.Sleep(10 * time.Second) + + ginkgo.By("Ensuring logical switch ts exists in cluster " + clusters[0]) + output := execOrDie(frameworks[0].KubeContext, "ko nbctl show ts") + for _, az := range azNames { + lsp := "ts-" + az + framework.ExpectTrue(strings.Contains(output, lsp), "should have lsp "+lsp) + } + + fnCheckPodHTTP() + }) +}) diff --git a/test/e2e/qos/qos.go b/test/e2e/qos/qos.go deleted file mode 100644 index 1da4501c1c8..00000000000 --- a/test/e2e/qos/qos.go +++ /dev/null @@ -1,330 +0,0 @@ -package qos - -import ( - "context" - "fmt" - "os" - "strings" - "time" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - - . "github.com/onsi/ginkgo/v2" - .
"github.com/onsi/gomega" - - kubeovn "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" - "github.com/kubeovn/kube-ovn/pkg/util" - "github.com/kubeovn/kube-ovn/test/e2e/framework" -) - -const testImage = "kubeovn/pause:3.2" - -var _ = Describe("[Qos]", func() { - namespace := "qos" - f := framework.NewFramework("qos", fmt.Sprintf("%s/.kube/config", os.Getenv("HOME"))) - - ns := corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: namespace, - Labels: map[string]string{"e2e": "true"}, - }, - } - if _, err := f.KubeClientSet.CoreV1().Namespaces().Create(context.Background(), &ns, metav1.CreateOptions{}); err != nil { - Fail(err.Error()) - } - - It("create netem qos", func() { - name := f.GetName() - autoMount := false - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Labels: map[string]string{ - "e2e": "true", - "kubernetes.io/hostname": "kube-ovn-control-plane", - }, - Annotations: map[string]string{ - util.NetemQosLatencyAnnotation: "600", - util.NetemQosLimitAnnotation: "2000", - util.NetemQosLossAnnotation: "10", - }, - }, - Spec: corev1.PodSpec{ - NodeName: "kube-ovn-control-plane", - Containers: []corev1.Container{ - { - Name: name, - Image: testImage, - ImagePullPolicy: corev1.PullIfNotPresent, - }, - }, - AutomountServiceAccountToken: &autoMount, - }, - } - - By("Create pod") - _, err := f.KubeClientSet.CoreV1().Pods(namespace).Create(context.Background(), pod, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - - _, err = f.WaitPodReady(name, namespace) - Expect(err).NotTo(HaveOccurred()) - - By("Check Qos annotation") - pod, err = f.KubeClientSet.CoreV1().Pods(namespace).Get(context.Background(), name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - Expect(pod.Annotations[util.AllocatedAnnotation]).To(Equal("true")) - Expect(pod.Annotations[util.RoutedAnnotation]).To(Equal("true")) - Expect(pod.Annotations[util.NetemQosLatencyAnnotation]).To(Equal("600")) - Expect(pod.Annotations[util.NetemQosLimitAnnotation]).To(Equal("2000")) - Expect(pod.Annotations[util.NetemQosLossAnnotation]).To(Equal("10")) - - By("Check Ovs Qos Para") - time.Sleep(3 * time.Second) - qos, err := framework.GetPodNetemQosPara(name, namespace) - Expect(err).NotTo(HaveOccurred()) - Expect(qos.Latency).To(Equal("600000")) - Expect(qos.Limit).To(Equal("2000")) - Expect(qos.Loss).To(Equal("10")) - - By("Delete pod") - err = f.KubeClientSet.CoreV1().Pods(namespace).Delete(context.Background(), pod.Name, metav1.DeleteOptions{}) - Expect(err).NotTo(HaveOccurred()) - }) - - It("update netem qos", func() { - name := f.GetName() - autoMount := false - oriPod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Labels: map[string]string{ - "e2e": "true", - "kubernetes.io/hostname": "kube-ovn-control-plane", - }, - }, - Spec: corev1.PodSpec{ - NodeName: "kube-ovn-control-plane", - Containers: []corev1.Container{ - { - Name: name, - Image: testImage, - ImagePullPolicy: corev1.PullIfNotPresent, - }, - }, - AutomountServiceAccountToken: &autoMount, - }, - } - - By("Create pod") - _, err := f.KubeClientSet.CoreV1().Pods(namespace).Create(context.Background(), oriPod, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - - oriPod, err = f.WaitPodReady(name, namespace) - Expect(err).NotTo(HaveOccurred()) - pod := oriPod.DeepCopy() - - By("Annotate pod with netem qos") - pod.Annotations[util.NetemQosLatencyAnnotation] = "600" - pod.Annotations[util.NetemQosLimitAnnotation] = "2000" - 
pod.Annotations[util.NetemQosLossAnnotation] = "10" - patch, err := util.GenerateStrategicMergePatchPayload(oriPod, pod) - Expect(err).NotTo(HaveOccurred()) - - _, err = f.KubeClientSet.CoreV1().Pods(namespace).Patch(context.Background(), name, types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "") - Expect(err).NotTo(HaveOccurred()) - - pod, err = f.KubeClientSet.CoreV1().Pods(namespace).Get(context.Background(), name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - Expect(pod.Annotations[util.AllocatedAnnotation]).To(Equal("true")) - Expect(pod.Annotations[util.RoutedAnnotation]).To(Equal("true")) - Expect(pod.Annotations[util.NetemQosLatencyAnnotation]).To(Equal("600")) - Expect(pod.Annotations[util.NetemQosLimitAnnotation]).To(Equal("2000")) - Expect(pod.Annotations[util.NetemQosLossAnnotation]).To(Equal("10")) - - By("Check ovs qos") - time.Sleep(3 * time.Second) - qos, err := framework.GetPodNetemQosPara(name, namespace) - Expect(err).NotTo(HaveOccurred()) - Expect(qos.Latency).To(Equal("600000")) - Expect(qos.Limit).To(Equal("2000")) - Expect(qos.Loss).To(Equal("10")) - - By("Delete pod") - err = f.KubeClientSet.CoreV1().Pods(namespace).Delete(context.Background(), pod.Name, metav1.DeleteOptions{}) - Expect(err).NotTo(HaveOccurred()) - }) - - It("create htb qos", func() { - name := f.GetName() - autoMount := false - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Labels: map[string]string{ - "e2e": "true", - "kubernetes.io/hostname": "kube-ovn-control-plane", - }, - Annotations: map[string]string{ - util.PriorityAnnotation: "50", - util.IngressRateAnnotation: "300", - }, - }, - Spec: corev1.PodSpec{ - NodeName: "kube-ovn-control-plane", - Containers: []corev1.Container{ - { - Name: name, - Image: testImage, - ImagePullPolicy: corev1.PullIfNotPresent, - }, - }, - AutomountServiceAccountToken: &autoMount, - }, - } - - By("Create pod") - _, err := f.KubeClientSet.CoreV1().Pods(namespace).Create(context.Background(), pod, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - - _, err = f.WaitPodReady(name, namespace) - Expect(err).NotTo(HaveOccurred()) - - By("Check Qos annotation") - pod, err = f.KubeClientSet.CoreV1().Pods(namespace).Get(context.Background(), name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - Expect(pod.Annotations[util.AllocatedAnnotation]).To(Equal("true")) - Expect(pod.Annotations[util.RoutedAnnotation]).To(Equal("true")) - Expect(pod.Annotations[util.PriorityAnnotation]).To(Equal("50")) - Expect(pod.Annotations[util.IngressRateAnnotation]).To(Equal("300")) - - By("Check Ovs Qos Para") - time.Sleep(3 * time.Second) - priority, rate, err := framework.GetPodHtbQosPara(name, namespace) - Expect(err).NotTo(HaveOccurred()) - Expect(priority).To(Equal("50")) - Expect(rate).To(Equal("300000000")) - - By("Delete pod") - err = f.KubeClientSet.CoreV1().Pods(namespace).Delete(context.Background(), pod.Name, metav1.DeleteOptions{}) - Expect(err).NotTo(HaveOccurred()) - }) - - It("update htb qos", func() { - name := f.GetName() - isIPv6 := strings.EqualFold(os.Getenv("IPV6"), "true") - cidr := "20.6.0.0/16" - if isIPv6 { - cidr = "fc00:20:6::/112" - } - - By("create subnet with htbqos") - s := kubeovn.Subnet{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Labels: map[string]string{"e2e": "true"}, - }, - Spec: kubeovn.SubnetSpec{ - CIDRBlock: cidr, - Protocol: util.CheckProtocol(cidr), - HtbQos: util.HtbQosLow, - Namespaces: []string{namespace}, - }, - } - _, err := 
f.OvnClientSet.KubeovnV1().Subnets().Create(context.Background(), &s, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - - err = f.WaitSubnetReady(name) - Expect(err).NotTo(HaveOccurred()) - - subnet, err := f.OvnClientSet.KubeovnV1().Subnets().Get(context.Background(), name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - Expect(subnet.Spec.HtbQos).To(Equal(util.HtbQosLow)) - - autoMount := false - oriPod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Labels: map[string]string{ - "e2e": "true", - "kubernetes.io/hostname": "kube-ovn-control-plane", - }, - Annotations: map[string]string{ - util.LogicalSwitchAnnotation: subnet.Name, - }, - }, - Spec: corev1.PodSpec{ - NodeName: "kube-ovn-control-plane", - Containers: []corev1.Container{ - { - Name: name, - Image: testImage, - ImagePullPolicy: corev1.PullIfNotPresent, - }, - }, - AutomountServiceAccountToken: &autoMount, - }, - } - - By("Create pod") - _, err = f.KubeClientSet.CoreV1().Pods(namespace).Create(context.Background(), oriPod, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - - oriPod, err = f.WaitPodReady(name, namespace) - Expect(err).NotTo(HaveOccurred()) - - By("Check Ovs Qos Para, same as subnet") - time.Sleep(3 * time.Second) - priority, _, err := framework.GetPodHtbQosPara(name, namespace) - Expect(err).NotTo(HaveOccurred()) - Expect(priority).To(Equal("5")) - - By("Annotate pod with priority") - pod := oriPod.DeepCopy() - pod.Annotations[util.PriorityAnnotation] = "2" - - patch, err := util.GenerateStrategicMergePatchPayload(oriPod, pod) - Expect(err).NotTo(HaveOccurred()) - - _, err = f.KubeClientSet.CoreV1().Pods(namespace).Patch(context.Background(), name, types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "") - Expect(err).NotTo(HaveOccurred()) - - pod, err = f.KubeClientSet.CoreV1().Pods(namespace).Get(context.Background(), name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - Expect(pod.Annotations[util.PriorityAnnotation]).To(Equal("2")) - - By("Check Ovs Qos Para") - time.Sleep(3 * time.Second) - priority, _, err = framework.GetPodHtbQosPara(name, namespace) - Expect(err).NotTo(HaveOccurred()) - Expect(priority).To(Equal("2")) - - By("Delete Pod priority annotation") - testPod := pod.DeepCopy() - delete(testPod.Annotations, util.PriorityAnnotation) - - patch, err = util.GenerateStrategicMergePatchPayload(pod, testPod) - Expect(err).NotTo(HaveOccurred()) - - _, err = f.KubeClientSet.CoreV1().Pods(namespace).Patch(context.Background(), name, types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "") - Expect(err).NotTo(HaveOccurred()) - - _, err = f.KubeClientSet.CoreV1().Pods(namespace).Get(context.Background(), name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - - By("Check Ovs Qos Para, priority from subnet") - time.Sleep(3 * time.Second) - priority, _, err = framework.GetPodHtbQosPara(name, namespace) - Expect(err).NotTo(HaveOccurred()) - Expect(priority).To(Equal("5")) - - By("Delete pod") - err = f.KubeClientSet.CoreV1().Pods(namespace).Delete(context.Background(), pod.Name, metav1.DeleteOptions{}) - Expect(err).NotTo(HaveOccurred()) - }) -}) diff --git a/test/e2e/service/service.go b/test/e2e/service/service.go deleted file mode 100644 index 058431a78f7..00000000000 --- a/test/e2e/service/service.go +++ /dev/null @@ -1,358 +0,0 @@ -package service - -import ( - "bytes" - "context" - "fmt" - "os" - "os/exec" - "strings" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" - k8sruntime "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/kubernetes" - kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" - kubeproxyconfig "k8s.io/kubernetes/pkg/proxy/apis/config" - kubeproxyscheme "k8s.io/kubernetes/pkg/proxy/apis/config/scheme" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - - "github.com/kubeovn/kube-ovn/pkg/util" - "github.com/kubeovn/kube-ovn/test/e2e/framework" -) - -const namespace = "kube-system" - -var dockerArgs = []string{"exec", "kube-ovn-e2e", "curl"} - -func nodeIPs(node corev1.Node) []string { - nodeIPv4, nodeIPv6 := util.GetNodeInternalIP(node) - var nodeIPs []string - if nodeIPv4 != "" { - nodeIPs = append(nodeIPs, nodeIPv4) - } - if nodeIPv6 != "" { - nodeIPs = append(nodeIPs, nodeIPv6) - } - return nodeIPs -} - -func curlArgs(ip string, port int32) string { - return fmt.Sprintf("-s -m 3 -o /dev/null -w %%{http_code} %s/metrics", util.JoinHostPort(ip, port)) -} - -func kubectlArgs(pod, ip string, port int32) string { - return fmt.Sprintf("-n kube-system exec %s -- curl %s", pod, curlArgs(ip, port)) -} - -func setSvcTypeToNodePort(kubeClientSet kubernetes.Interface, name string) (*corev1.Service, error) { - svc, err := kubeClientSet.CoreV1().Services(namespace).Get(context.Background(), name, metav1.GetOptions{}) - if err != nil { - return nil, err - } - if svc.Spec.Type == corev1.ServiceTypeNodePort { - return svc, nil - } - - newSvc := svc.DeepCopy() - newSvc.Spec.Type = corev1.ServiceTypeNodePort - return kubeClientSet.CoreV1().Services(svc.Namespace).Update(context.Background(), newSvc, metav1.UpdateOptions{}) -} - -func setSvcEtpToLocal(kubeClientSet kubernetes.Interface, name string) (*corev1.Service, error) { - svc, err := kubeClientSet.CoreV1().Services(namespace).Get(context.Background(), name, metav1.GetOptions{}) - if err != nil { - return nil, err - } - if svc.Spec.Type == corev1.ServiceTypeNodePort && svc.Spec.ExternalTrafficPolicy == corev1.ServiceExternalTrafficPolicyTypeLocal { - return svc, nil - } - - newSvc := svc.DeepCopy() - newSvc.Spec.Type = corev1.ServiceTypeNodePort - newSvc.Spec.ExternalTrafficPolicy = corev1.ServiceExternalTrafficPolicyTypeLocal - return kubeClientSet.CoreV1().Services(svc.Namespace).Update(context.Background(), newSvc, metav1.UpdateOptions{}) -} - -func hasEndpoint(node string, endpoints *corev1.Endpoints) bool { - for _, subset := range endpoints.Subsets { - for _, addr := range subset.Addresses { - if addr.NodeName != nil && *addr.NodeName == node { - return true - } - } - } - return false -} - -func checkService(checkCount int, shouldSucceed bool, cmd string, args ...string) { - for i := 0; i < checkCount; i++ { - c := exec.Command(cmd, args...) 
- var stdout, stderr bytes.Buffer - c.Stdout, c.Stderr = &stdout, &stderr - err := c.Run() - output := strings.TrimSpace(stdout.String()) - if shouldSucceed { - Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("stdout: %s, stderr: %s", output, strings.TrimSpace(stderr.String()))) - Expect(output).To(Equal("200")) - } else { - Expect(err).To(HaveOccurred()) - Expect(output).To(Equal("000")) - } - } -} - -var _ = Describe("[Service]", func() { - f := framework.NewFramework("service", fmt.Sprintf("%s/.kube/config", os.Getenv("HOME"))) - hostPods, err := f.KubeClientSet.CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "app=ovs"}) - Expect(err).NotTo(HaveOccurred()) - - containerPods, err := f.KubeClientSet.CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "app=kube-ovn-pinger"}) - Expect(err).NotTo(HaveOccurred()) - - hostService, err := setSvcTypeToNodePort(f.KubeClientSet, "kube-ovn-cni") - Expect(err).NotTo(HaveOccurred()) - - containerService, err := setSvcTypeToNodePort(f.KubeClientSet, "kube-ovn-pinger") - Expect(err).NotTo(HaveOccurred()) - - localEtpHostService, err := setSvcEtpToLocal(f.KubeClientSet, "kube-ovn-monitor") - Expect(err).NotTo(HaveOccurred()) - - localEtpHostEndpoints, err := f.KubeClientSet.CoreV1().Endpoints(namespace).Get(context.Background(), localEtpHostService.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - - nodes, err := f.KubeClientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{}) - Expect(err).NotTo(HaveOccurred()) - checkCount := len(nodes.Items) - - // var _ciliumChaining, proxyIpvsMode bool - var proxyIpvsMode bool - _, err = f.KubeClientSet.AppsV1().DaemonSets(namespace).Get(context.Background(), "cilium", metav1.GetOptions{}) - if err == nil { - // _ciliumChaining = true - } else { - Expect(errors.IsNotFound(err)).To(BeTrue()) - - kubeProxyConfigMap, err := f.KubeClientSet.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(context.Background(), kubeadmconstants.KubeProxyConfigMap, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - - kubeProxyConfig := &kubeproxyconfig.KubeProxyConfiguration{} - err = k8sruntime.DecodeInto(kubeproxyscheme.Codecs.UniversalDecoder(), []byte(kubeProxyConfigMap.Data[kubeadmconstants.KubeProxyConfigMapKey]), kubeProxyConfig) - Expect(err).NotTo(HaveOccurred()) - - proxyIpvsMode = kubeProxyConfig.Mode == kubeproxyconfig.ProxyModeIPVS - } - - Context("service with host network endpoints", func() { - It("container to ClusterIP", func() { - port := hostService.Spec.Ports[0].Port - for _, ip := range hostService.Spec.ClusterIPs { - for _, pod := range containerPods.Items { - checkService(checkCount, true, "kubectl", strings.Fields(kubectlArgs(pod.Name, ip, port))...) - } - } - }) - - It("host to ClusterIP", func() { - port := hostService.Spec.Ports[0].Port - for _, ip := range hostService.Spec.ClusterIPs { - for _, pod := range hostPods.Items { - checkService(checkCount, true, "kubectl", strings.Fields(kubectlArgs(pod.Name, ip, port))...) - } - } - }) - - It("external to ClusterIP", func() { - /* if ciliumChaining { - return - }*/ - - port := hostService.Spec.Ports[0].Port - for _, ip := range hostService.Spec.ClusterIPs { - checkService(checkCount, true, "docker", append(dockerArgs, strings.Fields(curlArgs(ip, port))...)...) 
- } - }) - - It("container to NodePort", func() { - port := hostService.Spec.Ports[0].NodePort - for _, pod := range containerPods.Items { - for _, node := range nodes.Items { - for _, nodeIP := range nodeIPs(node) { - checkService(checkCount, true, "kubectl", strings.Fields(kubectlArgs(pod.Name, nodeIP, port))...) - } - } - } - }) - - It("host to NodePort", func() { - port := hostService.Spec.Ports[0].NodePort - for _, pod := range hostPods.Items { - for _, node := range nodes.Items { - for _, nodeIP := range nodeIPs(node) { - checkService(checkCount, true, "kubectl", strings.Fields(kubectlArgs(pod.Name, nodeIP, port))...) - } - } - } - }) - - It("external to NodePort", func() { - /* if ciliumChaining { - return - }*/ - - port := hostService.Spec.Ports[0].NodePort - for _, node := range nodes.Items { - for _, nodeIP := range nodeIPs(node) { - checkService(checkCount, true, "docker", append(dockerArgs, strings.Fields(curlArgs(nodeIP, port))...)...) - } - } - }) - }) - - Context("service with container network endpoints", func() { - It("container to ClusterIP", func() { - port := containerService.Spec.Ports[0].Port - for _, ip := range containerService.Spec.ClusterIPs { - for _, pod := range containerPods.Items { - checkService(checkCount, true, "kubectl", strings.Fields(kubectlArgs(pod.Name, ip, port))...) - } - } - }) - - It("host to ClusterIP", func() { - port := containerService.Spec.Ports[0].Port - for _, ip := range containerService.Spec.ClusterIPs { - for _, pod := range hostPods.Items { - checkService(checkCount, true, "kubectl", strings.Fields(kubectlArgs(pod.Name, ip, port))...) - } - } - }) - - It("external to ClusterIP", func() { - /* if ciliumChaining { - return - }*/ - - port := containerService.Spec.Ports[0].Port - for _, ip := range containerService.Spec.ClusterIPs { - checkService(checkCount, true, "docker", append(dockerArgs, strings.Fields(curlArgs(ip, port))...)...) - } - }) - - It("container to NodePort", func() { - port := containerService.Spec.Ports[0].NodePort - for _, pod := range containerPods.Items { - for _, node := range nodes.Items { - for _, nodeIP := range nodeIPs(node) { - checkService(checkCount, true, "kubectl", strings.Fields(kubectlArgs(pod.Name, nodeIP, port))...) - } - } - } - }) - - It("host to NodePort", func() { - port := containerService.Spec.Ports[0].NodePort - for _, pod := range hostPods.Items { - for _, node := range nodes.Items { - for _, nodeIP := range nodeIPs(node) { - checkService(checkCount, true, "kubectl", strings.Fields(kubectlArgs(pod.Name, nodeIP, port))...) - } - } - } - }) - - It("external to NodePort", func() { - /* if ciliumChaining { - return - }*/ - - port := containerService.Spec.Ports[0].NodePort - for _, node := range nodes.Items { - for _, nodeIP := range nodeIPs(node) { - checkService(checkCount, true, "docker", append(dockerArgs, strings.Fields(curlArgs(nodeIP, port))...)...) - } - } - }) - }) - - Context("host service with local external traffic policy", func() { - It("container to ClusterIP", func() { - port := localEtpHostService.Spec.Ports[0].Port - for _, pod := range containerPods.Items { - for _, ip := range localEtpHostService.Spec.ClusterIPs { - checkService(checkCount, true, "kubectl", strings.Fields(kubectlArgs(pod.Name, ip, port))...) 
- } - } - }) - - It("host to ClusterIP", func() { - port := localEtpHostService.Spec.Ports[0].Port - for _, pod := range hostPods.Items { - for _, ip := range localEtpHostService.Spec.ClusterIPs { - checkService(checkCount, true, "kubectl", strings.Fields(kubectlArgs(pod.Name, ip, port))...) - } - } - }) - - It("external to ClusterIP", func() { - /* if ciliumChaining { - return - }*/ - - port := localEtpHostService.Spec.Ports[0].Port - for _, ip := range localEtpHostService.Spec.ClusterIPs { - checkService(checkCount, true, "docker", append(dockerArgs, strings.Fields(curlArgs(ip, port))...)...) - } - }) - - It("container to NodePort", func() { - /* if ciliumChaining { - return - }*/ - - port := localEtpHostService.Spec.Ports[0].NodePort - for _, node := range nodes.Items { - for _, pod := range containerPods.Items { - for _, nodeIP := range nodeIPs(node) { - checkService(checkCount, true, "kubectl", strings.Fields(kubectlArgs(pod.Name, nodeIP, port))...) - } - } - } - }) - - It("host to NodePort", func() { - /* if ciliumChaining { - return - }*/ - - port := localEtpHostService.Spec.Ports[0].NodePort - for _, node := range nodes.Items { - hasEndpoint := proxyIpvsMode || hasEndpoint(node.Name, localEtpHostEndpoints) - for _, pod := range hostPods.Items { - shouldSucceed := hasEndpoint || pod.Spec.NodeName == node.Name - for _, nodeIP := range nodeIPs(node) { - checkService(checkCount, shouldSucceed, "kubectl", strings.Fields(kubectlArgs(pod.Name, nodeIP, port))...) - } - } - } - }) - - It("external to NodePort", func() { - /* if ciliumChaining { - return - }*/ - - port := localEtpHostService.Spec.Ports[0].NodePort - for _, node := range nodes.Items { - shouldSucceed := proxyIpvsMode || hasEndpoint(node.Name, localEtpHostEndpoints) - for _, nodeIP := range nodeIPs(node) { - checkService(checkCount, shouldSucceed, "docker", append(dockerArgs, strings.Fields(curlArgs(nodeIP, port))...)...) - } - } - }) - }) -}) diff --git a/test/e2e/subnet/normal.go b/test/e2e/subnet/normal.go deleted file mode 100644 index 254c7a748c1..00000000000 --- a/test/e2e/subnet/normal.go +++ /dev/null @@ -1,685 +0,0 @@ -package subnet - -import ( - "context" - "fmt" - "math" - "os" - "os/exec" - "strings" - "time" - - corev1 "k8s.io/api/core/v1" - k8serrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/klog/v2" - - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" - - kubeovn "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" - "github.com/kubeovn/kube-ovn/pkg/util" - "github.com/kubeovn/kube-ovn/test/e2e/framework" -) - -var _ = Describe("[Subnet]", func() { - f := framework.NewFramework("subnet", fmt.Sprintf("%s/.kube/config", os.Getenv("HOME"))) - BeforeEach(func() { - if err := f.OvnClientSet.KubeovnV1().Subnets().Delete(context.Background(), f.GetName(), metav1.DeleteOptions{}); err != nil { - if !k8serrors.IsNotFound(err) { - klog.Fatalf("failed to delete subnet %s, %v", f.GetName(), err) - } - } - if err := f.KubeClientSet.CoreV1().Namespaces().Delete(context.Background(), f.GetName(), metav1.DeleteOptions{}); err != nil { - if !k8serrors.IsNotFound(err) { - klog.Fatalf("failed to delete ns %s, %v", f.GetName(), err) - } - } - }) - AfterEach(func() { - if err := f.OvnClientSet.KubeovnV1().Subnets().Delete(context.Background(), f.GetName(), metav1.DeleteOptions{}); err != nil { - if !k8serrors.IsNotFound(err) { - klog.Fatalf("failed to delete subnet %s, %v", f.GetName(), err) - } - } - if err := f.KubeClientSet.CoreV1().Namespaces().Delete(context.Background(), f.GetName(), metav1.DeleteOptions{}); err != nil { - if !k8serrors.IsNotFound(err) { - klog.Fatalf("failed to delete ns %s, %v", f.GetName(), err) - } - } - }) - - isIPv6 := strings.EqualFold(os.Getenv("IPV6"), "true") - - Describe("Create", func() { - It("only cidr", func() { - name := f.GetName() - af, cidr, protocol := 4, "11.10.0.0/16", kubeovn.ProtocolIPv4 - if isIPv6 { - af, cidr, protocol = 6, "fc00:11:10::/112", kubeovn.ProtocolIPv6 - } - gateway, _ := util.FirstIP(cidr) - - By("create subnet") - s := kubeovn.Subnet{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Labels: map[string]string{"e2e": "true"}, - }, - Spec: kubeovn.SubnetSpec{ - CIDRBlock: cidr, - Protocol: util.CheckProtocol(cidr), - }, - } - _, err := f.OvnClientSet.KubeovnV1().Subnets().Create(context.Background(), &s, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - - By("validate subnet") - err = f.WaitSubnetReady(name) - Expect(err).NotTo(HaveOccurred()) - - subnet, err := f.OvnClientSet.KubeovnV1().Subnets().Get(context.Background(), name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - Expect(subnet.Spec.Default).To(BeFalse()) - Expect(subnet.Spec.Protocol).To(Equal(protocol)) - Expect(subnet.Spec.Namespaces).To(BeEmpty()) - Expect(subnet.Spec.ExcludeIps).To(ContainElement(gateway)) - Expect(subnet.Spec.Gateway).To(Equal(gateway)) - Expect(subnet.Spec.GatewayType).To(Equal(kubeovn.GWDistributedType)) - Expect(subnet.Spec.GatewayNode).To(BeEmpty()) - Expect(subnet.Spec.NatOutgoing).To(BeFalse()) - Expect(subnet.Spec.Private).To(BeFalse()) - Expect(subnet.Spec.AllowSubnets).To(BeEmpty()) - Expect(subnet.ObjectMeta.Finalizers).To(ContainElement(util.ControllerName)) - - By("validate status") - Expect(subnet.Status.ActivateGateway).To(BeEmpty()) - if isIPv6 { - Expect(subnet.Status.V6AvailableIPs).To(Equal(math.Exp2(128-112) - 3)) - } else { - Expect(subnet.Status.V4AvailableIPs).To(Equal(math.Exp2(32-16) - 3)) - } - Expect(subnet.Status.V4UsingIPs).To(BeZero()) - Expect(subnet.Status.V6UsingIPs).To(BeZero()) - - pods, err := f.KubeClientSet.CoreV1().Pods("kube-system").List(context.Background(), metav1.ListOptions{LabelSelector: "app=ovs"}) - Expect(err).NotTo(HaveOccurred()) - for _, pod := range pods.Items { - stdout, _, err := f.ExecToPodThroughAPI(fmt.Sprintf("ip -%d route list root %s", af, subnet.Spec.CIDRBlock), "openvswitch", pod.Name, pod.Namespace, nil) - 
Expect(err).NotTo(HaveOccurred()) - Expect(stdout).To(ContainSubstring("ovn0")) - } - }) - - It("centralized gateway", func() { - name := f.GetName() - cidr := "11.11.0.0/16" - if isIPv6 { - cidr = "fc00:11:11::/112" - } - - By("create subnet") - s := kubeovn.Subnet{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Labels: map[string]string{"e2e": "true"}, - }, - Spec: kubeovn.SubnetSpec{ - CIDRBlock: cidr, - GatewayType: kubeovn.GWCentralizedType, - GatewayNode: "kube-ovn-control-plane,kube-ovn-worker,kube-ovn-worker2", - Protocol: util.CheckProtocol(cidr), - }, - } - _, err := f.OvnClientSet.KubeovnV1().Subnets().Create(context.Background(), &s, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - - By("validate subnet") - err = f.WaitSubnetReady(name) - Expect(err).NotTo(HaveOccurred()) - time.Sleep(5 * time.Second) - - subnet, err := f.OvnClientSet.KubeovnV1().Subnets().Get(context.Background(), name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - Expect(subnet.Spec.GatewayType).To(Equal(kubeovn.GWCentralizedType)) - }) - }) - - Describe("Update", func() { - It("distributed to centralized", func() { - name := f.GetName() - cidr := "11.12.0.0/16" - if isIPv6 { - cidr = "fc00:11:12::/112" - } - - By("create subnet") - s := &kubeovn.Subnet{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Labels: map[string]string{"e2e": "true"}, - }, - Spec: kubeovn.SubnetSpec{ - CIDRBlock: cidr, - Protocol: util.CheckProtocol(cidr), - }, - } - _, err := f.OvnClientSet.KubeovnV1().Subnets().Create(context.Background(), s, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - - err = f.WaitSubnetReady(name) - Expect(err).NotTo(HaveOccurred()) - - for i := 0; i < 3; i++ { - s, err = f.OvnClientSet.KubeovnV1().Subnets().Get(context.Background(), name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - s.Spec.GatewayType = kubeovn.GWCentralizedType - s.Spec.GatewayNode = "kube-ovn-control-plane,kube-ovn-worker,kube-ovn-worker2" - _, err = f.OvnClientSet.KubeovnV1().Subnets().Update(context.Background(), s, metav1.UpdateOptions{}) - if err == nil { - break - } - } - Expect(err).NotTo(HaveOccurred()) - - time.Sleep(5 * time.Second) - s, err = f.OvnClientSet.KubeovnV1().Subnets().Get(context.Background(), name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - - Expect(s.Spec.GatewayType).To(Equal(kubeovn.GWCentralizedType)) - }) - }) - - Describe("Delete", func() { - It("normal deletion", func() { - name := f.GetName() - cidr := "11.13.0.0/16" - if isIPv6 { - cidr = "fc00:11:13::/112" - } - By("create subnet") - s := kubeovn.Subnet{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Labels: map[string]string{"e2e": "true"}, - }, - Spec: kubeovn.SubnetSpec{ - CIDRBlock: cidr, - Protocol: util.CheckProtocol(cidr), - }, - } - _, err := f.OvnClientSet.KubeovnV1().Subnets().Create(context.Background(), &s, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - - time.Sleep(5 * time.Second) - err = f.OvnClientSet.KubeovnV1().Subnets().Delete(context.Background(), name, metav1.DeleteOptions{}) - Expect(err).NotTo(HaveOccurred()) - - time.Sleep(5 * time.Second) - pods, err := f.KubeClientSet.CoreV1().Pods("kube-system").List(context.Background(), metav1.ListOptions{LabelSelector: "app=ovs"}) - Expect(err).NotTo(HaveOccurred()) - for _, pod := range pods.Items { - stdout, _, err := f.ExecToPodThroughAPI("ip route", "openvswitch", pod.Name, pod.Namespace, nil) - Expect(err).NotTo(HaveOccurred()) - Expect(stdout).NotTo(ContainSubstring(s.Spec.CIDRBlock)) - } - }) - }) - 
- Describe("cidr with nonstandard style", func() { - It("cidr ends with nonzero", func() { - name := f.GetName() - cidr := "11.14.0.10/16" - if isIPv6 { - cidr = "fc00:11:14::/112" - } - By("create subnet") - s := &kubeovn.Subnet{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Labels: map[string]string{"e2e": "true"}, - }, - Spec: kubeovn.SubnetSpec{ - CIDRBlock: cidr, - Protocol: util.CheckProtocol(cidr), - }, - } - - _, err := f.OvnClientSet.KubeovnV1().Subnets().Create(context.Background(), s, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - - err = f.WaitSubnetReady(name) - Expect(err).NotTo(HaveOccurred()) - - s, err = f.OvnClientSet.KubeovnV1().Subnets().Get(context.Background(), name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - if !isIPv6 { - Expect(s.Spec.CIDRBlock).To(Equal("11.14.0.0/16")) - } else { - Expect(s.Spec.CIDRBlock).To(Equal("fc00:11:14::/112")) - } - - }) - }) - - Describe("available ip calculation", func() { - It("no available cidr", func() { - name := f.GetName() - s := &kubeovn.Subnet{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Labels: map[string]string{"e2e": "true"}, - }, - Spec: kubeovn.SubnetSpec{ - CIDRBlock: "19.0.0.0/31", - ExcludeIps: []string{"179.17.0.0..179.17.0.10"}, - Protocol: util.CheckProtocol("19.0.0.0/31"), - }, - } - _, err := f.OvnClientSet.KubeovnV1().Subnets().Create(context.Background(), s, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - - err = f.WaitSubnetReady(name) - Expect(err).NotTo(HaveOccurred()) - - s, err = f.OvnClientSet.KubeovnV1().Subnets().Get(context.Background(), name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - Expect(s.Status.V4AvailableIPs).To(Equal(float64(0))) - }) - - It("small cidr", func() { - name := f.GetName() - s := &kubeovn.Subnet{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Labels: map[string]string{"e2e": "true"}, - }, - Spec: kubeovn.SubnetSpec{ - CIDRBlock: "29.0.0.0/30", - ExcludeIps: []string{"179.17.0.0..179.17.0.10"}, - Protocol: util.CheckProtocol("29.0.0.0/30"), - }, - } - _, err := f.OvnClientSet.KubeovnV1().Subnets().Create(context.Background(), s, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - - err = f.WaitSubnetReady(name) - Expect(err).NotTo(HaveOccurred()) - - s, err = f.OvnClientSet.KubeovnV1().Subnets().Get(context.Background(), name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - Expect(s.Status.V4AvailableIPs).To(Equal(float64(1))) - }) - - It("with excludeips", func() { - name := f.GetName() - s := &kubeovn.Subnet{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Labels: map[string]string{"e2e": "true"}, - }, - Spec: kubeovn.SubnetSpec{ - CIDRBlock: "179.17.0.0/24", - ExcludeIps: []string{"179.17.0.0..179.17.0.10"}, - Protocol: util.CheckProtocol("179.17.0.0/24"), - }, - } - _, err := f.OvnClientSet.KubeovnV1().Subnets().Create(context.Background(), s, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - - err = f.WaitSubnetReady(name) - Expect(err).NotTo(HaveOccurred()) - - s, err = f.OvnClientSet.KubeovnV1().Subnets().Get(context.Background(), name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - Expect(s.Status.V4AvailableIPs).To(Equal(float64(244))) - }) - }) - - Describe("External Egress Gateway", func() { - It("centralized gateway with external egress gateway", func() { - nodes, err := f.KubeClientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{}) - Expect(err).NotTo(HaveOccurred()) - Expect(nodes).NotTo(BeNil()) - Expect(nodes.Items).NotTo(BeEmpty()) - 
- for _, node := range nodes.Items { - Expect(node.Status.Addresses).NotTo(BeEmpty()) - } - - name := f.GetName() - priority, tableID := uint32(1001), uint32(1002) - - af, cidr := 4, "11.15.0.0/16" - if isIPv6 { - af, cidr = 6, "fc00:11:15::/112" - } - - var egw string - nodeIPv4, nodeIPv6 := util.GetNodeInternalIP(nodes.Items[0]) - if isIPv6 { - egw, _ = util.FirstIP(fmt.Sprintf("%s/%d", nodeIPv6, 64)) - } else { - egw, _ = util.FirstIP(fmt.Sprintf("%s/%d", nodeIPv4, 16)) - } - - gatewayNodes := make([]string, 0, 2) - nodeIPs := make(map[string]string) - for i := 0; i < 2 && i < len(nodes.Items); i++ { - gatewayNodes = append(gatewayNodes, nodes.Items[i].Name) - nodeIPv4, nodeIPv6 := util.GetNodeInternalIP(nodes.Items[i]) - if nodeIPv4 != "" { - nodeIPs[nodeIPv4] = gatewayNodes[i] - } - if nodeIPv6 != "" { - nodeIPs[nodeIPv6] = gatewayNodes[i] - } - } - - By("create subnet") - subnet := &kubeovn.Subnet{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Labels: map[string]string{"e2e": "true"}, - }, - Spec: kubeovn.SubnetSpec{ - CIDRBlock: cidr, - GatewayType: kubeovn.GWCentralizedType, - GatewayNode: strings.Join(gatewayNodes, ","), - ExternalEgressGateway: egw, - PolicyRoutingPriority: priority, - PolicyRoutingTableID: tableID, - Protocol: util.CheckProtocol(cidr), - }, - } - _, err = f.OvnClientSet.KubeovnV1().Subnets().Create(context.Background(), subnet, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - - By("validate subnet") - err = f.WaitSubnetReady(name) - Expect(err).NotTo(HaveOccurred()) - - subnet, err = f.OvnClientSet.KubeovnV1().Subnets().Get(context.Background(), name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - Expect(subnet.Spec.GatewayType).To(Equal(kubeovn.GWCentralizedType)) - Expect(subnet.Spec.ExternalEgressGateway).To(Equal(egw)) - Expect(subnet.Spec.PolicyRoutingPriority).To(Equal(priority)) - Expect(subnet.Spec.PolicyRoutingTableID).To(Equal(tableID)) - time.Sleep(5 * time.Second) - - ovsPods, err := f.KubeClientSet.CoreV1().Pods("kube-system").List(context.Background(), metav1.ListOptions{LabelSelector: "app=ovs"}) - Expect(err).NotTo(HaveOccurred()) - - rulePrefix := fmt.Sprintf("%d:", priority) - ruleSuffix := fmt.Sprintf("from %s lookup %d", subnet.Spec.CIDRBlock, tableID) - routePrefix := fmt.Sprintf("default via %s ", egw) - - for _, pod := range ovsPods.Items { - if nodeIPs[pod.Status.HostIP] == "" { - continue - } - - stdout, _, err := f.ExecToPodThroughAPI(fmt.Sprintf("ip -%d rule show", af), "openvswitch", pod.Name, pod.Namespace, nil) - Expect(err).NotTo(HaveOccurred()) - - var found bool - rules := strings.Split(stdout, "\n") - for _, rule := range rules { - if strings.HasPrefix(rule, rulePrefix) && strings.HasSuffix(rule, ruleSuffix) { - found = true - break - } - } - Expect(found).To(BeTrue()) - - stdout, _, err = f.ExecToPodThroughAPI(fmt.Sprintf("ip -%d route show table %d", af, tableID), "openvswitch", pod.Name, pod.Namespace, nil) - Expect(err).NotTo(HaveOccurred()) - Expect(stdout).To(HavePrefix(routePrefix)) - } - - By("dis-ready gateway node") - gwNodesResult, err := f.OvnClientSet.KubeovnV1().Subnets().Get(context.Background(), name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - gatewayNodes = strings.Split(gwNodesResult.Spec.GatewayNode, ",") - - cmd := "docker exec -i " + gatewayNodes[0] + " systemctl stop kubelet" - _, err = exec.Command("bash", "-c", cmd).CombinedOutput() - Expect(err).NotTo(HaveOccurred()) - - time.Sleep(time.Second * 10) - for _, pod := range ovsPods.Items { - if 
nodeIPs[pod.Status.HostIP] == "" || pod.Spec.NodeName == gatewayNodes[0] { - continue - } - - stdout, _, err := f.ExecToPodThroughAPI(fmt.Sprintf("ip -%d rule show", af), "openvswitch", pod.Name, pod.Namespace, nil) - Expect(err).NotTo(HaveOccurred()) - - var found bool - rules := strings.Split(stdout, "\n") - for _, rule := range rules { - if strings.HasPrefix(rule, rulePrefix) && strings.HasSuffix(rule, ruleSuffix) { - found = true - break - } - } - Expect(found).To(BeTrue()) - - stdout, _, err = f.ExecToPodThroughAPI(fmt.Sprintf("ip -%d route show table %d", af, tableID), "openvswitch", pod.Name, pod.Namespace, nil) - Expect(err).NotTo(HaveOccurred()) - Expect(stdout).To(HavePrefix(routePrefix)) - } - - cmd = "docker exec -i " + gatewayNodes[0] + " systemctl restart kubelet" - _, err = exec.Command("bash", "-c", cmd).CombinedOutput() - Expect(err).NotTo(HaveOccurred()) - - time.Sleep(time.Second * 30) - - By("delete subnet") - err = f.OvnClientSet.KubeovnV1().Subnets().Delete(context.Background(), name, metav1.DeleteOptions{}) - Expect(err).NotTo(HaveOccurred()) - time.Sleep(5 * time.Second) - - for _, pod := range ovsPods.Items { - if nodeIPs[pod.Status.HostIP] == "" { - continue - } - - stdout, _, err := f.ExecToPodThroughAPI(fmt.Sprintf("ip -%d rule show", af), "openvswitch", pod.Name, pod.Namespace, nil) - Expect(err).NotTo(HaveOccurred()) - - var found bool - rules := strings.Split(stdout, "\n") - for _, rule := range rules { - if strings.HasPrefix(rule, rulePrefix) && strings.HasSuffix(rule, ruleSuffix) { - found = true - break - } - } - Expect(found).NotTo(BeTrue()) - - stdout, _, err = f.ExecToPodThroughAPI(fmt.Sprintf("ip -%d route show table %d", af, tableID), "openvswitch", pod.Name, pod.Namespace, nil) - Expect(err).NotTo(HaveOccurred()) - Expect(stdout).NotTo(HavePrefix(routePrefix)) - } - }) - - It("distributed gateway with external egress gateway", func() { - nodes, err := f.KubeClientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{}) - Expect(err).NotTo(HaveOccurred()) - Expect(nodes).NotTo(BeNil()) - Expect(nodes.Items).NotTo(BeEmpty()) - - for _, node := range nodes.Items { - Expect(node.Status.Addresses).NotTo(BeEmpty()) - } - - By("create namespace") - namespace := f.GetName() - _, err = f.KubeClientSet.CoreV1().Namespaces().Create(context.Background(), &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: namespace, - Labels: map[string]string{"e2e": "true"}, - }, - }, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - - name := f.GetName() - priority, tableID := uint32(1003), uint32(1004) - - af, cidr := 4, "11.16.0.0/16" - if isIPv6 { - af, cidr = 6, "fc00:11:16::/112" - } - - var selectedNode *corev1.Node - for i, node := range nodes.Items { - if node.Spec.Unschedulable { - continue - } - - var unschedulable bool - for _, t := range node.Spec.Taints { - if t.Effect == corev1.TaintEffectNoSchedule { - unschedulable = true - break - } - } - if !unschedulable { - selectedNode = &nodes.Items[i] - break - } - } - Expect(selectedNode).NotTo(BeNil()) - - var egw string - nodeIPv4, nodeIPv6 := util.GetNodeInternalIP(*selectedNode) - if isIPv6 { - egw, _ = util.FirstIP(fmt.Sprintf("%s/%d", nodeIPv6, 64)) - } else { - egw, _ = util.FirstIP(fmt.Sprintf("%s/%d", nodeIPv4, 16)) - } - - By("create subnet") - s := kubeovn.Subnet{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Labels: map[string]string{"e2e": "true"}, - }, - Spec: kubeovn.SubnetSpec{ - CIDRBlock: cidr, - GatewayType: kubeovn.GWDistributedType, - ExternalEgressGateway: egw, - 
PolicyRoutingPriority: priority, - PolicyRoutingTableID: tableID, - Namespaces: []string{namespace}, - Protocol: util.CheckProtocol(cidr), - }, - } - _, err = f.OvnClientSet.KubeovnV1().Subnets().Create(context.Background(), &s, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - - By("validate subnet") - err = f.WaitSubnetReady(name) - Expect(err).NotTo(HaveOccurred()) - - subnet, err := f.OvnClientSet.KubeovnV1().Subnets().Get(context.Background(), name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - Expect(subnet.Spec.GatewayType).To(Equal(kubeovn.GWDistributedType)) - Expect(subnet.Spec.ExternalEgressGateway).To(Equal(egw)) - Expect(subnet.Spec.PolicyRoutingPriority).To(Equal(priority)) - Expect(subnet.Spec.PolicyRoutingTableID).To(Equal(tableID)) - - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: name, - Image: "kubeovn/pause:3.2", - ImagePullPolicy: corev1.PullIfNotPresent, - }, - }, - NodeSelector: map[string]string{"kubernetes.io/hostname": selectedNode.Name}, - }, - } - - By("create pod") - _, err = f.KubeClientSet.CoreV1().Pods(namespace).Create(context.Background(), pod, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - - pod, err = f.WaitPodReady(name, namespace) - Expect(err).NotTo(HaveOccurred()) - - ovsPods, err := f.KubeClientSet.CoreV1().Pods("kube-system").List(context.Background(), metav1.ListOptions{LabelSelector: "app=ovs"}) - Expect(err).NotTo(HaveOccurred()) - - rulePrefix := fmt.Sprintf("%d:", priority) - ruleSuffix := fmt.Sprintf("from %s lookup %d", pod.Status.PodIP, tableID) - routePrefix := fmt.Sprintf("default via %s ", egw) - - var ovsPod *corev1.Pod - for i := range ovsPods.Items { - if ovsPods.Items[i].Spec.NodeName == selectedNode.Name { - ovsPod = &ovsPods.Items[i] - break - } - } - Expect(ovsPod).NotTo(BeNil()) - - stdout, _, err := f.ExecToPodThroughAPI(fmt.Sprintf("ip -%d rule show", af), "openvswitch", ovsPod.Name, ovsPod.Namespace, nil) - Expect(err).NotTo(HaveOccurred()) - - var found bool - rules := strings.Split(stdout, "\n") - for _, rule := range rules { - if strings.HasPrefix(rule, rulePrefix) && strings.HasSuffix(rule, ruleSuffix) { - found = true - break - } - } - Expect(found).To(BeTrue()) - - stdout, _, err = f.ExecToPodThroughAPI(fmt.Sprintf("ip -%d route show table %d", af, tableID), "openvswitch", ovsPod.Name, ovsPod.Namespace, nil) - Expect(err).NotTo(HaveOccurred()) - Expect(stdout).To(HavePrefix(routePrefix)) - - By("delete pod") - err = f.KubeClientSet.CoreV1().Pods(namespace).Delete(context.Background(), name, metav1.DeleteOptions{}) - Expect(err).NotTo(HaveOccurred()) - - err = f.WaitPodDeleted(name, namespace) - Expect(err).NotTo(HaveOccurred()) - - stdout, _, err = f.ExecToPodThroughAPI(fmt.Sprintf("ip -%d rule show", af), "openvswitch", ovsPod.Name, ovsPod.Namespace, nil) - Expect(err).NotTo(HaveOccurred()) - - found = false - rules = strings.Split(stdout, "\n") - for _, rule := range rules { - if strings.HasPrefix(rule, rulePrefix) && strings.HasSuffix(rule, ruleSuffix) { - found = true - break - } - } - Expect(found).NotTo(BeTrue()) - - stdout, _, err = f.ExecToPodThroughAPI(fmt.Sprintf("ip -%d route show table %d", af, tableID), "openvswitch", ovsPod.Name, ovsPod.Namespace, nil) - Expect(err).NotTo(HaveOccurred()) - Expect(stdout).To(HavePrefix(routePrefix)) - - By("delete subnet") - err = f.OvnClientSet.KubeovnV1().Subnets().Delete(context.Background(), name, 
metav1.DeleteOptions{}) - Expect(err).NotTo(HaveOccurred()) - time.Sleep(5 * time.Second) - - stdout, _, err = f.ExecToPodThroughAPI(fmt.Sprintf("ip -%d route show table %d", af, tableID), "openvswitch", ovsPod.Name, ovsPod.Namespace, nil) - Expect(err).NotTo(HaveOccurred()) - Expect(stdout).NotTo(HavePrefix(routePrefix)) - }) - }) -}) diff --git a/test/e2e/underlay/underlay.go b/test/e2e/underlay/underlay.go deleted file mode 100644 index 9c02e9ac1de..00000000000 --- a/test/e2e/underlay/underlay.go +++ /dev/null @@ -1,866 +0,0 @@ -package underlay - -import ( - "context" - "fmt" - "math/rand" - "os" - "os/exec" - "strings" - "time" - - corev1 "k8s.io/api/core/v1" - k8serrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/klog/v2" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - - kubeovn "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" - "github.com/kubeovn/kube-ovn/pkg/util" - "github.com/kubeovn/kube-ovn/test/e2e/framework" -) - -const ( - UnderlayInterface = "eth1" - - ProviderNetwork = "net1" - Vlan = "vlan-e2e" - Subnet = "e2e-underlay" - Namespace = "underlay" - - testImage = "kubeovn/pause:3.2" -) - -var ( - ExchangeLinkName bool - - VlanID = os.Getenv("VLAN_ID") - - cidr string - nodeIPs []string - - nodeMac = make(map[string]string) - nodeAddrs = make(map[string][]string) - nodeRoutes = make(map[string][]string) - nodeMTU = make(map[string]int) -) - -func init() { - rand.Seed(time.Now().UnixNano()) - ExchangeLinkName = rand.Intn(2) != 0 -} - -func SetCIDR(s string) { - cidr = s -} -func AddNodeIP(ip string) { - nodeIPs = append(nodeIPs, ip) -} - -func SetNodeMac(node, mac string) { - nodeMac[node] = mac -} -func AddNodeAddrs(node, addr string) { - nodeAddrs[node] = append(nodeAddrs[node], addr) -} -func AddNodeRoutes(node, route string) { - nodeRoutes[node] = append(nodeRoutes[node], route) -} -func SetNodeMTU(node string, mtu int) { - nodeMTU[node] = mtu -} - -var _ = Describe("[Underlay]", func() { - f := framework.NewFramework("underlay", fmt.Sprintf("%s/.kube/config", os.Getenv("HOME"))) - - Context("[Provider Network]", func() { - It("normal", func() { - By("validate node labels") - nodes, err := f.KubeClientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{}) - Expect(err).NotTo(HaveOccurred()) - - for _, node := range nodes.Items { - Expect(node.Labels[fmt.Sprintf(util.ProviderNetworkExcludeTemplate, ProviderNetwork)]).To(BeEmpty()) - Expect(node.Labels[fmt.Sprintf(util.ProviderNetworkInterfaceTemplate, ProviderNetwork)]).To(Equal(UnderlayInterface)) - Expect(node.Labels[fmt.Sprintf(util.ProviderNetworkReadyTemplate, ProviderNetwork)]).To(Equal("true")) - Expect(node.Labels[fmt.Sprintf(util.ProviderNetworkMtuTemplate, ProviderNetwork)]).NotTo(BeEmpty()) - } - - By("validate OVS bridge") - ovsPods, err := f.KubeClientSet.CoreV1().Pods("kube-system").List(context.Background(), metav1.ListOptions{LabelSelector: "app=ovs"}) - Expect(err).NotTo(HaveOccurred()) - Expect(ovsPods).NotTo(BeNil()) - for _, node := range nodes.Items { - var ovsPod *corev1.Pod - for _, pod := range ovsPods.Items { - if pod.Spec.NodeName == node.Name { - ovsPod = &pod - break - } - } - Expect(ovsPod).NotTo(BeNil()) - - nic, br := UnderlayInterface, util.ExternalBridgeName(ProviderNetwork) - if ExchangeLinkName { - nic, br = br, nic - } - stdout, _, err := f.ExecToPodThroughAPI("ip addr show "+nic, "openvswitch", ovsPod.Name, ovsPod.Namespace, nil) - Expect(err).NotTo(HaveOccurred()) - - addrFound := make([]bool, 
len(nodeAddrs[node.Name])) - for _, s := range strings.Split(stdout, "\n") { - s = strings.TrimSpace(s) - for i, addr := range nodeAddrs[node.Name] { - if addrFound[i] { - continue - } - if strings.HasPrefix(s, fmt.Sprintf("inet %s ", addr)) || strings.HasPrefix(s, fmt.Sprintf("inet6 %s ", addr)) { - addrFound[i] = true - GinkgoWriter.Printf("found node %s address %s: '%s'\n", node.Name, addr, s) - break - } - } - } - for _, found := range addrFound { - Expect(found).NotTo(BeTrue()) - } - - stdout, _, err = f.ExecToPodThroughAPI("ovs-vsctl list-ports "+br, "openvswitch", ovsPod.Name, ovsPod.Namespace, nil) - Expect(err).NotTo(HaveOccurred()) - - var portFound bool - for _, port := range strings.Split(stdout, "\n") { - if port == nic { - portFound = true - break - } - } - Expect(portFound).To(BeTrue()) - - stdout, _, err = f.ExecToPodThroughAPI("ip addr show "+br, "openvswitch", ovsPod.Name, ovsPod.Namespace, nil) - Expect(err).NotTo(HaveOccurred()) - - var isUp bool - addrFound = make([]bool, len(nodeAddrs[node.Name])) - for i, s := range strings.Split(stdout, "\n") { - if i == 0 { - idx1, idx2 := strings.IndexRune(s, '<'), strings.IndexRune(s, '>') - if idx1 > 0 && idx2 > idx1+1 { - for _, state := range strings.Split(s[idx1+1:idx2], ",") { - if state == "UP" { - isUp = true - break - } - } - } - continue - } - if VlanID == "" { - if i == 1 { - if mac := nodeMac[node.Name]; mac != "" { - Expect(strings.TrimSpace(s)).To(HavePrefix("link/ether %s ", mac)) - continue - } - } - - s = strings.TrimSpace(s) - for i, addr := range nodeAddrs[node.Name] { - if addrFound[i] { - continue - } - if strings.HasPrefix(s, fmt.Sprintf("inet %s ", addr)) || strings.HasPrefix(s, fmt.Sprintf("inet6 %s ", addr)) { - addrFound[i] = true - break - } - } - } - } - Expect(isUp).To(BeTrue()) - if VlanID == "" { - for _, found := range addrFound { - Expect(found).To(BeTrue()) - } - } - } - }) - - It("node annotation", func() { - By("add exclude annotation") - nodes, err := f.KubeClientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{}) - Expect(err).NotTo(HaveOccurred()) - - for _, node := range nodes.Items { - newNode := node.DeepCopy() - newNode.Annotations[fmt.Sprintf(util.ProviderNetworkExcludeTemplate, ProviderNetwork)] = "true" - _, err = f.KubeClientSet.CoreV1().Nodes().Update(context.Background(), newNode, metav1.UpdateOptions{}) - Expect(err).NotTo(HaveOccurred()) - } - - By("wait provider network to be ready") - time.Sleep(3 * time.Second) - err = f.WaitProviderNetworkReady(ProviderNetwork) - Expect(err).NotTo(HaveOccurred()) - - By("validate provider network") - pn, err := f.OvnClientSet.KubeovnV1().ProviderNetworks().Get(context.Background(), ProviderNetwork, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - for _, node := range nodes.Items { - Expect(util.ContainsString(pn.Spec.ExcludeNodes, node.Name)).To(BeTrue()) - } - - By("validate node annotation") - nodes, err = f.KubeClientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{}) - Expect(err).NotTo(HaveOccurred()) - - for _, node := range nodes.Items { - Expect(node.Annotations).NotTo(HaveKey(fmt.Sprintf(util.ProviderNetworkExcludeTemplate, ProviderNetwork))) - } - - By("restore provider network") - pn, err = f.OvnClientSet.KubeovnV1().ProviderNetworks().Get(context.Background(), ProviderNetwork, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - - newPn := pn.DeepCopy() - newPn.Spec.ExcludeNodes = nil - _, err = f.OvnClientSet.KubeovnV1().ProviderNetworks().Update(context.Background(), newPn, 
metav1.UpdateOptions{}) - Expect(err).NotTo(HaveOccurred()) - - By("wait provider network to be ready") - time.Sleep(3 * time.Second) - err = f.WaitProviderNetworkReady(ProviderNetwork) - Expect(err).NotTo(HaveOccurred()) - }) - }) - - Context("[Subnet]", func() { - BeforeEach(func() { - err := f.OvnClientSet.KubeovnV1().Subnets().Delete(context.Background(), f.GetName(), metav1.DeleteOptions{}) - if err != nil && !k8serrors.IsNotFound(err) { - klog.Fatalf("failed to delete subnet %s: %v", f.GetName(), err) - } - }) - AfterEach(func() { - err := f.OvnClientSet.KubeovnV1().Subnets().Delete(context.Background(), f.GetName(), metav1.DeleteOptions{}) - if err != nil && !k8serrors.IsNotFound(err) { - klog.Fatalf("failed to delete subnet %s: %v", f.GetName(), err) - } - }) - - It("logical gateway", func() { - name := f.GetName() - - By("create subnet") - subnet := &kubeovn.Subnet{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Labels: map[string]string{"e2e": "true"}, - }, - Spec: kubeovn.SubnetSpec{ - CIDRBlock: "99.11.0.0/16", - Vlan: Vlan, - LogicalGateway: true, - Protocol: util.CheckProtocol(cidr), - }, - } - _, err := f.OvnClientSet.KubeovnV1().Subnets().Create(context.Background(), subnet, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - - By("validate subnet") - err = f.WaitSubnetReady(subnet.Name) - Expect(err).NotTo(HaveOccurred()) - - By("validate OVN logical router port") - ovnPods, err := f.KubeClientSet.CoreV1().Pods("kube-system").List(context.Background(), metav1.ListOptions{LabelSelector: "app=ovn-central,ovn-nb-leader=true"}) - Expect(err).NotTo(HaveOccurred()) - Expect(ovnPods).NotTo(BeNil()) - Expect(ovnPods.Items).To(HaveLen(1)) - - ovnPod := ovnPods.Items[0] - lsp := fmt.Sprintf("%s-%s", name, util.DefaultVpc) - cmd := fmt.Sprintf("ovn-nbctl --no-heading --columns=_uuid find logical_switch_port name=%s", lsp) - uuid, _, err := f.ExecToPodThroughAPI(cmd, "ovn-central", ovnPod.Name, ovnPod.Namespace, nil) - Expect(err).NotTo(HaveOccurred()) - Expect(uuid).NotTo(BeEmpty()) - - lrp := fmt.Sprintf("%s-%s", util.DefaultVpc, name) - cmd = fmt.Sprintf("ovn-nbctl --no-heading --columns=_uuid find logical_router_port name=%s", lrp) - uuid, _, err = f.ExecToPodThroughAPI(cmd, "ovn-central", ovnPod.Name, ovnPod.Namespace, nil) - Expect(err).NotTo(HaveOccurred()) - Expect(uuid).NotTo(BeEmpty()) - }) - - It("disable gateway check", func() { - name := f.GetName() - - By("create subnet") - subnet := &kubeovn.Subnet{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Labels: map[string]string{"e2e": "true"}, - }, - Spec: kubeovn.SubnetSpec{ - CIDRBlock: "99.12.0.0/16", - Vlan: Vlan, - DisableGatewayCheck: true, - Protocol: util.CheckProtocol(cidr), - }, - } - _, err := f.OvnClientSet.KubeovnV1().Subnets().Create(context.Background(), subnet, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - - By("validate subnet") - err = f.WaitSubnetReady(subnet.Name) - Expect(err).NotTo(HaveOccurred()) - - By("create pod") - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: Namespace, - Annotations: map[string]string{util.LogicalSwitchAnnotation: subnet.Name}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: name, - Image: "kubeovn/pause:3.2", - ImagePullPolicy: corev1.PullIfNotPresent, - }, - }, - }, - } - _, err = f.KubeClientSet.CoreV1().Pods(pod.Namespace).Create(context.Background(), pod, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - - _, err = f.WaitPodReady(pod.Name, pod.Namespace) - 
Expect(err).NotTo(HaveOccurred()) - - By("delete pod") - err = f.KubeClientSet.CoreV1().Pods(pod.Namespace).Delete(context.Background(), pod.Name, metav1.DeleteOptions{}) - Expect(err).NotTo(HaveOccurred()) - }) - }) - - Context("[Pod]", func() { - var cniPods map[string]corev1.Pod - BeforeEach(func() { - nodeList, err := f.KubeClientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{}) - Expect(err).NotTo(HaveOccurred()) - Expect(nodeList).NotTo(BeNil()) - - podList, err := f.KubeClientSet.CoreV1().Pods("kube-system").List(context.Background(), metav1.ListOptions{LabelSelector: "app=kube-ovn-cni"}) - Expect(err).NotTo(HaveOccurred()) - Expect(podList).NotTo(BeNil()) - Expect(len(podList.Items)).To(Equal(len(nodeList.Items))) - - cniPods = make(map[string]corev1.Pod) - for _, node := range nodeList.Items { - var cniPod *corev1.Pod - for _, pod := range podList.Items { - if pod.Spec.NodeName == node.Name { - cniPod = &pod - break - } - } - Expect(cniPod).NotTo(BeNil()) - cniPods[node.Name] = *cniPod - } - }) - - Context("[MTU]", func() { - BeforeEach(func() { - err := f.KubeClientSet.CoreV1().Pods(Namespace).Delete(context.Background(), f.GetName(), metav1.DeleteOptions{}) - if err != nil && !k8serrors.IsNotFound(err) { - klog.Fatalf("failed to delete pod %s: %v", f.GetName(), err) - } - }) - AfterEach(func() { - err := f.KubeClientSet.CoreV1().Pods(Namespace).Delete(context.Background(), f.GetName(), metav1.DeleteOptions{}) - if err != nil && !k8serrors.IsNotFound(err) { - klog.Fatalf("failed to delete pod %s: %v", f.GetName(), err) - } - }) - - It("normal", func() { - By("create pod") - var autoMount bool - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: f.GetName(), - Namespace: Namespace, - Labels: map[string]string{"e2e": "true"}, - Annotations: map[string]string{util.LogicalSwitchAnnotation: Subnet}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: f.GetName(), - Image: testImage, - ImagePullPolicy: corev1.PullIfNotPresent, - }, - }, - AutomountServiceAccountToken: &autoMount, - }, - } - _, err := f.KubeClientSet.CoreV1().Pods(Namespace).Create(context.Background(), pod, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - pod, err = f.WaitPodReady(pod.Name, Namespace) - Expect(err).NotTo(HaveOccurred()) - Expect(pod.Spec.NodeName).NotTo(BeEmpty()) - - By("get cni pod") - cniPod, ok := cniPods[pod.Spec.NodeName] - Expect(ok).To(BeTrue()) - - By("get pod's netns") - cmd := fmt.Sprintf("ovs-vsctl --no-heading --columns=external_ids find interface external-ids:pod_name=%s external-ids:pod_namespace=%s", pod.Name, Namespace) - stdout, _, err := f.ExecToPodThroughAPI(cmd, "cni-server", cniPod.Name, cniPod.Namespace, nil) - Expect(err).NotTo(HaveOccurred()) - var netns string - for _, field := range strings.Fields(stdout) { - if strings.HasPrefix(field, "pod_netns=") { - netns = strings.TrimPrefix(field, "pod_netns=") - netns = strings.Trim(netns[:len(netns)-1], `"`) - break - } - } - Expect(netns).NotTo(BeEmpty()) - - By("validate pod's MTU") - cmd = fmt.Sprintf("nsenter --net=%s ip link show eth0", netns) - stdout, _, err = f.ExecToPodThroughAPI(cmd, "cni-server", cniPod.Name, cniPod.Namespace, nil) - Expect(err).NotTo(HaveOccurred()) - Expect(stdout).To(ContainSubstring(" mtu %d ", nodeMTU[pod.Spec.NodeName])) - }) - }) - - Context("[Connectivity]", func() { - Context("[Host-Pod]", func() { - if VlanID != "" { - return - } - - BeforeEach(func() { - err := f.KubeClientSet.CoreV1().Pods(Namespace).Delete(context.Background(), 
f.GetName(), metav1.DeleteOptions{}) - if err != nil && !k8serrors.IsNotFound(err) { - klog.Fatalf("failed to delete pod %s: %v", f.GetName(), err) - } - }) - AfterEach(func() { - err := f.KubeClientSet.CoreV1().Pods(Namespace).Delete(context.Background(), f.GetName(), metav1.DeleteOptions{}) - if err != nil && !k8serrors.IsNotFound(err) { - klog.Fatalf("failed to delete pod %s: %v", f.GetName(), err) - } - }) - - It("hp", func() { - By("create pod") - var autoMount bool - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: f.GetName(), - Namespace: Namespace, - Labels: map[string]string{"e2e": "true"}, - Annotations: map[string]string{util.LogicalSwitchAnnotation: Subnet}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: f.GetName(), - Image: testImage, - ImagePullPolicy: corev1.PullIfNotPresent, - }, - }, - AutomountServiceAccountToken: &autoMount, - }, - } - _, err := f.KubeClientSet.CoreV1().Pods(Namespace).Create(context.Background(), pod, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - pod, err = f.WaitPodReady(pod.Name, Namespace) - Expect(err).NotTo(HaveOccurred()) - Expect(pod.Spec.NodeName).NotTo(BeEmpty()) - - By("get pod's netns") - cniPod := cniPods[pod.Spec.NodeName] - cmd := fmt.Sprintf("ovs-vsctl --no-heading --columns=external_ids find interface external-ids:pod_name=%s external-ids:pod_namespace=%s", pod.Name, Namespace) - stdout, _, err := f.ExecToPodThroughAPI(cmd, "cni-server", cniPod.Name, cniPod.Namespace, nil) - Expect(err).NotTo(HaveOccurred()) - var netns string - for _, field := range strings.Fields(stdout) { - if strings.HasPrefix(field, "pod_netns=") { - netns = strings.TrimPrefix(field, "pod_netns=") - netns = strings.Trim(netns[:len(netns)-1], `"`) - break - } - } - Expect(netns).NotTo(BeEmpty()) - - By("get host IP") - var hostIP string - for _, addr := range nodeAddrs[pod.Spec.NodeName] { - if util.CIDRContainIP(cidr, strings.Split(addr, "/")[0]) { - hostIP = strings.Split(addr, "/")[0] - break - } - } - Expect(hostIP).ToNot(BeEmpty()) - - By("ping host") - cmd = fmt.Sprintf("nsenter --net=%s ping -c1 -W1 %s", netns, hostIP) - stdout, _, err = f.ExecToPodThroughAPI(cmd, "cni-server", cniPod.Name, cniPod.Namespace, nil) - Expect(err).NotTo(HaveOccurred()) - Expect(stdout).To(ContainSubstring(" 0% packet loss")) - }) - }) - - Context("[Host-Host-Pod]", func() { - if VlanID != "" { - return - } - - BeforeEach(func() { - err := f.KubeClientSet.CoreV1().Pods(Namespace).Delete(context.Background(), f.GetName(), metav1.DeleteOptions{}) - if err != nil && !k8serrors.IsNotFound(err) { - klog.Fatalf("failed to delete pod %s: %v", f.GetName(), err) - } - }) - AfterEach(func() { - err := f.KubeClientSet.CoreV1().Pods(Namespace).Delete(context.Background(), f.GetName(), metav1.DeleteOptions{}) - if err != nil && !k8serrors.IsNotFound(err) { - klog.Fatalf("failed to delete pod %s: %v", f.GetName(), err) - } - }) - - It("hhp", func() { - if len(cniPods) < 2 { - return - } - - By("select nodes") - nodes := make([]string, 0, 2) - for node := range cniPods { - nodes = append(nodes, node) - if len(nodes) == 2 { - break - } - } - Expect(len(nodes)).To(Equal(2)) - - By("create pod") - var autoMount bool - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: f.GetName(), - Namespace: Namespace, - Labels: map[string]string{"e2e": "true"}, - Annotations: map[string]string{util.LogicalSwitchAnnotation: Subnet}, - }, - Spec: corev1.PodSpec{ - NodeName: nodes[0], - Containers: []corev1.Container{ - { - Name: f.GetName(), - 
Image: testImage, - ImagePullPolicy: corev1.PullIfNotPresent, - }, - }, - AutomountServiceAccountToken: &autoMount, - }, - } - _, err := f.KubeClientSet.CoreV1().Pods(Namespace).Create(context.Background(), pod, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - pod, err = f.WaitPodReady(pod.Name, Namespace) - Expect(err).NotTo(HaveOccurred()) - Expect(pod.Spec.NodeName).NotTo(BeEmpty()) - - By("get pod's netns") - cniPod := cniPods[pod.Spec.NodeName] - cmd := fmt.Sprintf("ovs-vsctl --no-heading --columns=external_ids find interface external-ids:pod_name=%s external-ids:pod_namespace=%s", pod.Name, Namespace) - stdout, _, err := f.ExecToPodThroughAPI(cmd, "cni-server", cniPod.Name, cniPod.Namespace, nil) - Expect(err).NotTo(HaveOccurred()) - var netns string - for _, field := range strings.Fields(stdout) { - if strings.HasPrefix(field, "pod_netns=") { - netns = strings.TrimPrefix(field, "pod_netns=") - netns = strings.Trim(netns[:len(netns)-1], `"`) - break - } - } - Expect(netns).NotTo(BeEmpty()) - - By("get host IP") - var hostIP string - for _, addr := range nodeAddrs[nodes[1]] { - if util.CIDRContainIP(cidr, strings.Split(addr, "/")[0]) { - hostIP = strings.Split(addr, "/")[0] - break - } - } - Expect(hostIP).ToNot(BeEmpty()) - - By("ping host") - cmd = fmt.Sprintf("nsenter --net=%s ping -c1 -W1 %s", netns, hostIP) - stdout, _, err = f.ExecToPodThroughAPI(cmd, "cni-server", cniPod.Name, cniPod.Namespace, nil) - Expect(err).NotTo(HaveOccurred()) - Expect(stdout).To(ContainSubstring(" 0% packet loss")) - }) - }) - - Context("Pod-Host-Host-Pod", func() { - BeforeEach(func() { - for i := 0; i < len(cniPods); i++ { - name := fmt.Sprintf("%s-%d", f.GetName(), i+1) - err := f.KubeClientSet.CoreV1().Pods(Namespace).Delete(context.Background(), name, metav1.DeleteOptions{}) - if err != nil && !k8serrors.IsNotFound(err) { - klog.Fatalf("failed to delete pod %s: %v", name, err) - } - } - }) - AfterEach(func() { - for i := 0; i < len(cniPods); i++ { - name := fmt.Sprintf("%s-%d", f.GetName(), i+1) - err := f.KubeClientSet.CoreV1().Pods(Namespace).Delete(context.Background(), name, metav1.DeleteOptions{}) - if err != nil && !k8serrors.IsNotFound(err) { - klog.Fatalf("failed to delete pod %s: %v", name, err) - } - } - }) - - It("phhp", func() { - if len(cniPods) < 2 { - return - } - - By("select nodes") - nodes := make([]string, 0, len(cniPods)) - for node := range cniPods { - nodes = append(nodes, node) - } - - By("create pods") - name := f.GetName() - pods := make([]*corev1.Pod, len(nodes)) - var autoMount bool - for i := range nodes { - pods[i] = &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-%d", name, i+1), - Namespace: Namespace, - Labels: map[string]string{"e2e": "true"}, - Annotations: map[string]string{util.LogicalSwitchAnnotation: Subnet}, - }, - Spec: corev1.PodSpec{ - NodeName: nodes[i], - Containers: []corev1.Container{ - { - Name: name, - Image: testImage, - ImagePullPolicy: corev1.PullIfNotPresent, - }, - }, - AutomountServiceAccountToken: &autoMount, - }, - } - _, err := f.KubeClientSet.CoreV1().Pods(Namespace).Create(context.Background(), pods[i], metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - pods[i], err = f.WaitPodReady(pods[i].Name, Namespace) - Expect(err).NotTo(HaveOccurred()) - } - - for i := range pods { - By("get pod's netns") - cmd := fmt.Sprintf("ovs-vsctl --no-heading --columns=external_ids find interface external-ids:pod_name=%s external-ids:pod_namespace=%s", pods[i].Name, Namespace) - stdout, _, err := 
f.ExecToPodThroughAPI(cmd, "cni-server", cniPods[nodes[i]].Name, cniPods[nodes[i]].Namespace, nil) - Expect(err).NotTo(HaveOccurred()) - var netns string - for _, field := range strings.Fields(stdout) { - if strings.HasPrefix(field, "pod_netns=") { - netns = strings.TrimPrefix(field, "pod_netns=") - netns = strings.Trim(netns[:len(netns)-1], `"`) - break - } - } - Expect(netns).NotTo(BeEmpty()) - - By("ping another pod") - cmd = fmt.Sprintf("nsenter --net=%s ping -c1 -W1 %s", netns, pods[(i+len(pods)+1)%len(pods)].Status.PodIP) - stdout, _, err = f.ExecToPodThroughAPI(cmd, "cni-server", cniPods[nodes[i]].Name, cniPods[nodes[i]].Namespace, nil) - Expect(err).NotTo(HaveOccurred()) - Expect(stdout).To(ContainSubstring(" 0% packet loss")) - } - }) - }) - - Context("[Overlay-Underlay]", func() { - if VlanID != "" { - return - } - - defaultSubnet, err := f.OvnClientSet.KubeovnV1().Subnets().Get(context.Background(), util.DefaultSubnet, metav1.GetOptions{}) - if err != nil && !k8serrors.IsNotFound(err) { - klog.Fatalf("failed to get subnet %s: %v", util.DefaultSubnet, err) - } - if defaultSubnet.Spec.LogicalGateway { - return - } - - overlayNamespace := "default" - - BeforeEach(func() { - err := f.KubeClientSet.CoreV1().Pods(Namespace).Delete(context.Background(), f.GetName(), metav1.DeleteOptions{}) - if err != nil && !k8serrors.IsNotFound(err) { - klog.Fatalf("failed to delete pod %s/%s: %v", Namespace, f.GetName(), err) - } - err = f.KubeClientSet.CoreV1().Pods(overlayNamespace).Delete(context.Background(), f.GetName(), metav1.DeleteOptions{}) - if err != nil && !k8serrors.IsNotFound(err) { - klog.Fatalf("failed to delete pod %s/%s: %v", overlayNamespace, f.GetName(), err) - } - }) - AfterEach(func() { - err := f.KubeClientSet.CoreV1().Pods(Namespace).Delete(context.Background(), f.GetName(), metav1.DeleteOptions{}) - if err != nil && !k8serrors.IsNotFound(err) { - klog.Fatalf("failed to delete pod %s/%s: %v", Namespace, f.GetName(), err) - } - err = f.KubeClientSet.CoreV1().Pods(overlayNamespace).Delete(context.Background(), f.GetName(), metav1.DeleteOptions{}) - if err != nil && !k8serrors.IsNotFound(err) { - klog.Fatalf("failed to delete pod %s/%s: %v", overlayNamespace, f.GetName(), err) - } - }) - - It("o2u", func() { - if strings.EqualFold(os.Getenv("IPV6"), "true") { - return - } - - By("create underlay pod") - var autoMount bool - upod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: f.GetName(), - Namespace: Namespace, - Labels: map[string]string{"e2e": "true"}, - Annotations: map[string]string{util.LogicalSwitchAnnotation: Subnet}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: f.GetName(), - Image: testImage, - ImagePullPolicy: corev1.PullIfNotPresent, - }, - }, - AutomountServiceAccountToken: &autoMount, - }, - } - _, err := f.KubeClientSet.CoreV1().Pods(upod.Namespace).Create(context.Background(), upod, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - upod, err = f.WaitPodReady(upod.Name, upod.Namespace) - Expect(err).NotTo(HaveOccurred()) - Expect(upod.Spec.NodeName).NotTo(BeEmpty()) - - By("create overlay pod") - opod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: f.GetName(), - Namespace: overlayNamespace, - Labels: map[string]string{"e2e": "true"}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: f.GetName(), - Image: testImage, - ImagePullPolicy: corev1.PullIfNotPresent, - }, - }, - AutomountServiceAccountToken: &autoMount, - }, - } - _, err = 
f.KubeClientSet.CoreV1().Pods(opod.Namespace).Create(context.Background(), opod, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - opod, err = f.WaitPodReady(opod.Name, opod.Namespace) - Expect(err).NotTo(HaveOccurred()) - - By("get overlay pod's netns") - cniPod := cniPods[opod.Spec.NodeName] - cmd := fmt.Sprintf("ovs-vsctl --no-heading --columns=external_ids find interface external-ids:pod_name=%s external-ids:pod_namespace=%s", opod.Name, opod.Namespace) - stdout, _, err := f.ExecToPodThroughAPI(cmd, "cni-server", cniPod.Name, cniPod.Namespace, nil) - Expect(err).NotTo(HaveOccurred()) - var netns string - for _, field := range strings.Fields(stdout) { - if strings.HasPrefix(field, "pod_netns=") { - netns = strings.TrimPrefix(field, "pod_netns=") - netns = strings.Trim(netns[:len(netns)-1], `"`) - break - } - } - Expect(netns).NotTo(BeEmpty()) - - By("ping underlay pod") - cmd = fmt.Sprintf("nsenter --net=%s ping -c1 -W1 %s", netns, upod.Status.PodIP) - stdout, _, err = f.ExecToPodThroughAPI(cmd, "cni-server", cniPod.Name, cniPod.Namespace, nil) - Expect(err).NotTo(HaveOccurred()) - Expect(stdout).To(ContainSubstring(" 0% packet loss")) - }) - }) - }) - }) - - Context("[kubectl-ko]", func() { - BeforeEach(func() { - err := f.KubeClientSet.CoreV1().Pods(Namespace).Delete(context.Background(), f.GetName(), metav1.DeleteOptions{}) - if err != nil && !k8serrors.IsNotFound(err) { - klog.Fatalf("failed to delete pod %s: %v", f.GetName(), err) - } - }) - AfterEach(func() { - err := f.KubeClientSet.CoreV1().Pods(Namespace).Delete(context.Background(), f.GetName(), metav1.DeleteOptions{}) - if err != nil && !k8serrors.IsNotFound(err) { - klog.Fatalf("failed to delete pod %s: %v", f.GetName(), err) - } - }) - - It("trace", func() { - By("create pod") - var autoMount bool - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: f.GetName(), - Namespace: Namespace, - Labels: map[string]string{"e2e": "true"}, - Annotations: map[string]string{util.LogicalSwitchAnnotation: Subnet}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: f.GetName(), - Image: testImage, - ImagePullPolicy: corev1.PullIfNotPresent, - }, - }, - AutomountServiceAccountToken: &autoMount, - }, - } - _, err := f.KubeClientSet.CoreV1().Pods(Namespace).Create(context.Background(), pod, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - pod, err = f.WaitPodReady(pod.Name, Namespace) - Expect(err).NotTo(HaveOccurred()) - - dst := "114.114.114.114" - if util.CheckProtocol(pod.Status.PodIP) == kubeovn.ProtocolIPv6 { - dst = "2400:3200::1" - } - - output, err := exec.Command("kubectl", "ko", "trace", fmt.Sprintf("%s/%s", Namespace, pod.Name), dst, "icmp").CombinedOutput() - Expect(err).NotTo(HaveOccurred(), string(output)) - - output, err = exec.Command("kubectl", "ko", "trace", fmt.Sprintf("%s/%s", Namespace, pod.Name), dst, "tcp", "80").CombinedOutput() - Expect(err).NotTo(HaveOccurred(), string(output)) - - output, err = exec.Command("kubectl", "ko", "trace", fmt.Sprintf("%s/%s", Namespace, pod.Name), dst, "udp", "53").CombinedOutput() - Expect(err).NotTo(HaveOccurred(), string(output)) - }) - }) -}) diff --git a/test/networkpolicy-cyclonus/cyclonus.yaml b/test/networkpolicy-cyclonus/cyclonus.yaml deleted file mode 100644 index ac3fc15cdfa..00000000000 --- a/test/networkpolicy-cyclonus/cyclonus.yaml +++ /dev/null @@ -1,25 +0,0 @@ -apiVersion: batch/v1 -kind: Job -metadata: - name: cyclonus - namespace: kube-system -spec: - template: - spec: - restartPolicy: Never - containers: - - 
-        - command:
-            - ./cyclonus
-            - generate
-            - --exclude=
-            - --include=upstream-e2e
-            - --retries=3
-            - --noisy=true
-            - --ignore-loopback=true
-            - --cleanup-namespaces=true
-            - --server-port=80
-            - --server-protocol=tcp
-          name: cyclonus
-          imagePullPolicy: IfNotPresent
-          image: mfenwick100/cyclonus:v0.5.0
-      serviceAccount: cyclonus
diff --git a/test/networkpolicy-cyclonus/start-test.sh b/test/networkpolicy-cyclonus/start-test.sh
deleted file mode 100755
index e551bd66750..00000000000
--- a/test/networkpolicy-cyclonus/start-test.sh
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/usr/bin/env bash
-
-set -eo pipefail
-set -xv
-
-# set up cyclonus
-kubectl create clusterrolebinding cyclonus --clusterrole=cluster-admin --serviceaccount=kube-system:cyclonus
-kubectl create sa cyclonus -n kube-system
-kubectl create -f ./cyclonus.yaml
-
-# don't fail on errors, so we can dump the logs.
-set +e
-
-time kubectl wait --for=condition=complete --timeout=60m -n kube-system job.batch/cyclonus
-rc=$?
-
-# grab the job logs
-LOG_FILE=$(mktemp)
-kubectl logs -n kube-system job.batch/cyclonus > "$LOG_FILE"
-cat "$LOG_FILE"
-
-# if 'failure' is in the logs, fail; otherwise succeed
-cat "$LOG_FILE" | grep "failure" > /dev/null 2>&1 && rc=1
-exit $rc
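
A minimal manual check of the policy routes that the removed external-egress-gateway cases asserted, assuming the values hard-coded in the deleted test (policyRoutingPriority 1003, policyRoutingTableID 1004, an externalEgressGateway on the subnet) and reusing the app=ovs pod label and "openvswitch" container name from that code; this is an illustrative sketch, not part of the change itself:

# Pick an ovs pod on the node under test (label taken from the removed test).
OVS_POD=$(kubectl -n kube-system get pod -l app=ovs -o jsonpath='{.items[0].metadata.name}')

# The removed test expected a rule like "1003: from <pod IP> lookup 1004".
kubectl -n kube-system exec "$OVS_POD" -c openvswitch -- ip -4 rule show

# ...and a default route via the external egress gateway in table 1004.
kubectl -n kube-system exec "$OVS_POD" -c openvswitch -- ip -4 route show table 1004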