chore: optimize e2e cases
tokers committed Jan 13, 2021
1 parent 86972ee commit 2cb331a
Showing 6 changed files with 83 additions and 21 deletions.
5 changes: 5 additions & 0 deletions test/e2e/ingress/namespace.go
@@ -19,6 +19,7 @@ import (
"encoding/json"
"fmt"
"net/http"
"time"

"github.com/api7/ingress-controller/test/e2e/scaffold"
"github.com/onsi/ginkgo"
@@ -49,6 +50,10 @@ spec:
assert.Nil(ginkgo.GinkgoT(), s.EnsureNumApisixRoutesCreated(1), "checking number of routes")
assert.Nil(ginkgo.GinkgoT(), s.EnsureNumApisixUpstreamsCreated(1), "checking number of upstreams")

// TODO When the ingress controller can reflect the lifecycle of CRDs in the
// status field, we can poll it rather than sleeping.
time.Sleep(3 * time.Second)

body := s.NewAPISIXClient().GET("/ip").WithHeader("Host", "httpbin.com").Expect().Status(http.StatusOK).Body().Raw()
var placeholder ip
err := json.Unmarshal([]byte(body), &placeholder)
14 changes: 9 additions & 5 deletions test/e2e/ingress/resourcepushing.go
@@ -49,10 +49,11 @@ spec:
assert.Nil(ginkgo.GinkgoT(), err, "Checking number of routes")
err = s.EnsureNumApisixUpstreamsCreated(1)
assert.Nil(ginkgo.GinkgoT(), err, "Checking number of upstreams")
scale := 2
err = s.ScaleHTTPBIN(scale)
assert.Nil(ginkgo.GinkgoT(), err)
time.Sleep(5 * time.Second) // wait for ingress to sync
assert.Nil(ginkgo.GinkgoT(), s.ScaleHTTPBIN(2), "scaling number of httpbin instances")
assert.Nil(ginkgo.GinkgoT(), s.WaitAllHTTPBINPoddsAvailable(), "waiting for all httpbin pods ready")
// TODO When the ingress controller can reflect the lifecycle of CRDs in the
// status field, we can poll it rather than sleeping.
time.Sleep(5 * time.Second)
ups, err := s.ListApisixUpstreams()
assert.Nil(ginkgo.GinkgoT(), err, "list upstreams error")
assert.Len(ginkgo.GinkgoT(), ups[0].Nodes, 2, "unexpected number of upstream nodes")
@@ -84,7 +85,10 @@ spec:

// remove
assert.Nil(ginkgo.GinkgoT(), s.RemoveResourceByString(apisixRoute))
time.Sleep(10 * time.Second) // wait for ingress to sync

// TODO When the ingress controller can reflect the lifecycle of CRDs in the
// status field, we can poll it rather than sleeping.
time.Sleep(10 * time.Second)
ups, err := s.ListApisixUpstreams()
assert.Nil(ginkgo.GinkgoT(), err, "list upstreams error")
assert.Len(ginkgo.GinkgoT(), ups, 0, "upstreams should have been removed")
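
Note: the TODO above names the eventual fix (polling the CRD status field once the controller reports it). Until that exists, a similar effect can be approximated by polling the APISIX admin API the test already queries. This is only a sketch, not part of the commit: it assumes the test body has the scaffold s, the assert/ginkgo helpers, and the k8s.io/apimachinery/pkg/util/wait package in scope, and it reuses the same backoff shape as the scaffold's waitExponentialBackoff.

// Sketch: wait for the upstream to pick up the scaled httpbin replicas by
// polling ListApisixUpstreams instead of using a fixed time.Sleep.
err = wait.ExponentialBackoff(wait.Backoff{
	Duration: 500 * time.Millisecond,
	Factor:   2,
	Steps:    8,
}, func() (bool, error) {
	ups, err := s.ListApisixUpstreams()
	if err != nil {
		return false, err
	}
	return len(ups) == 1 && len(ups[0].Nodes) == 2, nil
})
assert.Nil(ginkgo.GinkgoT(), err, "waiting for upstream nodes to converge")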
18 changes: 13 additions & 5 deletions test/e2e/ingress/sanity.go
@@ -52,6 +52,11 @@ var _ = ginkgo.Describe("single-route", func() {
assert.Nil(ginkgo.GinkgoT(), err, "checking number of routes")
err = s.EnsureNumApisixUpstreamsCreated(1)
assert.Nil(ginkgo.GinkgoT(), err, "checking number of upstreams")

// TODO When the ingress controller can reflect the lifecycle of CRDs in the
// status field, we can poll it rather than sleeping.
time.Sleep(3 * time.Second)

body := s.NewAPISIXClient().GET("/ip").WithHeader("Host", "httpbin.com").Expect().Status(http.StatusOK).Body().Raw()
var placeholder ip
err = json.Unmarshal([]byte(body), &placeholder)
@@ -92,6 +97,9 @@ var _ = ginkgo.Describe("double-routes", func() {
assert.Nil(ginkgo.GinkgoT(), err, "checking number of routes")
err = s.EnsureNumApisixUpstreamsCreated(1)
assert.Nil(ginkgo.GinkgoT(), err, "checking number of upstreams")
// TODO When the ingress controller can reflect the lifecycle of CRDs in the
// status field, we can poll it rather than sleeping.
time.Sleep(3 * time.Second)
body := s.NewAPISIXClient().GET("/ip").WithHeader("Host", "httpbin.com").Expect().Status(http.StatusOK).Body().Raw()
var placeholder ip
err = json.Unmarshal([]byte(body), &placeholder)
@@ -117,7 +125,7 @@ var _ = ginkgo.Describe("leader election", func() {
pods, err := s.GetIngressPodDetails()
assert.Nil(ginkgo.GinkgoT(), err)
assert.Len(ginkgo.GinkgoT(), pods, 2)
lease, err := s.GetLeaderLease()
lease, err := s.WaitGetLeaderLease()
assert.Nil(ginkgo.GinkgoT(), err)
assert.Equal(ginkgo.GinkgoT(), *lease.Spec.LeaseDurationSeconds, int32(15))
if *lease.Spec.HolderIdentity != pods[0].Name && *lease.Spec.HolderIdentity != pods[1].Name {
@@ -126,13 +134,11 @@ var _ = ginkgo.Describe("leader election", func() {
})

ginkgo.It("leader failover", func() {
// Wait the leader election to complete.
time.Sleep(2 * time.Second)
pods, err := s.GetIngressPodDetails()
assert.Nil(ginkgo.GinkgoT(), err)
assert.Len(ginkgo.GinkgoT(), pods, 2)

lease, err := s.GetLeaderLease()
lease, err := s.WaitGetLeaderLease()
assert.Nil(ginkgo.GinkgoT(), err)

leaderIdx := 0
@@ -141,9 +147,11 @@ var _ = ginkgo.Describe("leader election", func() {
}
ginkgo.GinkgoT().Logf("lease is %s", *lease.Spec.HolderIdentity)
assert.Nil(ginkgo.GinkgoT(), s.KillPod(pods[leaderIdx].Name))

// Wait for the old lease to expire and a new leader to be elected.
time.Sleep(25 * time.Second)

newLease, err := s.GetLeaderLease()
newLease, err := s.WaitGetLeaderLease()
assert.Nil(ginkgo.GinkgoT(), err)

newPods, err := s.GetIngressPodDetails()
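
Note: the failover test keeps a fixed 25-second sleep because it must outlast the 15-second LeaseDurationSeconds asserted above. A possible future refinement, sketched here purely as an illustration (oldHolder, cli, and namespace are assumed to be in scope; they are not existing scaffold fields), is to poll the lease until the holder identity changes:

// Sketch: wait for failover by watching the lease holder rather than sleeping.
err = wait.ExponentialBackoff(wait.Backoff{Duration: time.Second, Factor: 2, Steps: 6}, func() (bool, error) {
	l, err := cli.CoordinationV1().Leases(namespace).Get(context.TODO(), "ingress-apisix-leader", metav1.GetOptions{})
	if err != nil {
		return false, err
	}
	return *l.Spec.HolderIdentity != oldHolder, nil
})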
32 changes: 32 additions & 0 deletions test/e2e/scaffold/httpbin.go
@@ -18,6 +18,9 @@ import (
"fmt"
"time"

"github.com/onsi/ginkgo"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"github.com/gruntwork-io/terratest/modules/k8s"
corev1 "k8s.io/api/core/v1"
)
@@ -112,3 +115,32 @@ func (s *Scaffold) ScaleHTTPBIN(desired int) error {
}
return nil
}

// WaitAllHTTPBINPoddsAvailable waits until all httpbin pods are ready.
func (s *Scaffold) WaitAllHTTPBINPoddsAvailable() error {
opts := metav1.ListOptions{
LabelSelector: "app=httpbin-deployment-e2e-test",
}
condFunc := func() (bool, error) {
items, err := k8s.ListPodsE(s.t, s.kubectlOptions, opts)
if err != nil {
return false, err
}
if len(items) == 0 {
ginkgo.GinkgoT().Log("no apisix pods created")
return false, nil
}
for _, item := range items {
for _, cond := range item.Status.Conditions {
if cond.Type != corev1.PodReady {
continue
}
if cond.Status != "True" {
return false, nil
}
}
}
return true, nil
}
return waitExponentialBackoff(condFunc)
}
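
Note: WaitAllHTTPBINPoddsAvailable pairs with ScaleHTTPBIN so a test only proceeds once every replica reports PodReady. The typical call sequence, as used by resourcepushing.go in this commit:

assert.Nil(ginkgo.GinkgoT(), s.ScaleHTTPBIN(2), "scaling number of httpbin instances")
assert.Nil(ginkgo.GinkgoT(), s.WaitAllHTTPBINPoddsAvailable(), "waiting for all httpbin pods ready")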
20 changes: 16 additions & 4 deletions test/e2e/scaffold/ingress.go
@@ -23,6 +23,7 @@ import (
coordinationv1 "k8s.io/api/coordination/v1"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

@@ -157,14 +158,25 @@ func (s *Scaffold) waitAllIngressControllerPodsAvailable() error {
return waitExponentialBackoff(condFunc)
}

// GetLeaderLease returns the Lease resource.
func (s *Scaffold) GetLeaderLease() (*coordinationv1.Lease, error) {
// WaitGetLeaderLease waits until the leader lease is created and returns it.
func (s *Scaffold) WaitGetLeaderLease() (*coordinationv1.Lease, error) {
cli, err := k8s.GetKubernetesClientE(s.t)
if err != nil {
return nil, err
}
lease, err := cli.CoordinationV1().Leases(s.namespace).Get(context.TODO(), "ingress-apisix-leader", metav1.GetOptions{})
if err != nil {
var lease *coordinationv1.Lease
condFunc := func() (bool, error) {
l, err := cli.CoordinationV1().Leases(s.namespace).Get(context.TODO(), "ingress-apisix-leader", metav1.GetOptions{})
if err != nil {
if k8serrors.IsNotFound(err) {
return false, nil
}
return false, err
}
lease = l
return true, nil
}
if err := waitExponentialBackoff(condFunc); err != nil {
return nil, err
}
return lease, nil
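
Note: WaitGetLeaderLease treats a NotFound error as "not created yet" and keeps backing off, while any other API error aborts the wait immediately. Callers therefore no longer need an up-front sleep before inspecting the lease; the leader-election test in sanity.go reduces to:

lease, err := s.WaitGetLeaderLease()
assert.Nil(ginkgo.GinkgoT(), err)
assert.Equal(ginkgo.GinkgoT(), *lease.Spec.LeaseDurationSeconds, int32(15))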
15 changes: 8 additions & 7 deletions test/e2e/scaffold/scaffold.go
@@ -163,14 +163,12 @@ func (s *Scaffold) beforeEach() {
s.etcdService, err = s.newEtcd()
assert.Nil(s.t, err, "initializing etcd")

// We don't use k8s.WaitUntilServiceAvailable since it hacks for Minikube.
err = k8s.WaitUntilNumPodsCreatedE(s.t, s.kubectlOptions, s.labelSelector("app=etcd-deployment-e2e-test"), 1, 5, 2*time.Second)
err = s.waitAllEtcdPodsAvailable()
assert.Nil(s.t, err, "waiting for etcd ready")

s.apisixService, err = s.newAPISIX()
assert.Nil(s.t, err, "initializing Apache APISIX")

// We don't use k8s.WaitUntilServiceAvailable since it hacks for Minikube.
err = s.waitAllAPISIXPodsAvailable()
assert.Nil(s.t, err, "waiting for apisix ready")

@@ -194,6 +192,10 @@ func (s *Scaffold) afterEach() {
for _, f := range s.finializers {
f()
}

// Wait for a while so the worker node is not overwhelmed
// when the next cases start running.
time.Sleep(3 * time.Second)
}

func (s *Scaffold) addFinializer(f func()) {
@@ -216,10 +218,9 @@ func (s *Scaffold) renderConfig(path string) (string, error) {

func waitExponentialBackoff(condFunc func() (bool, error)) error {
backoff := wait.Backoff{
Duration: 100 * time.Millisecond,
Factor: 3,
Jitter: 0,
Steps: 6,
Duration: 500 * time.Millisecond,
Factor: 2,
Steps: 8,
}
return wait.ExponentialBackoff(backoff, condFunc)
}
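
Note: the retuned backoff roughly triples the worst-case wait budget: the old parameters (100 ms base, factor 3, 6 steps) sum to about 36 s, while the new ones (500 ms base, factor 2, 8 steps) sum to about 127 s, giving slow CI nodes more headroom per condition. A small self-contained check of that arithmetic (not part of the commit):

package main

import "fmt"

// totalBackoff sums the sleep intervals produced by wait.ExponentialBackoff
// for a base duration (in seconds), growth factor, and number of steps.
func totalBackoff(base, factor float64, steps int) float64 {
	total, d := 0.0, base
	for i := 0; i < steps; i++ {
		total += d
		d *= factor
	}
	return total
}

func main() {
	fmt.Printf("old: %.1fs\n", totalBackoff(0.1, 3, 6)) // ≈ 36.4s
	fmt.Printf("new: %.1fs\n", totalBackoff(0.5, 2, 8)) // = 127.5s
}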
