Skip to content
This repository has been archived by the owner on Jan 11, 2023. It is now read-only.

Commit

Permalink
Add e2e tests, example for zones
Browse files Browse the repository at this point in the history
  • Loading branch information
ritazh committed Aug 21, 2018
1 parent 8efb2a3 commit b95ef71
Show file tree
Hide file tree
Showing 9 changed files with 342 additions and 1 deletion.
39 changes: 39 additions & 0 deletions examples/e2e-tests/kubernetes/zones/definition.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
{
"apiVersion": "vlabs",
"properties": {
"orchestratorProfile": {
"orchestratorType": "Kubernetes",
"orchestratorRelease": "1.12"
},
"masterProfile": {
"count": 1,
"dnsPrefix": "",
"vmSize": "Standard_D2_v2"
},
"agentPoolProfiles": [
{
"name": "agentpool",
"count": 4,
"vmSize": "Standard_DS2_v2",
"AvailabilityZones": [
"1",
"2"
]
}
],
"linuxProfile": {
"adminUsername": "azureuser",
"ssh": {
"publicKeys": [
{
"keyData": ""
}
]
}
},
"servicePrincipalProfile": {
"clientId": "",
"secret": ""
}
}
}
2 changes: 1 addition & 1 deletion pkg/api/common/versions.go
Original file line number Diff line number Diff line change
Expand Up @@ -76,7 +76,7 @@ var AllKubernetesSupportedVersions = map[string]bool{
"1.11.1": true,
"1.11.2": true,
"1.12.0-alpha.1": true,
"1.12.0-beta.0": true,
"1.12.0-beta.0": true, // required for availability zones feature
}

// GetDefaultKubernetesVersion returns the default Kubernetes version, that is the latest patch of the default release
Expand Down
14 changes: 14 additions & 0 deletions test/e2e/engine/template.go
Original file line number Diff line number Diff line change
Expand Up @@ -243,6 +243,20 @@ func (e *Engine) HasNetworkPolicy(name string) bool {
return false
}

// HasAllZonesAgentPools will return true if all of the agent pools have zones,
// i.e. every agent pool profile declares at least one availability zone.
// An empty list of agent pool profiles vacuously returns true (same as the
// original count-based implementation, where 0 == 0).
func (e *Engine) HasAllZonesAgentPools() bool {
	for _, ap := range e.ExpandedDefinition.Properties.AgentPoolProfiles {
		// len() of a nil slice is 0, so no separate nil check is needed.
		if len(ap.AvailabilityZones) == 0 {
			return false
		}
	}
	return true
}

// Write will write the cluster definition to disk
func (e *Engine) Write() error {
json, err := helpers.JSONMarshal(e.ClusterDefinition, false)
Expand Down
86 changes: 86 additions & 0 deletions test/e2e/kubernetes/kubernetes_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ import (
"os/exec"
"path/filepath"
"regexp"
"strings"
"time"

"github.com/Azure/acs-engine/pkg/api/common"
Expand All @@ -17,6 +18,7 @@ import (
"github.com/Azure/acs-engine/test/e2e/kubernetes/job"
"github.com/Azure/acs-engine/test/e2e/kubernetes/networkpolicy"
"github.com/Azure/acs-engine/test/e2e/kubernetes/node"
"github.com/Azure/acs-engine/test/e2e/kubernetes/persistentvolume"
"github.com/Azure/acs-engine/test/e2e/kubernetes/persistentvolumeclaims"
"github.com/Azure/acs-engine/test/e2e/kubernetes/pod"
"github.com/Azure/acs-engine/test/e2e/kubernetes/service"
Expand Down Expand Up @@ -847,6 +849,90 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu
})
})

// Zone e2e coverage: both specs run only when every agent pool in the expanded
// cluster definition declares availability zones (Engine.HasAllZonesAgentPools);
// otherwise they Skip.
Describe("with all zoned agent pools", func() {
	It("should be labeled with zones for each node", func() {
		if eng.HasAllZonesAgentPools() {
			nodeList, err := node.Get()
			Expect(err).NotTo(HaveOccurred())
			for _, node := range nodeList.Nodes {
				role := node.Metadata.Labels["kubernetes.io/role"]
				if role == "agent" {
					By("Ensuring that we get zones for each agent node")
					// NOTE(review): zone label values are presumably of the form
					// "<region>-<zone>", so a "-" indicates a real zone was set —
					// confirm against the cloud provider's labeling.
					zones := node.Metadata.Labels["failure-domain.beta.kubernetes.io/zone"]
					contains := strings.Contains(zones, "-")
					Expect(contains).To(Equal(true))
				}
			}
		} else {
			Skip("Availability zones was not configured for this Cluster Definition")
		}
	})

	It("should create pv with zone labels and node affinity", func() {
		if eng.HasAllZonesAgentPools() {
			By("Creating a persistent volume claim")
			pvcName := "azure-managed-disk" // should be the same as in pvc-premium.yaml
			pvc, err := persistentvolumeclaims.CreatePersistentVolumeClaimsFromFile(filepath.Join(WorkloadDir, "pvc-premium.yaml"), pvcName, "default")
			Expect(err).NotTo(HaveOccurred())
			ready, err := pvc.WaitOnReady("default", 5*time.Second, cfg.Timeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(ready).To(Equal(true))

			// Every pv provisioned for the claim must carry a zone label and
			// matching node affinity.
			pvList, err := persistentvolume.Get()
			Expect(err).NotTo(HaveOccurred())
			pvZone := ""
			for _, pv := range pvList.PersistentVolumes {
				By("Ensuring that we get zones for the pv")
				// zone is chosen by round-robin across all zones
				pvZone = pv.Metadata.Labels["failure-domain.beta.kubernetes.io/zone"]
				fmt.Printf("pvZone: %s\n", pvZone)
				contains := strings.Contains(pvZone, "-")
				Expect(contains).To(Equal(true))
				// VolumeScheduling feature gate is set to true by default starting v1.10+
				for _, expression := range pv.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions {
					if expression.Key == "failure-domain.beta.kubernetes.io/zone" {
						By("Ensuring that we get nodeAffinity for each pv")
						value := expression.Values[0]
						fmt.Printf("NodeAffinity value: %s\n", value)
						contains := strings.Contains(value, "-")
						Expect(contains).To(Equal(true))
					}
				}
			}

			By("Launching a pod using the volume claim")
			podName := "zone-pv-pod" // should be the same as in pod-pvc.yaml
			testPod, err := pod.CreatePodFromFile(filepath.Join(WorkloadDir, "pod-pvc.yaml"), podName, "default")
			Expect(err).NotTo(HaveOccurred())
			ready, err = testPod.WaitOnReady(5*time.Second, cfg.Timeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(ready).To(Equal(true))

			By("Checking that the pod can access volume")
			valid, err := testPod.ValidatePVC("/mnt/azure", 10, 10*time.Second)
			Expect(valid).To(BeTrue())
			Expect(err).NotTo(HaveOccurred())

			By("Ensuring that attached volume pv has the same zone as the zone of the node")
			nodeName := testPod.Spec.NodeName
			nodeList, err := node.GetByPrefix(nodeName)
			Expect(err).NotTo(HaveOccurred())
			nodeZone := nodeList[0].Metadata.Labels["failure-domain.beta.kubernetes.io/zone"]
			fmt.Printf("pvZone: %s\n", pvZone)
			fmt.Printf("nodeZone: %s\n", nodeZone)
			// The disk must be provisioned in the same zone as the node the
			// pod landed on, or attachment would have failed.
			Expect(nodeZone == pvZone).To(Equal(true))

			By("Cleaning up after ourselves")
			err = testPod.Delete()
			Expect(err).NotTo(HaveOccurred())
			err = pvc.Delete()
			Expect(err).NotTo(HaveOccurred())
		} else {
			Skip("Availability zones was not configured for this Cluster Definition")
		}
	})
})

Describe("after the cluster has been up for awhile", func() {
It("dns-liveness pod should not have any restarts", func() {
if !eng.HasNetworkPolicy("calico") {
Expand Down
124 changes: 124 additions & 0 deletions test/e2e/kubernetes/persistentvolume/persistentvolume.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,124 @@
package persistentvolume

import (
"context"
"encoding/json"
"log"
"os/exec"
"time"

"github.com/Azure/acs-engine/test/e2e/kubernetes/util"
"github.com/pkg/errors"
)

// PersistentVolume is used to parse data from kubectl get pv
type PersistentVolume struct {
	Metadata Metadata `json:"metadata"`
	Spec     Spec     `json:"spec"`
	Status   Status   `json:"status"`
}

// Metadata holds information like name, create time, and namespace
type Metadata struct {
	CreatedAt time.Time         `json:"creationTimestamp"`
	Labels    map[string]string `json:"labels"`
	Name      string            `json:"name"`
}

// Spec holds information like storageClassName, nodeAffinity
type Spec struct {
	StorageClassName string       `json:"storageClassName"`
	NodeAffinity     NodeAffinity `json:"nodeAffinity"`
}

// NodeAffinity holds information like required nodeselector
type NodeAffinity struct {
	// Pointer so a pv without nodeAffinity unmarshals to nil instead of a zero value.
	Required *NodeSelector `json:"required"`
}

// NodeSelector represents the union of the results of one or more label queries
type NodeSelector struct {
	// Required. A list of node selector terms. The terms are ORed.
	NodeSelectorTerms []NodeSelectorTerm `json:"nodeSelectorTerms"`
}

// NodeSelectorTerm represents node selector requirements
type NodeSelectorTerm struct {
	MatchExpressions []NodeSelectorRequirement `json:"matchExpressions,omitempty"`
	MatchFields      []NodeSelectorRequirement `json:"matchFields,omitempty"`
}

// NodeSelectorRequirement is a selector that contains values, a key, and an operator.
// NOTE(review): only Key and Values are parsed here — the operator field is not
// needed by the callers in this test suite.
type NodeSelectorRequirement struct {
	Key    string   `json:"key"`
	Values []string `json:"values,omitempty"`
}

// Status holds information like phase
type Status struct {
	Phase string `json:"phase"`
}

// List is used to parse out PersistentVolume from a list
type List struct {
	PersistentVolumes []PersistentVolume `json:"items"`
}

// Get returns the current pvs for a given kubeconfig by shelling out to
// `kubectl get pv -o json` and unmarshalling the result.
// Returns a non-nil error if kubectl fails or its output cannot be parsed.
func Get() (*List, error) {
	cmd := exec.Command("kubectl", "get", "pv", "-o", "json")
	util.PrintCommand(cmd)
	out, err := cmd.CombinedOutput()
	if err != nil {
		log.Printf("Error trying to run 'kubectl get pv':%s", string(out))
		return nil, err
	}
	pvl := List{}
	err = json.Unmarshal(out, &pvl)
	if err != nil {
		// Bug fix: previously the unmarshal error was logged but swallowed,
		// returning an empty list with a nil error to callers.
		log.Printf("Error unmarshalling pvs json:%s", err)
		return nil, err
	}
	return &pvl, nil
}

// WaitOnReady will block until all pvs are in ready state
// (as reported by AreAllReady) or the duration elapses, polling every sleep
// interval. It returns true when the expected PVs became Bound, false on timeout.
func WaitOnReady(pvCount int, sleep, duration time.Duration) bool {
	readyCh := make(chan bool, 1)
	errCh := make(chan error)
	ctx, cancel := context.WithTimeout(context.Background(), duration)
	defer cancel()
	go func() {
		// Poll until either the context deadline fires or all pvs are ready.
		for {
			select {
			case <-ctx.Done():
				// Deadline reached: report the timeout to the outer select.
				errCh <- errors.Errorf("Timeout exceeded (%s) while waiting for PVs to become Bound", duration.String())
			default:
				if AreAllReady(pvCount) {
					readyCh <- true
				}
				time.Sleep(sleep)
			}
		}
	}()
	for {
		select {
		case <-errCh:
			return false
		case ready := <-readyCh:
			return ready
		}
	}
}

// AreAllReady returns true only when exactly pvCount PersistentVolumes exist
// and every one of them is in the "Bound" phase.
func AreAllReady(pvCount int) bool {
	// Bug fix: the original returned true as soon as ANY pv was Bound (the
	// return sat inside the loop), and silently ignored Get's error.
	list, err := Get()
	if err != nil || list == nil || len(list.PersistentVolumes) != pvCount {
		return false
	}
	for _, pv := range list.PersistentVolumes {
		if pv.Status.Phase != "Bound" {
			return false
		}
	}
	return true
}
Original file line number Diff line number Diff line change
Expand Up @@ -70,6 +70,18 @@ func Get(pvcName, namespace string) (*PersistentVolumeClaims, error) {
return &pvc, nil
}

// Delete will delete a PersistentVolumeClaims in a given namespace by shelling
// out to `kubectl delete pvc`; it returns kubectl's error, if any.
func (pvc *PersistentVolumeClaims) Delete() error {
	deleteCmd := exec.Command("kubectl", "delete", "pvc", "-n", pvc.Metadata.NameSpace, pvc.Metadata.Name)
	util.PrintCommand(deleteCmd)
	if out, err := deleteCmd.CombinedOutput(); err != nil {
		log.Printf("Error while trying to delete PVC %s in namespace %s:%s\n", pvc.Metadata.Name, pvc.Metadata.NameSpace, string(out))
		return err
	}
	return nil
}

// WaitOnReady will block until PersistentVolumeClaims is available
func (pvc *PersistentVolumeClaims) WaitOnReady(namespace string, sleep, duration time.Duration) (bool, error) {
readyCh := make(chan bool, 1)
Expand Down
40 changes: 40 additions & 0 deletions test/e2e/kubernetes/pod/pod.go
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,7 @@ type Metadata struct {
// Spec holds information like containers
type Spec struct {
	Containers []Container `json:"containers"`
	// NodeName is the name of the node this pod is running on, as reported
	// by kubectl; used by e2e tests to look the node up by prefix.
	NodeName string `json:"nodeName"`
}

// Container holds information like image and ports
Expand Down Expand Up @@ -659,6 +660,45 @@ func (p *Pod) ValidateAzureFile(mountPath string, sleep, duration time.Duration)
}
}

// ValidatePVC will keep retrying the check if azure disk is mounted in Pod.
// It polls every sleep interval until the check succeeds or duration elapses.
// The check: create a test directory under mountPath inside the pod, then list
// mountPath and confirm the directory appears — proving the volume is mounted
// and writable.
func (p *Pod) ValidatePVC(mountPath string, sleep, duration time.Duration) (bool, error) {
	readyCh := make(chan bool, 1)
	errCh := make(chan error)
	ctx, cancel := context.WithTimeout(context.Background(), duration)
	defer cancel()
	go func() {
		for {
			select {
			case <-ctx.Done():
				errCh <- errors.Errorf("Timeout exceeded (%s) while waiting for Pod (%s) to check azure disk mounted", duration.String(), p.Metadata.Name)
			default:
				// Try to create a marker directory on the mounted volume…
				out, err := p.Exec("--", "mkdir", mountPath+"/"+testDir)
				if err == nil {
					// …then verify it is visible when listing the mount point.
					// (out/err are intentionally shadowed for the inner exec.)
					out, err := p.Exec("--", "ls", mountPath)
					if err == nil && strings.Contains(string(out), testDir) {
						readyCh <- true
					} else {
						log.Printf("Error:%s\n", err)
						log.Printf("Out:%s\n", out)
					}
				} else {
					log.Printf("Error:%s\n", err)
					log.Printf("Out:%s\n", out)
				}
				time.Sleep(sleep)
			}
		}
	}()
	for {
		select {
		case err := <-errCh:
			return false, err
		case ready := <-readyCh:
			return ready, nil
		}
	}
}

// ValidateResources checks that an addon has the expected memory/cpu limits and requests
func (c *Container) ValidateResources(a api.KubernetesContainerSpec) error {
expectedCPURequests := a.CPURequests
Expand Down
15 changes: 15 additions & 0 deletions test/e2e/kubernetes/workloads/pod-pvc.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
# Pod used by the zones e2e test: mounts the "azure-managed-disk" PVC
# (created from pvc-premium.yaml) so the test can verify the volume is
# provisioned in the same availability zone as the scheduled node.
kind: Pod
apiVersion: v1
metadata:
  name: zone-pv-pod  # must match podName in kubernetes_test.go
spec:
  containers:
  - name: myfrontend
    image: nginx
    volumeMounts:
    - mountPath: "/mnt/azure"  # path checked by Pod.ValidatePVC
      name: volume
  volumes:
  - name: volume
    persistentVolumeClaim:
      claimName: azure-managed-disk  # must match pvcName in kubernetes_test.go
Loading

0 comments on commit b95ef71

Please sign in to comment.