From dfbdb4bc4afe84cef9ef0605afb0d3c9e8c6f596 Mon Sep 17 00:00:00 2001 From: Jack Francis Date: Thu, 25 Jan 2018 11:24:38 -0800 Subject: [PATCH 1/9] conditional addon tests --- test/e2e/engine/template.go | 40 +++++++++ test/e2e/kubernetes/kubernetes_test.go | 108 ++++++++++++++++--------- 2 files changed, 108 insertions(+), 40 deletions(-) diff --git a/test/e2e/engine/template.go b/test/e2e/engine/template.go index c5915d83a1..484558125b 100644 --- a/test/e2e/engine/template.go +++ b/test/e2e/engine/template.go @@ -156,6 +156,46 @@ func (e *Engine) HasWindowsAgents() bool { return false } +// HasDashboard will return true if kubernetes-dashboard addon is enabled +func (e *Engine) HasDashboard() bool { + for _, addon := range e.ClusterDefinition.Properties.OrchestratorProfile.KubernetesConfig.Addons { + if addon.Name == "kubernetes-dashboard" { + return *addon.Enabled + } + } + return false +} + +// HasTiller will return true if tiller addon is enabled +func (e *Engine) HasTiller() bool { + for _, addon := range e.ClusterDefinition.Properties.OrchestratorProfile.KubernetesConfig.Addons { + if addon.Name == "tiller" { + return *addon.Enabled + } + } + return false +} + +// HasACIConnector will return true if aci-connector addon is enabled +func (e *Engine) HasACIConnector() bool { + for _, addon := range e.ClusterDefinition.Properties.OrchestratorProfile.KubernetesConfig.Addons { + if addon.Name == "aci-connector" { + return *addon.Enabled + } + } + return false +} + +// HasRescheduler will return true if rescheduler addon is enabled +func (e *Engine) HasRescheduler() bool { + for _, addon := range e.ClusterDefinition.Properties.OrchestratorProfile.KubernetesConfig.Addons { + if addon.Name == "rescheduler" { + return *addon.Enabled + } + } + return false +} + // OrchestratorVersion1Dot8AndUp will return true if the orchestrator version is 1.8 and up func (e *Engine) OrchestratorVersion1Dot8AndUp() bool { return e.ClusterDefinition.ContainerService.Properties.OrchestratorProfile.OrchestratorVersion >= "1.8" diff --git a/test/e2e/kubernetes/kubernetes_test.go b/test/e2e/kubernetes/kubernetes_test.go index 659820c7f3..b6166d3d2e 100644 --- a/test/e2e/kubernetes/kubernetes_test.go +++ b/test/e2e/kubernetes/kubernetes_test.go @@ -121,56 +121,84 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu }) It("should have tiller running", func() { - running, err := pod.WaitOnReady("tiller", "kube-system", 3, 30*time.Second, cfg.Timeout) - Expect(err).NotTo(HaveOccurred()) - Expect(running).To(Equal(true)) + if eng.HasTiller() { + running, err := pod.WaitOnReady("tiller", "kube-system", 3, 30*time.Second, cfg.Timeout) + Expect(err).NotTo(HaveOccurred()) + Expect(running).To(Equal(true)) + } else { + Skip("tiller disabled for this cluster, will not test") + } }) It("should be able to access the dashboard from each node", func() { - running, err := pod.WaitOnReady("kubernetes-dashboard", "kube-system", 3, 30*time.Second, cfg.Timeout) - Expect(err).NotTo(HaveOccurred()) - Expect(running).To(Equal(true)) - - kubeConfig, err := GetConfig() - Expect(err).NotTo(HaveOccurred()) - sshKeyPath := cfg.GetSSHKeyPath() + if eng.HasDashboard() { + running, err := pod.WaitOnReady("kubernetes-dashboard", "kube-system", 3, 30*time.Second, cfg.Timeout) + Expect(err).NotTo(HaveOccurred()) + Expect(running).To(Equal(true)) - s, err := service.Get("kubernetes-dashboard", "kube-system") - Expect(err).NotTo(HaveOccurred()) - dashboardPort := 80 - version, err := node.Version() - 
Expect(err).NotTo(HaveOccurred()) + kubeConfig, err := GetConfig() + Expect(err).NotTo(HaveOccurred()) + sshKeyPath := cfg.GetSSHKeyPath() - re := regexp.MustCompile("v1.9") - if re.FindString(version) != "" { - dashboardPort = 443 - } - port := s.GetNodePort(dashboardPort) + s, err := service.Get("kubernetes-dashboard", "kube-system") + Expect(err).NotTo(HaveOccurred()) + dashboardPort := 80 + version, err := node.Version() + Expect(err).NotTo(HaveOccurred()) - master := fmt.Sprintf("azureuser@%s", kubeConfig.GetServerName()) - nodeList, err := node.Get() - Expect(err).NotTo(HaveOccurred()) + re := regexp.MustCompile("v1.9") + if re.FindString(version) != "" { + dashboardPort = 443 + } + port := s.GetNodePort(dashboardPort) - if !eng.HasWindowsAgents() { - for _, node := range nodeList.Nodes { - success := false - for i := 0; i < 60; i++ { - dashboardURL := fmt.Sprintf("http://%s:%v", node.Status.GetAddressByType("InternalIP").Address, port) - curlCMD := fmt.Sprintf("curl --max-time 60 %s", dashboardURL) - _, err := exec.Command("ssh", "-i", sshKeyPath, "-o", "ConnectTimeout=10", "-o", "StrictHostKeyChecking=no", "-o", "UserKnownHostsFile=/dev/null", master, curlCMD).CombinedOutput() - if err == nil { - success = true - break - } - if i > 58 { - log.Println(curlCMD) - log.Println(err.Error()) - log.Printf("%#v\n", err) + master := fmt.Sprintf("azureuser@%s", kubeConfig.GetServerName()) + nodeList, err := node.Get() + Expect(err).NotTo(HaveOccurred()) + + if !eng.HasWindowsAgents() { + for _, node := range nodeList.Nodes { + success := false + for i := 0; i < 60; i++ { + dashboardURL := fmt.Sprintf("http://%s:%v", node.Status.GetAddressByType("InternalIP").Address, port) + curlCMD := fmt.Sprintf("curl --max-time 60 %s", dashboardURL) + _, err := exec.Command("ssh", "-i", sshKeyPath, "-o", "ConnectTimeout=10", "-o", "StrictHostKeyChecking=no", "-o", "UserKnownHostsFile=/dev/null", master, curlCMD).CombinedOutput() + if err == nil { + success = true + break + } + if i > 58 { + log.Println(curlCMD) + log.Println(err.Error()) + log.Printf("%#v\n", err) + } + time.Sleep(10 * time.Second) } - time.Sleep(10 * time.Second) + Expect(success).To(BeTrue()) } - Expect(success).To(BeTrue()) } + } else { + Skip("kubernetes-dashboard disabled for this cluster, will not test") + } + }) + + It("should have aci-connector running", func() { + if eng.HasACIConnector() { + running, err := pod.WaitOnReady("aci-connector", "kube-system", 3, 30*time.Second, cfg.Timeout) + Expect(err).NotTo(HaveOccurred()) + Expect(running).To(Equal(true)) + } else { + Skip("aci-connector disabled for this cluster, will not test") + } + }) + + It("should have rescheduler running", func() { + if eng.HasACIConnector() { + running, err := pod.WaitOnReady("rescheduler", "kube-system", 3, 30*time.Second, cfg.Timeout) + Expect(err).NotTo(HaveOccurred()) + Expect(running).To(Equal(true)) + } else { + Skip("rescheduler disabled for this cluster, will not test") } }) }) From 94e4a3dd6899e26b093ed5fee3ea77e8e0cccaf5 Mon Sep 17 00:00:00 2001 From: Jack Francis Date: Thu, 25 Jan 2018 14:00:51 -0800 Subject: [PATCH 2/9] uses generated model to introspect cluster features --- test/e2e/dcos/dcos_test.go | 2 +- test/e2e/engine/template.go | 39 ++++++++++++++++++++------ test/e2e/kubernetes/kubernetes_test.go | 9 ++++-- test/e2e/runner.go | 2 +- 4 files changed, 38 insertions(+), 14 deletions(-) diff --git a/test/e2e/dcos/dcos_test.go b/test/e2e/dcos/dcos_test.go index 0c29fe4b81..353284bdf8 100644 --- a/test/e2e/dcos/dcos_test.go +++ 
b/test/e2e/dcos/dcos_test.go @@ -31,7 +31,7 @@ var _ = BeforeSuite(func() { engCfg, err := engine.ParseConfig(c.CurrentWorkingDir, c.ClusterDefinition, c.Name) Expect(err).NotTo(HaveOccurred()) - cs, err := engine.Parse(engCfg.ClusterDefinitionTemplate) + cs, err := engine.ParseInput(engCfg.ClusterDefinitionTemplate) Expect(err).NotTo(HaveOccurred()) eng = engine.Engine{ Config: engCfg, diff --git a/test/e2e/engine/template.go b/test/e2e/engine/template.go index 484558125b..79b49e92b4 100644 --- a/test/e2e/engine/template.go +++ b/test/e2e/engine/template.go @@ -10,6 +10,7 @@ import ( "github.com/Azure/acs-engine/pkg/api" "github.com/Azure/acs-engine/pkg/api/vlabs" "github.com/Azure/acs-engine/pkg/helpers" + "github.com/Azure/acs-engine/pkg/i18n" "github.com/Azure/acs-engine/test/e2e/config" "github.com/kelseyhightower/envconfig" ) @@ -38,8 +39,9 @@ type Config struct { // Engine holds necessary information to interact with acs-engine cli type Engine struct { - Config *Config - ClusterDefinition *api.VlabsARMContainerService // Holds the parsed ClusterDefinition + Config *Config + ClusterDefinition *api.VlabsARMContainerService // Holds the parsed ClusterDefinition + ExpandedDefinition *api.ContainerService // Holds the expanded ClusterDefinition } // ParseConfig will return a new engine config struct taking values from env vars @@ -69,7 +71,7 @@ func Build(cfg *config.Config, subnetID string) (*Engine, error) { log.Printf("Error while trying to build Engine Configuration:%s\n", err) } - cs, err := Parse(config.ClusterDefinitionPath) + cs, err := ParseInput(config.ClusterDefinitionPath) if err != nil { return nil, err } @@ -158,7 +160,7 @@ func (e *Engine) HasWindowsAgents() bool { // HasDashboard will return true if kubernetes-dashboard addon is enabled func (e *Engine) HasDashboard() bool { - for _, addon := range e.ClusterDefinition.Properties.OrchestratorProfile.KubernetesConfig.Addons { + for _, addon := range e.ExpandedDefinition.Properties.OrchestratorProfile.KubernetesConfig.Addons { if addon.Name == "kubernetes-dashboard" { return *addon.Enabled } @@ -168,7 +170,7 @@ func (e *Engine) HasDashboard() bool { // HasTiller will return true if tiller addon is enabled func (e *Engine) HasTiller() bool { - for _, addon := range e.ClusterDefinition.Properties.OrchestratorProfile.KubernetesConfig.Addons { + for _, addon := range e.ExpandedDefinition.Properties.OrchestratorProfile.KubernetesConfig.Addons { if addon.Name == "tiller" { return *addon.Enabled } @@ -178,7 +180,7 @@ func (e *Engine) HasTiller() bool { // HasACIConnector will return true if aci-connector addon is enabled func (e *Engine) HasACIConnector() bool { - for _, addon := range e.ClusterDefinition.Properties.OrchestratorProfile.KubernetesConfig.Addons { + for _, addon := range e.ExpandedDefinition.Properties.OrchestratorProfile.KubernetesConfig.Addons { if addon.Name == "aci-connector" { return *addon.Enabled } @@ -188,7 +190,7 @@ func (e *Engine) HasACIConnector() bool { // HasRescheduler will return true if rescheduler addon is enabled func (e *Engine) HasRescheduler() bool { - for _, addon := range e.ClusterDefinition.Properties.OrchestratorProfile.KubernetesConfig.Addons { + for _, addon := range e.ExpandedDefinition.Properties.OrchestratorProfile.KubernetesConfig.Addons { if addon.Name == "rescheduler" { return *addon.Enabled } @@ -212,11 +214,12 @@ func (e *Engine) Write() error { if err != nil { log.Printf("Error while trying to write container service definition to file (%s):%s\n%s\n", 
e.Config.ClusterDefinitionTemplate, err, string(json))
 	}
+	return nil
 }
 
-// Parse takes a template path and will parse that into a api.VlabsARMContainerService
-func Parse(path string) (*api.VlabsARMContainerService, error) {
+// ParseInput takes a template path and will parse that into an api.VlabsARMContainerService
+func ParseInput(path string) (*api.VlabsARMContainerService, error) {
 	contents, err := ioutil.ReadFile(path)
 	if err != nil {
 		log.Printf("Error while trying to read cluster definition at (%s):%s\n", path, err)
@@ -229,3 +232,21 @@ func Parse(path string) (*api.VlabsARMContainerService, error) {
 	}
 	return &cs, nil
 }
+
+// ParseOutput takes the generated api model and will parse that into an api.ContainerService
+func ParseOutput(path string) (*api.ContainerService, error) {
+	locale, err := i18n.LoadTranslations()
+	if err != nil {
+		return nil, fmt.Errorf("error loading translation files: %s", err.Error())
+	}
+	apiloader := &api.Apiloader{
+		Translator: &i18n.Translator{
+			Locale: locale,
+		},
+	}
+	containerService, _, err := apiloader.LoadContainerServiceFromFile(path, true, false, nil)
+	if err != nil {
+		return nil, err
+	}
+	return containerService, nil
+}
diff --git a/test/e2e/kubernetes/kubernetes_test.go b/test/e2e/kubernetes/kubernetes_test.go
index b6166d3d2e..6d3561d9f2 100644
--- a/test/e2e/kubernetes/kubernetes_test.go
+++ b/test/e2e/kubernetes/kubernetes_test.go
@@ -41,11 +41,14 @@ var _ = BeforeSuite(func() {
 	engCfg, err := engine.ParseConfig(c.CurrentWorkingDir, c.ClusterDefinition, c.Name)
 	Expect(err).NotTo(HaveOccurred())
-	cs, err := engine.Parse(engCfg.ClusterDefinitionTemplate)
+	csInput, err := engine.ParseInput(engCfg.ClusterDefinitionTemplate)
+	Expect(err).NotTo(HaveOccurred())
+	csGenerated, err := engine.ParseOutput(engCfg.GeneratedDefinitionPath + "/apimodel.json")
 	Expect(err).NotTo(HaveOccurred())
 	eng = engine.Engine{
-		Config:            engCfg,
-		ClusterDefinition: cs,
+		Config:             engCfg,
+		ClusterDefinition:  csInput,
+		ExpandedDefinition: csGenerated,
 	}
 })
 
diff --git a/test/e2e/runner.go b/test/e2e/runner.go
index 14f9b546c9..0e46134481 100644
--- a/test/e2e/runner.go
+++ b/test/e2e/runner.go
@@ -74,7 +74,7 @@ func main() {
 		teardown()
 		log.Fatalf("Error trying to parse Engine config:%s\n", err)
 	}
-	cs, err := engine.Parse(engCfg.ClusterDefinitionTemplate)
+	cs, err := engine.ParseInput(engCfg.ClusterDefinitionTemplate)
 	if err != nil {
 		teardown()
 		log.Fatalf("Error trying to parse engine template into memory:%s\n", err)

From 153236526835dd6ca8a361103884db9964cc0871 Mon Sep 17 00:00:00 2001
From: Jack Francis
Date: Thu, 25 Jan 2018 16:00:21 -0800
Subject: [PATCH 3/9] I heart output

---
 test/e2e/engine/template.go                  |  8 +--
 test/e2e/kubernetes/deployment/deployment.go | 30 ++++++---
 test/e2e/kubernetes/kubernetes_test.go       | 70 ++++++++++++++------
 test/e2e/kubernetes/util/util.go             | 12 ++++
 test/e2e/runner/ginkgo.go                    |  2 +-
 5 files changed, 86 insertions(+), 36 deletions(-)
 create mode 100644 test/e2e/kubernetes/util/util.go

diff --git a/test/e2e/engine/template.go b/test/e2e/engine/template.go
index 79b49e92b4..d712c8cc6a 100644
--- a/test/e2e/engine/template.go
+++ b/test/e2e/engine/template.go
@@ -131,8 +131,8 @@ func Build(cfg *config.Config, subnetID string) (*Engine, error) {
 
 // NodeCount returns the number of nodes that should be provisioned for a given cluster definition
 func (e *Engine) NodeCount() int {
-	expectedCount := e.ClusterDefinition.Properties.MasterProfile.Count
-	for _, pool := range e.ClusterDefinition.Properties.AgentPoolProfiles {
+
expectedCount := e.ExpandedDefinition.Properties.MasterProfile.Count + for _, pool := range e.ExpandedDefinition.Properties.AgentPoolProfiles { expectedCount = expectedCount + pool.Count } return expectedCount @@ -140,7 +140,7 @@ func (e *Engine) NodeCount() int { // HasLinuxAgents will return true if there is at least 1 linux agent pool func (e *Engine) HasLinuxAgents() bool { - for _, ap := range e.ClusterDefinition.Properties.AgentPoolProfiles { + for _, ap := range e.ExpandedDefinition.Properties.AgentPoolProfiles { if ap.OSType == "" || ap.OSType == "Linux" { return true } @@ -150,7 +150,7 @@ func (e *Engine) HasLinuxAgents() bool { // HasWindowsAgents will return true is there is at least 1 windows agent pool func (e *Engine) HasWindowsAgents() bool { - for _, ap := range e.ClusterDefinition.Properties.AgentPoolProfiles { + for _, ap := range e.ExpandedDefinition.Properties.AgentPoolProfiles { if ap.OSType == "Windows" { return true } diff --git a/test/e2e/kubernetes/deployment/deployment.go b/test/e2e/kubernetes/deployment/deployment.go index 8f6f151203..ffd7986500 100644 --- a/test/e2e/kubernetes/deployment/deployment.go +++ b/test/e2e/kubernetes/deployment/deployment.go @@ -8,6 +8,7 @@ import ( "time" "github.com/Azure/acs-engine/test/e2e/kubernetes/pod" + "github.com/Azure/acs-engine/test/e2e/kubernetes/util" ) // List holds a list of deployments returned from kubectl get deploy @@ -56,14 +57,15 @@ type Container struct { // CreateLinuxDeploy will create a deployment for a given image with a name in a namespace // --overrides='{ "apiVersion": "extensions/v1beta1", "spec":{"template":{"spec": {"nodeSelector":{"beta.kubernetes.io/os":"linux"}}}}}' func CreateLinuxDeploy(image, name, namespace, miscOpts string) (*Deployment, error) { - var err error - var out []byte + var cmd *exec.Cmd overrides := `{ "apiVersion": "extensions/v1beta1", "spec":{"template":{"spec": {"nodeSelector":{"beta.kubernetes.io/os":"linux"}}}}}` if miscOpts != "" { - out, err = exec.Command("kubectl", "run", name, "-n", namespace, "--image", image, "--overrides", overrides, miscOpts).CombinedOutput() + cmd = exec.Command("kubectl", "run", name, "-n", namespace, "--image", image, "--overrides", overrides, miscOpts) } else { - out, err = exec.Command("kubectl", "run", name, "-n", namespace, "--image", image, "--overrides", overrides).CombinedOutput() + cmd = exec.Command("kubectl", "run", name, "-n", namespace, "--image", image, "--overrides", overrides) } + util.PrintCommand(cmd) + out, err := cmd.CombinedOutput() if err != nil { log.Printf("Error trying to deploy %s [%s] in namespace %s:%s\n", name, image, namespace, string(out)) return nil, err @@ -80,7 +82,9 @@ func CreateLinuxDeploy(image, name, namespace, miscOpts string) (*Deployment, er // --overrides='{ "apiVersion": "extensions/v1beta1", "spec":{"template":{"spec": {"nodeSelector":{"beta.kubernetes.io/os":"linux"}}}}}' func RunLinuxDeploy(image, name, namespace, command string, replicas int) (*Deployment, error) { overrides := `{ "apiVersion": "extensions/v1beta1", "spec":{"template":{"spec": {"nodeSelector":{"beta.kubernetes.io/os":"linux"}}}}}` - out, err := exec.Command("kubectl", "run", name, "-n", namespace, "--image", image, "--replicas", strconv.Itoa(replicas), "--overrides", overrides, "--command", "--", "/bin/sh", "-c", command).CombinedOutput() + cmd := exec.Command("kubectl", "run", name, "-n", namespace, "--image", image, "--replicas", strconv.Itoa(replicas), "--overrides", overrides, "--command", "--", "/bin/sh", "-c", command) + 
util.PrintCommand(cmd) + out, err := cmd.CombinedOutput() if err != nil { log.Printf("Error trying to deploy %s [%s] in namespace %s:%s\n", name, image, namespace, string(out)) return nil, err @@ -96,7 +100,9 @@ func RunLinuxDeploy(image, name, namespace, command string, replicas int) (*Depl // CreateWindowsDeploy will crete a deployment for a given image with a name in a namespace func CreateWindowsDeploy(image, name, namespace string, port int, hostport int) (*Deployment, error) { overrides := `{ "apiVersion": "extensions/v1beta1", "spec":{"template":{"spec": {"nodeSelector":{"beta.kubernetes.io/os":"windows"}}}}}` - out, err := exec.Command("kubectl", "run", name, "-n", namespace, "--image", image, "--port", strconv.Itoa(port), "--hostport", strconv.Itoa(hostport), "--overrides", overrides).CombinedOutput() + cmd := exec.Command("kubectl", "run", name, "-n", namespace, "--image", image, "--port", strconv.Itoa(port), "--hostport", strconv.Itoa(hostport), "--overrides", overrides) + util.PrintCommand(cmd) + out, err := cmd.CombinedOutput() if err != nil { log.Printf("Error trying to deploy %s [%s] in namespace %s:%s\n", name, image, namespace, string(out)) return nil, err @@ -111,7 +117,9 @@ func CreateWindowsDeploy(image, name, namespace string, port int, hostport int) // Get returns a deployment from a name and namespace func Get(name, namespace string) (*Deployment, error) { - out, err := exec.Command("kubectl", "get", "deploy", "-o", "json", "-n", namespace, name).CombinedOutput() + cmd := exec.Command("kubectl", "get", "deploy", "-o", "json", "-n", namespace, name) + util.PrintCommand(cmd) + out, err := cmd.CombinedOutput() if err != nil { log.Printf("Error while trying to fetch deployment %s in namespace %s:%s\n", name, namespace, string(out)) return nil, err @@ -127,7 +135,9 @@ func Get(name, namespace string) (*Deployment, error) { // Delete will delete a deployment in a given namespace func (d *Deployment) Delete() error { - out, err := exec.Command("kubectl", "delete", "deploy", "-n", d.Metadata.Namespace, d.Metadata.Name).CombinedOutput() + cmd := exec.Command("kubectl", "delete", "deploy", "-n", d.Metadata.Namespace, d.Metadata.Name) + util.PrintCommand(cmd) + out, err := cmd.CombinedOutput() if err != nil { log.Printf("Error while trying to delete deployment %s in namespace %s:%s\n", d.Metadata.Namespace, d.Metadata.Name, string(out)) return err @@ -137,7 +147,9 @@ func (d *Deployment) Delete() error { // Expose will create a load balancer and expose the deployment on a given port func (d *Deployment) Expose(svcType string, targetPort, exposedPort int) error { - out, err := exec.Command("kubectl", "expose", "deployment", d.Metadata.Name, "--type", svcType, "-n", d.Metadata.Namespace, "--target-port", strconv.Itoa(targetPort), "--port", strconv.Itoa(exposedPort)).CombinedOutput() + cmd := exec.Command("kubectl", "expose", "deployment", d.Metadata.Name, "--type", svcType, "-n", d.Metadata.Namespace, "--target-port", strconv.Itoa(targetPort), "--port", strconv.Itoa(exposedPort)) + util.PrintCommand(cmd) + out, err := cmd.CombinedOutput() if err != nil { log.Printf("Error while trying to expose (%s) target port (%v) for deployment %s in namespace %s on port %v:%s\n", svcType, targetPort, d.Metadata.Name, d.Metadata.Namespace, exposedPort, string(out)) return err diff --git a/test/e2e/kubernetes/kubernetes_test.go b/test/e2e/kubernetes/kubernetes_test.go index 6d3561d9f2..9ebb0ab9a4 100644 --- a/test/e2e/kubernetes/kubernetes_test.go +++ b/test/e2e/kubernetes/kubernetes_test.go @@ 
-17,6 +17,7 @@ import (
 	"github.com/Azure/acs-engine/test/e2e/kubernetes/node"
 	"github.com/Azure/acs-engine/test/e2e/kubernetes/pod"
 	"github.com/Azure/acs-engine/test/e2e/kubernetes/service"
+	"github.com/Azure/acs-engine/test/e2e/kubernetes/util"
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
 )
@@ -135,45 +136,52 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu
 	It("should be able to access the dashboard from each node", func() {
 		if eng.HasDashboard() {
+			By("Ensuring that the kubernetes-dashboard pod is Running")
+
 			running, err := pod.WaitOnReady("kubernetes-dashboard", "kube-system", 3, 30*time.Second, cfg.Timeout)
 			Expect(err).NotTo(HaveOccurred())
 			Expect(running).To(Equal(true))
 
-			kubeConfig, err := GetConfig()
-			Expect(err).NotTo(HaveOccurred())
-			sshKeyPath := cfg.GetSSHKeyPath()
+			By("Ensuring that the kubernetes-dashboard service is Running")
 
 			s, err := service.Get("kubernetes-dashboard", "kube-system")
 			Expect(err).NotTo(HaveOccurred())
-			dashboardPort := 80
-			version, err := node.Version()
-			Expect(err).NotTo(HaveOccurred())
 
-			re := regexp.MustCompile("v1.9")
-			if re.FindString(version) != "" {
-				dashboardPort = 443
-			}
-			port := s.GetNodePort(dashboardPort)
+			if !eng.HasWindowsAgents() {
+				By("Gathering connection information")
+				dashboardPort := 80
+				version, err := node.Version()
+				Expect(err).NotTo(HaveOccurred())
+				re := regexp.MustCompile("v1.9")
+				if re.FindString(version) != "" {
+					dashboardPort = 443
+				}
+				port := s.GetNodePort(dashboardPort)
 
-			master := fmt.Sprintf("azureuser@%s", kubeConfig.GetServerName())
-			nodeList, err := node.Get()
-			Expect(err).NotTo(HaveOccurred())
+				kubeConfig, err := GetConfig()
+				Expect(err).NotTo(HaveOccurred())
+				master := fmt.Sprintf("azureuser@%s", kubeConfig.GetServerName())
 
-			if !eng.HasWindowsAgents() {
+				sshKeyPath := cfg.GetSSHKeyPath()
+
+				By("Ensuring that we can connect via HTTP to the dashboard on any one node")
+				nodeList, err := node.Get()
+				Expect(err).NotTo(HaveOccurred())
 				for _, node := range nodeList.Nodes {
 					success := false
 					for i := 0; i < 60; i++ {
 						dashboardURL := fmt.Sprintf("http://%s:%v", node.Status.GetAddressByType("InternalIP").Address, port)
 						curlCMD := fmt.Sprintf("curl --max-time 60 %s", dashboardURL)
-						_, err := exec.Command("ssh", "-i", sshKeyPath, "-o", "ConnectTimeout=10", "-o", "StrictHostKeyChecking=no", "-o", "UserKnownHostsFile=/dev/null", master, curlCMD).CombinedOutput()
+						cmd := exec.Command("ssh", "-i", sshKeyPath, "-o", "ConnectTimeout=10", "-o", "StrictHostKeyChecking=no", "-o", "UserKnownHostsFile=/dev/null", master, curlCMD)
+						util.PrintCommand(cmd)
+						out, err := cmd.CombinedOutput()
 						if err == nil {
 							success = true
 							break
 						}
 						if i > 58 {
-							log.Println(curlCMD)
-							log.Println(err.Error())
-							log.Printf("%#v\n", err)
+							log.Printf("Error while connecting to the dashboard:%s\n", err)
+							log.Println(string(out))
 						}
 						time.Sleep(10 * time.Second)
 					}
@@ -196,7 +204,7 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu
 	})
 
 	It("should have rescheduler running", func() {
-		if eng.HasACIConnector() {
+		if eng.HasRescheduler() {
 			running, err := pod.WaitOnReady("rescheduler", "kube-system", 3, 30*time.Second, cfg.Timeout)
 			Expect(err).NotTo(HaveOccurred())
 			Expect(running).To(Equal(true))
@@ -212,6 +220,7 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu
 			Expect(err).NotTo(HaveOccurred())
 			re := regexp.MustCompile("v1.9")
 			if eng.HasLinuxAgents() && re.FindString(version) == "" {
+				By("Creating a test php-apache 
deployment with request limit thresholds") // Inspired by http://blog.kubernetes.io/2016/07/autoscaling-in-kubernetes.html r := rand.New(rand.NewSource(time.Now().UnixNano())) phpApacheName := fmt.Sprintf("php-apache-%s-%v", cfg.Name, r.Intn(99999)) @@ -221,6 +230,7 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu } Expect(err).NotTo(HaveOccurred()) + By("Ensuring that one php-apache pod is running at the beginning") running, err := pod.WaitOnReady(phpApacheName, "default", 3, 30*time.Second, cfg.Timeout) Expect(err).NotTo(HaveOccurred()) Expect(running).To(Equal(true)) @@ -230,20 +240,29 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu // We should have exactly 1 pod to begin Expect(len(phpPods)).To(Equal(1)) + By("Exposing TCP 80 internally on the php-apache deployment") err = phpApacheDeploy.Expose("ClusterIP", 80, 80) Expect(err).NotTo(HaveOccurred()) s, err := service.Get(phpApacheName, "default") Expect(err).NotTo(HaveOccurred()) + By("Assigning hpa configuration to the php-apache deployment") // Apply autoscale characteristics to deployment - _, err = exec.Command("kubectl", "autoscale", "deployment", phpApacheName, "--cpu-percent=5", "--min=1", "--max=10").CombinedOutput() + cmd := exec.Command("kubectl", "autoscale", "deployment", phpApacheName, "--cpu-percent=5", "--min=1", "--max=10") + util.PrintCommand(cmd) + out, err := cmd.CombinedOutput() + if err != nil { + log.Printf("Error while configuring autoscale against deployment %s:%s\n", phpApacheName, string(out)) + } Expect(err).NotTo(HaveOccurred()) + By("Before sending load we should have one Running php-apache pod") phpPods, err = phpApacheDeploy.Pods() Expect(err).NotTo(HaveOccurred()) // We should still have exactly 1 pod after autoscale config but before load Expect(len(phpPods)).To(Equal(1)) + By("Sending load to the php-apache service by creating a 3 replica deployment") // Launch a simple busybox pod that wget's continuously to the apache serviceto simulate load commandString := fmt.Sprintf("while true; do wget -q -O- http://%s.default.svc.cluster.local; done", phpApacheName) loadTestName := fmt.Sprintf("load-test-%s-%v", cfg.Name, r.Intn(99999)) @@ -251,6 +270,7 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu loadTestDeploy, err := deployment.RunLinuxDeploy("busybox", loadTestName, "default", commandString, numLoadTestPods) Expect(err).NotTo(HaveOccurred()) + By("Ensuring there are 3 load test pods") running, err = pod.WaitOnReady(loadTestName, "default", 3, 30*time.Second, cfg.Timeout) Expect(err).NotTo(HaveOccurred()) Expect(running).To(Equal(true)) @@ -260,9 +280,11 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu Expect(err).NotTo(HaveOccurred()) Expect(len(loadTestPods)).To(Equal(numLoadTestPods)) + By("Waiting 3 minutes for load to take effect") // Wait 3 minutes for autoscaler to respond to load time.Sleep(3 * time.Minute) + By("Ensuring we have more than 1 apache-php pods due to hpa enforcement") phpPods, err = phpApacheDeploy.Pods() Expect(err).NotTo(HaveOccurred()) // We should have > 1 pods after autoscale effects @@ -279,27 +301,31 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu It("should be able to deploy an nginx service", func() { if eng.HasLinuxAgents() { + By("Creating a nginx deployment") r := rand.New(rand.NewSource(time.Now().UnixNano())) deploymentName := fmt.Sprintf("nginx-%s-%v", cfg.Name, r.Intn(99999)) nginxDeploy, err 
:= deployment.CreateLinuxDeploy("library/nginx:latest", deploymentName, "default", "") Expect(err).NotTo(HaveOccurred()) + By("Ensure there is a Running nginx pod") running, err := pod.WaitOnReady(deploymentName, "default", 3, 30*time.Second, cfg.Timeout) Expect(err).NotTo(HaveOccurred()) Expect(running).To(Equal(true)) + By("Exposing TCP 80 LB on the nginx deployment") err = nginxDeploy.Expose("LoadBalancer", 80, 80) Expect(err).NotTo(HaveOccurred()) - s, err := service.Get(deploymentName, "default") Expect(err).NotTo(HaveOccurred()) s, err = s.WaitForExternalIP(cfg.Timeout, 5*time.Second) Expect(err).NotTo(HaveOccurred()) Expect(s.Status.LoadBalancer.Ingress).NotTo(BeEmpty()) + By("Ensuring we can connect to the service") valid := s.Validate("(Welcome to nginx)", 5, 5*time.Second) Expect(valid).To(BeTrue()) + By("Ensuring we have outbound internet access from the nginx pods") nginxPods, err := nginxDeploy.Pods() Expect(err).NotTo(HaveOccurred()) Expect(len(nginxPods)).ToNot(BeZero()) diff --git a/test/e2e/kubernetes/util/util.go b/test/e2e/kubernetes/util/util.go new file mode 100644 index 0000000000..385422c299 --- /dev/null +++ b/test/e2e/kubernetes/util/util.go @@ -0,0 +1,12 @@ +package util + +import ( + "fmt" + "os/exec" + "strings" +) + +// PrintCommand prints a command string +func PrintCommand(cmd *exec.Cmd) { + fmt.Printf("\n$ %s\n", strings.Join(cmd.Args, " ")) +} diff --git a/test/e2e/runner/ginkgo.go b/test/e2e/runner/ginkgo.go index 672dba2e8f..38205b34e0 100644 --- a/test/e2e/runner/ginkgo.go +++ b/test/e2e/runner/ginkgo.go @@ -33,7 +33,7 @@ func BuildGinkgoRunner(cfg *config.Config, pt *metrics.Point) (*Ginkgo, error) { func (g *Ginkgo) Run() error { g.Point.SetTestStart() testDir := fmt.Sprintf("test/e2e/%s", g.Config.Orchestrator) - cmd := exec.Command("ginkgo", "-slowSpecThreshold", "180", "-r", testDir) + cmd := exec.Command("ginkgo", "-slowSpecThreshold", "180", "-r", "-v", testDir) cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr err := cmd.Start() From 30fd6011974e10b8853ea1759fa890c5e261315e Mon Sep 17 00:00:00 2001 From: Jack Francis Date: Thu, 25 Jan 2018 17:42:10 -0800 Subject: [PATCH 4/9] deployment flows need expanded cluster definition --- test/e2e/runner/cli_provisioner.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/test/e2e/runner/cli_provisioner.go b/test/e2e/runner/cli_provisioner.go index 29c591bb4b..2079dd3181 100644 --- a/test/e2e/runner/cli_provisioner.go +++ b/test/e2e/runner/cli_provisioner.go @@ -133,6 +133,17 @@ func (cli *CLIProvisioner) provision() error { return fmt.Errorf("Error while trying to generate acs-engine template:%s", err) } + c, err := config.ParseConfig() + engCfg, err := engine.ParseConfig(cli.Config.CurrentWorkingDir, c.ClusterDefinition, c.Name) + if err != nil { + return fmt.Errorf("unable to parse config") + } + csGenerated, err := engine.ParseOutput(engCfg.GeneratedDefinitionPath + "/apimodel.json") + if err != nil { + return fmt.Errorf("unable to parse output") + } + cli.Engine.ExpandedDefinition = csGenerated + // Lets start by just using the normal az group deployment cli for creating a cluster err = cli.Account.CreateDeployment(cli.Config.Name, eng) if err != nil { From 1a3643d02ee4ae7eaeeb77caf947ebbfaa426d0f Mon Sep 17 00:00:00 2001 From: Jack Francis Date: Thu, 25 Jan 2018 20:34:47 -0800 Subject: [PATCH 5/9] reverting to ClusterDefinition for node counts --- test/e2e/engine/template.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/e2e/engine/template.go 
b/test/e2e/engine/template.go index d712c8cc6a..a83c7eebb2 100644 --- a/test/e2e/engine/template.go +++ b/test/e2e/engine/template.go @@ -131,8 +131,8 @@ func Build(cfg *config.Config, subnetID string) (*Engine, error) { // NodeCount returns the number of nodes that should be provisioned for a given cluster definition func (e *Engine) NodeCount() int { - expectedCount := e.ExpandedDefinition.Properties.MasterProfile.Count - for _, pool := range e.ExpandedDefinition.Properties.AgentPoolProfiles { + expectedCount := e.ClusterDefinition.Properties.MasterProfile.Count + for _, pool := range e.ClusterDefinition.Properties.AgentPoolProfiles { expectedCount = expectedCount + pool.Count } return expectedCount From a1dc3737225aca40c56c78a46785f4efd111dcdd Mon Sep 17 00:00:00 2001 From: Jack Francis Date: Fri, 26 Jan 2018 08:57:04 -0800 Subject: [PATCH 6/9] standard stdout implementation for all commands --- test/e2e/azure/cli.go | 45 +++++++++++++------ test/e2e/engine/cli.go | 6 ++- test/e2e/kubernetes/config.go | 6 ++- test/e2e/kubernetes/namespace/namespace.go | 14 ++++-- test/e2e/kubernetes/node/node.go | 10 ++++- .../persistentvolumeclaims.go | 10 ++++- test/e2e/kubernetes/pod/pod.go | 34 +++++++++----- test/e2e/kubernetes/service/service.go | 10 ++++- .../kubernetes/storageclass/storageclass.go | 10 ++++- test/e2e/remote/ssh.go | 13 ++++-- test/e2e/runner/cli_provisioner.go | 12 ++++- test/e2e/runner/ginkgo.go | 12 +++-- 12 files changed, 135 insertions(+), 47 deletions(-) diff --git a/test/e2e/azure/cli.go b/test/e2e/azure/cli.go index 81bbbddc3b..c44925dc71 100644 --- a/test/e2e/azure/cli.go +++ b/test/e2e/azure/cli.go @@ -8,6 +8,7 @@ import ( "time" "github.com/Azure/acs-engine/test/e2e/engine" + "github.com/Azure/acs-engine/test/e2e/kubernetes/util" "github.com/kelseyhightower/envconfig" ) @@ -70,8 +71,12 @@ func (a *Account) Login() error { // SetSubscription will call az account set --subscription for the given Account func (a *Account) SetSubscription() error { - _, err := exec.Command("az", "account", "set", "--subscription", a.SubscriptionID).CombinedOutput() + cmd := exec.Command("az", "account", "set", "--subscription", a.SubscriptionID) + util.PrintCommand(cmd) + out, err := cmd.CombinedOutput() if err != nil { + log.Printf("Error while trying to set subscription (%s):%s", a.SubscriptionID, err) + log.Printf("Output:%s\n", out) return err } return nil @@ -81,9 +86,11 @@ func (a *Account) SetSubscription() error { //--tags "type=${RESOURCE_GROUP_TAG_TYPE:-}" "now=$(date +%s)" "job=${JOB_BASE_NAME:-}" "buildno=${BUILD_NUM:-}" func (a *Account) CreateGroup(name, location string) error { now := fmt.Sprintf("now=%v", time.Now().Unix()) - out, err := exec.Command("az", "group", "create", "--name", name, "--location", location, "--tags", now).CombinedOutput() + cmd := exec.Command("az", "group", "create", "--name", name, "--location", location, "--tags", now) + util.PrintCommand(cmd) + out, err := cmd.CombinedOutput() if err != nil { - log.Printf("Error while trying create resource group (%s) in %s:%s", name, location, err) + log.Printf("Error while trying to create resource group (%s) in %s:%s", name, location, err) log.Printf("Output:%s\n", out) return err } @@ -97,13 +104,14 @@ func (a *Account) CreateGroup(name, location string) error { // DeleteGroup deletes a given resource group by name func (a *Account) DeleteGroup(name string, wait bool) error { - var out []byte - var err error + var cmd *exec.Cmd if !wait { - out, err = exec.Command("az", "group", "delete", "--name", name, 
"--no-wait", "--yes").CombinedOutput() + cmd = exec.Command("az", "group", "delete", "--name", name, "--no-wait", "--yes") } else { - out, err = exec.Command("az", "group", "delete", "--name", name, "--yes").CombinedOutput() + cmd = exec.Command("az", "group", "delete", "--name", name, "--yes") } + util.PrintCommand(cmd) + out, err := cmd.CombinedOutput() if err != nil { log.Printf("Error while trying to delete resource group (%s):%s", name, out) return err @@ -134,14 +142,16 @@ func (a *Account) CreateDeployment(name string, e *engine.Engine) error { } }() - output, err := exec.Command("az", "group", "deployment", "create", + cmd := exec.Command("az", "group", "deployment", "create", "--name", d.Name, "--resource-group", a.ResourceGroup.Name, "--template-file", e.Config.GeneratedTemplatePath, - "--parameters", e.Config.GeneratedParametersPath).CombinedOutput() + "--parameters", e.Config.GeneratedParametersPath) + util.PrintCommand(cmd) + out, err := cmd.CombinedOutput() if err != nil { log.Printf("\nError from deployment for %s in resource group %s:%s\n", d.Name, a.ResourceGroup.Name, err) - log.Printf("Command Output: %s\n", output) + log.Printf("Command Output: %s\n", out) return err } quit <- true @@ -151,7 +161,9 @@ func (a *Account) CreateDeployment(name string, e *engine.Engine) error { // GetCurrentAccount will run an az account show and parse that into an account strcut func GetCurrentAccount() (*Account, error) { - out, err := exec.Command("az", "account", "show").CombinedOutput() + cmd := exec.Command("az", "account", "show") + util.PrintCommand(cmd) + out, err := cmd.CombinedOutput() if err != nil { log.Printf("Error trying to run 'account show':%s\n", err) return nil, err @@ -167,7 +179,9 @@ func GetCurrentAccount() (*Account, error) { // CreateVnet will create a vnet in a resource group func (a *Account) CreateVnet(vnet, addressPrefixes, subnetName, subnetPrefix string) error { - out, err := exec.Command("az", "network", "vnet", "create", "-g", a.ResourceGroup.Name, "-n", vnet, "--address-prefixes", addressPrefixes, "--subnet-name", subnetName, "--subnet-prefix", subnetPrefix).CombinedOutput() + cmd := exec.Command("az", "network", "vnet", "create", "-g", a.ResourceGroup.Name, "-n", vnet, "--address-prefixes", addressPrefixes, "--subnet-name", subnetName, "--subnet-prefix", subnetPrefix) + util.PrintCommand(cmd) + out, err := cmd.CombinedOutput() if err != nil { log.Printf("Error while trying to create vnet with the following command:\n az network vnet create -g %s -n %s --address-prefixes %s --subnet-name %s --subnet-prefix %s \n Output:%s\n", a.ResourceGroup.Name, vnet, addressPrefixes, subnetName, subnetPrefix, out) return err @@ -186,7 +200,8 @@ type RouteTable struct { // UpdateRouteTables is used to updated a vnet with the appropriate route tables func (a *Account) UpdateRouteTables(subnet, vnet string) error { - out, err := exec.Command("az", "network", "route-table", "list", "-g", a.ResourceGroup.Name).CombinedOutput() + cmd := exec.Command("az", "network", "route-table", "list", "-g", a.ResourceGroup.Name) + out, err := cmd.CombinedOutput() if err != nil { log.Printf("Error while trying to get route table list!\n Output:%s\n", out) return err @@ -194,7 +209,9 @@ func (a *Account) UpdateRouteTables(subnet, vnet string) error { rts := []RouteTable{} json.Unmarshal(out, &rts) - out, err = exec.Command("az", "network", "vnet", "subnet", "update", "-n", subnet, "-g", a.ResourceGroup.Name, "--vnet-name", vnet, "--route-table", rts[0].Name).CombinedOutput() + cmd = 
exec.Command("az", "network", "vnet", "subnet", "update", "-n", subnet, "-g", a.ResourceGroup.Name, "--vnet-name", vnet, "--route-table", rts[0].Name) + util.PrintCommand(cmd) + out, err = cmd.CombinedOutput() if err != nil { log.Printf("Error while trying to update vnet route tables:%s\n", out) return err diff --git a/test/e2e/engine/cli.go b/test/e2e/engine/cli.go index 06bff13f97..030149f3e2 100644 --- a/test/e2e/engine/cli.go +++ b/test/e2e/engine/cli.go @@ -3,11 +3,15 @@ package engine import ( "log" "os/exec" + + "github.com/Azure/acs-engine/test/e2e/kubernetes/util" ) // Generate will run acs-engine generate on a given cluster definition func (e *Engine) Generate() error { - out, err := exec.Command("./bin/acs-engine", "generate", e.Config.ClusterDefinitionTemplate, "--output-directory", e.Config.GeneratedDefinitionPath).CombinedOutput() + cmd := exec.Command("./bin/acs-engine", "generate", e.Config.ClusterDefinitionTemplate, "--output-directory", e.Config.GeneratedDefinitionPath) + util.PrintCommand(cmd) + out, err := cmd.CombinedOutput() if err != nil { log.Printf("Error while trying to generate acs-engine template with cluster definition - %s: %s\n", e.Config.ClusterDefinitionTemplate, err) log.Printf("Command:./bin/acs-engine generate %s --output-directory %s\n", e.Config.ClusterDefinitionTemplate, e.Config.GeneratedDefinitionPath) diff --git a/test/e2e/kubernetes/config.go b/test/e2e/kubernetes/config.go index 044103fe05..f2509d35e9 100644 --- a/test/e2e/kubernetes/config.go +++ b/test/e2e/kubernetes/config.go @@ -5,6 +5,8 @@ import ( "log" "os/exec" "strings" + + "github.com/Azure/acs-engine/test/e2e/kubernetes/util" ) // Config represents a kubernetes config object @@ -25,7 +27,9 @@ type ClusterInfo struct { // GetConfig returns a Config value representing the current kubeconfig func GetConfig() (*Config, error) { - out, err := exec.Command("kubectl", "config", "view", "-o", "json").CombinedOutput() + cmd := exec.Command("kubectl", "config", "view", "-o", "json") + util.PrintCommand(cmd) + out, err := cmd.CombinedOutput() if err != nil { log.Printf("Error trying to run 'kubectl config view':%s\n", err) return nil, err diff --git a/test/e2e/kubernetes/namespace/namespace.go b/test/e2e/kubernetes/namespace/namespace.go index eb0fd05251..19717b451c 100644 --- a/test/e2e/kubernetes/namespace/namespace.go +++ b/test/e2e/kubernetes/namespace/namespace.go @@ -5,6 +5,8 @@ import ( "log" "os/exec" "time" + + "github.com/Azure/acs-engine/test/e2e/kubernetes/util" ) // Namespace holds namespace metadata @@ -20,7 +22,9 @@ type Metadata struct { // Create a namespace with the given name func Create(name string) (*Namespace, error) { - out, err := exec.Command("kubectl", "create", "namespace", name).CombinedOutput() + cmd := exec.Command("kubectl", "create", "namespace", name) + util.PrintCommand(cmd) + out, err := cmd.CombinedOutput() if err != nil { log.Printf("Error trying to create namespace (%s):%s\n", name, string(out)) return nil, err @@ -30,7 +34,9 @@ func Create(name string) (*Namespace, error) { // Get returns a namespace for with a given name func Get(name string) (*Namespace, error) { - out, err := exec.Command("kubectl", "get", "namespace", name, "-o", "json").CombinedOutput() + cmd := exec.Command("kubectl", "get", "namespace", name, "-o", "json") + util.PrintCommand(cmd) + out, err := cmd.CombinedOutput() if err != nil { log.Printf("Error trying to get namespace (%s):%s\n", name, string(out)) return nil, err @@ -45,7 +51,9 @@ func Get(name string) (*Namespace, error) { // 
Delete a namespace func (n *Namespace) Delete() error { - out, err := exec.Command("kubectl", "delete", "namespace", n.Metadata.Name).CombinedOutput() + cmd := exec.Command("kubectl", "delete", "namespace", n.Metadata.Name) + util.PrintCommand(cmd) + out, err := cmd.CombinedOutput() if err != nil { log.Printf("Error while trying to delete namespace (%s):%s\n", n.Metadata.Name, out) return err diff --git a/test/e2e/kubernetes/node/node.go b/test/e2e/kubernetes/node/node.go index 21ff5c37d0..19c7b87d78 100644 --- a/test/e2e/kubernetes/node/node.go +++ b/test/e2e/kubernetes/node/node.go @@ -9,6 +9,8 @@ import ( "regexp" "strings" "time" + + "github.com/Azure/acs-engine/test/e2e/kubernetes/util" ) const ( @@ -113,7 +115,9 @@ func WaitOnReady(nodeCount int, sleep, duration time.Duration) bool { // Get returns the current nodes for a given kubeconfig func Get() (*List, error) { - out, err := exec.Command("kubectl", "get", "nodes", "-o", "json").CombinedOutput() + cmd := exec.Command("kubectl", "get", "nodes", "-o", "json") + util.PrintCommand(cmd) + out, err := cmd.CombinedOutput() if err != nil { log.Printf("Error trying to run 'kubectl get nodes':%s", string(out)) return nil, err @@ -128,7 +132,9 @@ func Get() (*List, error) { // Version get the version of the server func Version() (string, error) { - out, err := exec.Command("kubectl", "version", "--short").CombinedOutput() + cmd := exec.Command("kubectl", "version", "--short") + util.PrintCommand(cmd) + out, err := cmd.CombinedOutput() if err != nil { log.Printf("Error trying to run 'kubectl version':%s", string(out)) return "", err diff --git a/test/e2e/kubernetes/persistentvolumeclaims/persistentvolumeclaims.go b/test/e2e/kubernetes/persistentvolumeclaims/persistentvolumeclaims.go index 43a14de761..1390821efc 100644 --- a/test/e2e/kubernetes/persistentvolumeclaims/persistentvolumeclaims.go +++ b/test/e2e/kubernetes/persistentvolumeclaims/persistentvolumeclaims.go @@ -7,6 +7,8 @@ import ( "log" "os/exec" "time" + + "github.com/Azure/acs-engine/test/e2e/kubernetes/util" ) // PersistentVolumeClaims is used to parse data from kubectl get pvc @@ -36,7 +38,9 @@ type Status struct { // CreatePersistentVolumeClaimsFromFile will create a StorageClass from file with a name func CreatePersistentVolumeClaimsFromFile(filename, name, namespace string) (*PersistentVolumeClaims, error) { - out, err := exec.Command("kubectl", "apply", "-f", filename).CombinedOutput() + cmd := exec.Command("kubectl", "apply", "-f", filename) + util.PrintCommand(cmd) + out, err := cmd.CombinedOutput() if err != nil { log.Printf("Error trying to create PersistentVolumeClaims %s in namespace %s:%s\n", name, namespace, string(out)) return nil, err @@ -51,7 +55,9 @@ func CreatePersistentVolumeClaimsFromFile(filename, name, namespace string) (*Pe // Get will return a PersistentVolumeClaims with a given name and namespace func Get(pvcName, namespace string) (*PersistentVolumeClaims, error) { - out, err := exec.Command("kubectl", "get", "pvc", pvcName, "-n", namespace, "-o", "json").CombinedOutput() + cmd := exec.Command("kubectl", "get", "pvc", pvcName, "-n", namespace, "-o", "json") + util.PrintCommand(cmd) + out, err := cmd.CombinedOutput() if err != nil { return nil, err } diff --git a/test/e2e/kubernetes/pod/pod.go b/test/e2e/kubernetes/pod/pod.go index 384f3297b9..ff7ea00f5b 100644 --- a/test/e2e/kubernetes/pod/pod.go +++ b/test/e2e/kubernetes/pod/pod.go @@ -9,6 +9,8 @@ import ( "regexp" "strings" "time" + + "github.com/Azure/acs-engine/test/e2e/kubernetes/util" ) const ( @@ 
-62,7 +64,9 @@ type Status struct { // CreatePodFromFile will create a Pod from file with a name func CreatePodFromFile(filename, name, namespace string) (*Pod, error) { - out, err := exec.Command("kubectl", "apply", "-f", filename).CombinedOutput() + cmd := exec.Command("kubectl", "apply", "-f", filename) + util.PrintCommand(cmd) + out, err := cmd.CombinedOutput() if err != nil { log.Printf("Error trying to create Pod %s:%s\n", name, string(out)) return nil, err @@ -77,7 +81,9 @@ func CreatePodFromFile(filename, name, namespace string) (*Pod, error) { // GetAll will return all pods in a given namespace func GetAll(namespace string) (*List, error) { - out, err := exec.Command("kubectl", "get", "pods", "-n", namespace, "-o", "json").CombinedOutput() + cmd := exec.Command("kubectl", "get", "pods", "-n", namespace, "-o", "json") + util.PrintCommand(cmd) + out, err := cmd.CombinedOutput() if err != nil { return nil, err } @@ -92,7 +98,9 @@ func GetAll(namespace string) (*List, error) { // Get will return a pod with a given name and namespace func Get(podName, namespace string) (*Pod, error) { - out, err := exec.Command("kubectl", "get", "pods", podName, "-n", namespace, "-o", "json").CombinedOutput() + cmd := exec.Command("kubectl", "get", "pods", podName, "-n", namespace, "-o", "json") + util.PrintCommand(cmd) + out, err := cmd.CombinedOutput() if err != nil { return nil, err } @@ -206,15 +214,17 @@ func (p *Pod) WaitOnReady(sleep, duration time.Duration) (bool, error) { } // Exec will execute the given command in the pod -func (p *Pod) Exec(cmd ...string) ([]byte, error) { +func (p *Pod) Exec(c ...string) ([]byte, error) { execCmd := []string{"exec", p.Metadata.Name, "-n", p.Metadata.Namespace} - for _, s := range cmd { + for _, s := range c { execCmd = append(execCmd, s) } - out, err := exec.Command("kubectl", execCmd...).CombinedOutput() + cmd := exec.Command("kubectl", execCmd...) 
+ util.PrintCommand(cmd) + out, err := cmd.CombinedOutput() if err != nil { log.Printf("Error trying to run 'kubectl exec':%s\n", string(out)) - log.Printf("Command:kubectl exec %s -n %s %s \n", p.Metadata.Name, p.Metadata.Namespace, cmd) + log.Printf("Command:kubectl exec %s -n %s %s \n", p.Metadata.Name, p.Metadata.Namespace, c) return nil, err } return out, nil @@ -222,7 +232,9 @@ func (p *Pod) Exec(cmd ...string) ([]byte, error) { // Delete will delete a Pod in a given namespace func (p *Pod) Delete() error { - out, err := exec.Command("kubectl", "delete", "po", "-n", p.Metadata.Namespace, p.Metadata.Name).CombinedOutput() + cmd := exec.Command("kubectl", "delete", "po", "-n", p.Metadata.Namespace, p.Metadata.Name) + util.PrintCommand(cmd) + out, err := cmd.CombinedOutput() if err != nil { log.Printf("Error while trying to delete Pod %s in namespace %s:%s\n", p.Metadata.Namespace, p.Metadata.Name, string(out)) return err @@ -337,9 +349,11 @@ func (p *Pod) ValidateHostPort(check string, attempts int, sleep time.Duration, curlCMD := fmt.Sprintf("curl --max-time 60 %s", url) for i := 0; i < attempts; i++ { - resp, err := exec.Command("ssh", "-i", sshKeyPath, "-o", "ConnectTimeout=10", "-o", "StrictHostKeyChecking=no", "-o", "UserKnownHostsFile=/dev/null", master, curlCMD).CombinedOutput() + cmd := exec.Command("ssh", "-i", sshKeyPath, "-o", "ConnectTimeout=10", "-o", "StrictHostKeyChecking=no", "-o", "UserKnownHostsFile=/dev/null", master, curlCMD) + util.PrintCommand(cmd) + out, err := cmd.CombinedOutput() if err == nil { - matched, _ := regexp.MatchString(check, string(resp)) + matched, _ := regexp.MatchString(check, string(out)) if matched == true { return true } diff --git a/test/e2e/kubernetes/service/service.go b/test/e2e/kubernetes/service/service.go index 506e7ad31c..7837678b9e 100644 --- a/test/e2e/kubernetes/service/service.go +++ b/test/e2e/kubernetes/service/service.go @@ -10,6 +10,8 @@ import ( "os/exec" "regexp" "time" + + "github.com/Azure/acs-engine/test/e2e/kubernetes/util" ) // Service represents a kubernetes service @@ -54,7 +56,9 @@ type LoadBalancer struct { // Get returns the service definition specified in a given namespace func Get(name, namespace string) (*Service, error) { - out, err := exec.Command("kubectl", "get", "svc", "-o", "json", "-n", namespace, name).CombinedOutput() + cmd := exec.Command("kubectl", "get", "svc", "-o", "json", "-n", namespace, name) + util.PrintCommand(cmd) + out, err := cmd.CombinedOutput() if err != nil { log.Printf("Error trying to run 'kubectl get svc':%s\n", string(out)) return nil, err @@ -70,7 +74,9 @@ func Get(name, namespace string) (*Service, error) { // Delete will delete a service in a given namespace func (s *Service) Delete() error { - out, err := exec.Command("kubectl", "delete", "svc", "-n", s.Metadata.Namespace, s.Metadata.Name).CombinedOutput() + cmd := exec.Command("kubectl", "delete", "svc", "-n", s.Metadata.Namespace, s.Metadata.Name) + util.PrintCommand(cmd) + out, err := cmd.CombinedOutput() if err != nil { log.Printf("Error while trying to delete service %s in namespace %s:%s\n", s.Metadata.Namespace, s.Metadata.Name, string(out)) return err diff --git a/test/e2e/kubernetes/storageclass/storageclass.go b/test/e2e/kubernetes/storageclass/storageclass.go index bfbc6383d3..02a81d88e2 100644 --- a/test/e2e/kubernetes/storageclass/storageclass.go +++ b/test/e2e/kubernetes/storageclass/storageclass.go @@ -7,6 +7,8 @@ import ( "log" "os/exec" "time" + + "github.com/Azure/acs-engine/test/e2e/kubernetes/util" ) // 
StorageClass is used to parse data from kubectl get storageclass @@ -28,7 +30,9 @@ type Parameters struct { // CreateStorageClassFromFile will create a StorageClass from file with a name func CreateStorageClassFromFile(filename, name string) (*StorageClass, error) { - out, err := exec.Command("kubectl", "apply", "-f", filename).CombinedOutput() + cmd := exec.Command("kubectl", "apply", "-f", filename) + util.PrintCommand(cmd) + out, err := cmd.CombinedOutput() if err != nil { log.Printf("Error trying to create StorageClass %s:%s\n", name, string(out)) return nil, err @@ -43,7 +47,9 @@ func CreateStorageClassFromFile(filename, name string) (*StorageClass, error) { // Get will return a StorageClass with a given name and namespace func Get(scName string) (*StorageClass, error) { - out, err := exec.Command("kubectl", "get", "storageclass", scName, "-o", "json").CombinedOutput() + cmd := exec.Command("kubectl", "get", "storageclass", scName, "-o", "json") + util.PrintCommand(cmd) + out, err := cmd.CombinedOutput() if err != nil { return nil, err } diff --git a/test/e2e/remote/ssh.go b/test/e2e/remote/ssh.go index bd4dd98baf..4ef0a41981 100644 --- a/test/e2e/remote/ssh.go +++ b/test/e2e/remote/ssh.go @@ -10,6 +10,7 @@ import ( "os/exec" "time" + "github.com/Azure/acs-engine/test/e2e/kubernetes/util" "golang.org/x/crypto/ssh" "golang.org/x/crypto/ssh/agent" ) @@ -91,9 +92,11 @@ func (c *Connection) Execute(cmd string) ([]byte, error) { } func (c *Connection) Write(data, path string) error { - cmd := fmt.Sprintf("echo %s > %s", data, path) + remoteCommand := fmt.Sprintf("echo %s > %s", data, path) connectString := fmt.Sprintf("%s@%s", c.User, c.Host) - out, err := exec.Command("ssh", "-i", c.PrivateKeyPath, "-o", "ConnectTimeout=30", "-o", "StrictHostKeyChecking=no", connectString, "-p", c.Port, cmd).CombinedOutput() + cmd := exec.Command("ssh", "-i", c.PrivateKeyPath, "-o", "ConnectTimeout=30", "-o", "StrictHostKeyChecking=no", connectString, "-p", c.Port, remoteCommand) + util.PrintCommand(cmd) + out, err := cmd.CombinedOutput() if err != nil { log.Printf("Error output:%s\n", out) return err @@ -102,9 +105,11 @@ func (c *Connection) Write(data, path string) error { } func (c *Connection) Read(path string) ([]byte, error) { - cmd := fmt.Sprintf("cat %s", path) + remoteCommand := fmt.Sprintf("cat %s", path) connectString := fmt.Sprintf("%s@%s", c.User, c.Host) - out, err := exec.Command("ssh", "-i", c.PrivateKeyPath, "-o", "ConnectTimeout=30", "-o", "StrictHostKeyChecking=no", connectString, "-p", c.Port, cmd).CombinedOutput() + cmd := exec.Command("ssh", "-i", c.PrivateKeyPath, "-o", "ConnectTimeout=30", "-o", "StrictHostKeyChecking=no", connectString, "-p", c.Port, remoteCommand) + util.PrintCommand(cmd) + out, err := cmd.CombinedOutput() if err != nil { log.Printf("Error output:%s\n", out) return nil, err diff --git a/test/e2e/runner/cli_provisioner.go b/test/e2e/runner/cli_provisioner.go index 2079dd3181..f9ff3ea3bd 100644 --- a/test/e2e/runner/cli_provisioner.go +++ b/test/e2e/runner/cli_provisioner.go @@ -15,6 +15,7 @@ import ( "github.com/Azure/acs-engine/test/e2e/dcos" "github.com/Azure/acs-engine/test/e2e/engine" "github.com/Azure/acs-engine/test/e2e/kubernetes/node" + "github.com/Azure/acs-engine/test/e2e/kubernetes/util" "github.com/Azure/acs-engine/test/e2e/metrics" "github.com/Azure/acs-engine/test/e2e/remote" "github.com/kelseyhightower/envconfig" @@ -86,11 +87,18 @@ func (cli *CLIProvisioner) provision() error { os.Mkdir(outputPath, 0755) if cli.Config.SoakClusterName == "" { - out, 
err := exec.Command("ssh-keygen", "-f", cli.Config.GetSSHKeyPath(), "-q", "-N", "", "-b", "2048", "-t", "rsa").CombinedOutput() + cmd := exec.Command("ssh-keygen", "-f", cli.Config.GetSSHKeyPath(), "-q", "-N", "", "-b", "2048", "-t", "rsa") + util.PrintCommand(cmd) + out, err := cmd.CombinedOutput() if err != nil { return fmt.Errorf("Error while trying to generate ssh key:%s\nOutput:%s", err, out) } - exec.Command("chmod", "0600", cli.Config.GetSSHKeyPath()+"*") + cmd = exec.Command("chmod", "0600", cli.Config.GetSSHKeyPath()+"*") + util.PrintComand(cmd) + out, err = cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("Error while setting mode perms on ssh key:%s\nOutput:%s", err, out) + } } publicSSHKey, err := cli.Config.ReadPublicSSHKey() diff --git a/test/e2e/runner/ginkgo.go b/test/e2e/runner/ginkgo.go index 38205b34e0..0b94019edb 100644 --- a/test/e2e/runner/ginkgo.go +++ b/test/e2e/runner/ginkgo.go @@ -7,6 +7,7 @@ import ( "os/exec" "github.com/Azure/acs-engine/test/e2e/config" + "github.com/Azure/acs-engine/test/e2e/kubernetes/util" "github.com/Azure/acs-engine/test/e2e/metrics" "github.com/kelseyhightower/envconfig" ) @@ -34,6 +35,7 @@ func (g *Ginkgo) Run() error { g.Point.SetTestStart() testDir := fmt.Sprintf("test/e2e/%s", g.Config.Orchestrator) cmd := exec.Command("ginkgo", "-slowSpecThreshold", "180", "-r", "-v", testDir) + util.PrintCommand(cmd) cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr err := cmd.Start() @@ -47,10 +49,12 @@ func (g *Ginkgo) Run() error { if err != nil { g.Point.RecordTestError() if g.Config.IsKubernetes() { - out, _ := exec.Command("kubectl", "get", "all", "--all-namespaces", "-o", "wide").CombinedOutput() - log.Printf("Running kubectl get all:\n%s\n", out) - out, _ = exec.Command("kubectl", "get", "nodes", "-o", "wide").CombinedOutput() - log.Printf("Running kubectl get nodes:\n%s\n", out) + kubectl := exec.Command("kubectl", "get", "all", "--all-namespaces", "-o", "wide") + util.PrintCommand(kubectl) + kubectl.CombinedOutput() + kubectl = exec.Command("kubectl", "get", "nodes", "-o", "wide") + util.PrintCommand(kubectl) + kubectl.CombinedOutput() } return err } From 272f990fd0cb93c725ac13321eb7c7cf39b0cdc0 Mon Sep 17 00:00:00 2001 From: Jack Francis Date: Fri, 26 Jan 2018 09:06:32 -0800 Subject: [PATCH 7/9] typo --- test/e2e/runner/cli_provisioner.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/runner/cli_provisioner.go b/test/e2e/runner/cli_provisioner.go index f9ff3ea3bd..cd2ea01907 100644 --- a/test/e2e/runner/cli_provisioner.go +++ b/test/e2e/runner/cli_provisioner.go @@ -94,7 +94,7 @@ func (cli *CLIProvisioner) provision() error { return fmt.Errorf("Error while trying to generate ssh key:%s\nOutput:%s", err, out) } cmd = exec.Command("chmod", "0600", cli.Config.GetSSHKeyPath()+"*") - util.PrintComand(cmd) + util.PrintCommand(cmd) out, err = cmd.CombinedOutput() if err != nil { return fmt.Errorf("Error while setting mode perms on ssh key:%s\nOutput:%s", err, out) From 134cdd865a6d96c3b17537a24403c47677ad3480 Mon Sep 17 00:00:00 2001 From: Jack Francis Date: Fri, 26 Jan 2018 09:15:43 -0800 Subject: [PATCH 8/9] disable broken chmod command --- test/e2e/runner/cli_provisioner.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/e2e/runner/cli_provisioner.go b/test/e2e/runner/cli_provisioner.go index cd2ea01907..a7a8eb65ab 100644 --- a/test/e2e/runner/cli_provisioner.go +++ b/test/e2e/runner/cli_provisioner.go @@ -93,12 +93,12 @@ func (cli *CLIProvisioner) provision() error { if err != nil 
From 134cdd865a6d96c3b17537a24403c47677ad3480 Mon Sep 17 00:00:00 2001
From: Jack Francis
Date: Fri, 26 Jan 2018 09:15:43 -0800
Subject: [PATCH 8/9] disable broken chmod command

---
 test/e2e/runner/cli_provisioner.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/test/e2e/runner/cli_provisioner.go b/test/e2e/runner/cli_provisioner.go
index cd2ea01907..a7a8eb65ab 100644
--- a/test/e2e/runner/cli_provisioner.go
+++ b/test/e2e/runner/cli_provisioner.go
@@ -93,12 +93,12 @@ func (cli *CLIProvisioner) provision() error {
 	if err != nil {
 		return fmt.Errorf("Error while trying to generate ssh key:%s\nOutput:%s", err, out)
 	}
-	cmd = exec.Command("chmod", "0600", cli.Config.GetSSHKeyPath()+"*")
+	/*cmd = exec.Command("chmod", "0600", cli.Config.GetSSHKeyPath()+"*")
 	util.PrintCommand(cmd)
 	out, err = cmd.CombinedOutput()
 	if err != nil {
 		return fmt.Errorf("Error while setting mode perms on ssh key:%s\nOutput:%s", err, out)
-	}
+	}*/
 	}
 
 	publicSSHKey, err := cli.Config.ReadPublicSSHKey()
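Why the chmod command was broken: exec.Command invokes the binary directly rather than through a shell, so the trailing "*" in cli.Config.GetSSHKeyPath()+"*" reaches chmod as a literal argument instead of being glob-expanded, and chmod exits non-zero because no file by that literal name exists. If the permission tightening is ever reinstated, it could be done without shelling out at all; the sketch below uses a hypothetical helper (chmodSSHKeys is not part of these patches):

    package runner

    import (
    	"fmt"
    	"os"
    	"path/filepath"
    )

    // chmodSSHKeys tightens permissions on the generated key pair without
    // shelling out: filepath.Glob performs the wildcard expansion that
    // exec.Command never does, and os.Chmod replaces the external chmod.
    func chmodSSHKeys(sshKeyPath string) error {
    	matches, err := filepath.Glob(sshKeyPath + "*")
    	if err != nil {
    		return fmt.Errorf("Error while globbing ssh key files:%s", err)
    	}
    	for _, file := range matches {
    		if err := os.Chmod(file, 0600); err != nil {
    			return fmt.Errorf("Error while setting mode perms on ssh key %s:%s", file, err)
    		}
    	}
    	return nil
    }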
php-apache pod") - phpPods, err = phpApacheDeploy.Pods() - Expect(err).NotTo(HaveOccurred()) - // We should still have exactly 1 pod after autoscale config but before load - Expect(len(phpPods)).To(Equal(1)) - By("Sending load to the php-apache service by creating a 3 replica deployment") // Launch a simple busybox pod that wget's continuously to the apache serviceto simulate load commandString := fmt.Sprintf("while true; do wget -q -O- http://%s.default.svc.cluster.local; done", phpApacheName) @@ -290,12 +289,15 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu // We should have > 1 pods after autoscale effects Expect(len(phpPods) > 1).To(BeTrue()) + By("Cleaning up after ourselves") err = loadTestDeploy.Delete() Expect(err).NotTo(HaveOccurred()) err = phpApacheDeploy.Delete() Expect(err).NotTo(HaveOccurred()) err = s.Delete() Expect(err).NotTo(HaveOccurred()) + } else { + Skip("This flavor/version of Kubernetes doesn't support hpa autoscale") } }) @@ -315,13 +317,15 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu By("Exposing TCP 80 LB on the nginx deployment") err = nginxDeploy.Expose("LoadBalancer", 80, 80) Expect(err).NotTo(HaveOccurred()) + + By("Ensuring we can connect to the service") s, err := service.Get(deploymentName, "default") Expect(err).NotTo(HaveOccurred()) s, err = s.WaitForExternalIP(cfg.Timeout, 5*time.Second) Expect(err).NotTo(HaveOccurred()) Expect(s.Status.LoadBalancer.Ingress).NotTo(BeEmpty()) - By("Ensuring we can connect to the service") + By("Ensuring the service root URL returns the expected payload") valid := s.Validate("(Welcome to nginx)", 5, 5*time.Second) Expect(valid).To(BeTrue()) @@ -335,6 +339,7 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu Expect(pass).To(BeTrue()) } + By("Cleaning up after ourselves") err = nginxDeploy.Delete() Expect(err).NotTo(HaveOccurred()) err = s.Delete() diff --git a/test/e2e/kubernetes/pod/pod.go b/test/e2e/kubernetes/pod/pod.go index ff7ea00f5b..100d019e28 100644 --- a/test/e2e/kubernetes/pod/pod.go +++ b/test/e2e/kubernetes/pod/pod.go @@ -133,7 +133,7 @@ func GetAllByPrefix(prefix, namespace string) ([]Pod, error) { return pods, nil } -// AreAllPodsRunning will return true if all pods are in a Running State +// AreAllPodsRunning will return true if all pods in a given namespace are in a Running State func AreAllPodsRunning(podPrefix, namespace string) (bool, error) { pl, err := GetAll(namespace) if err != nil { diff --git a/test/e2e/runner/cli_provisioner.go b/test/e2e/runner/cli_provisioner.go index a7a8eb65ab..474129916e 100644 --- a/test/e2e/runner/cli_provisioner.go +++ b/test/e2e/runner/cli_provisioner.go @@ -81,7 +81,6 @@ func (cli *CLIProvisioner) provision() error { cli.Config.Name = cli.Config.SoakClusterName } os.Setenv("NAME", cli.Config.Name) - log.Printf("Cluster name:%s\n", cli.Config.Name) outputPath := filepath.Join(cli.Config.CurrentWorkingDir, "_output") os.Mkdir(outputPath, 0755) @@ -93,12 +92,6 @@ func (cli *CLIProvisioner) provision() error { if err != nil { return fmt.Errorf("Error while trying to generate ssh key:%s\nOutput:%s", err, out) } - /*cmd = exec.Command("chmod", "0600", cli.Config.GetSSHKeyPath()+"*") - util.PrintCommand(cmd) - out, err = cmd.CombinedOutput() - if err != nil { - return fmt.Errorf("Error while setting mode perms on ssh key:%s\nOutput:%s", err, out) - }*/ } publicSSHKey, err := cli.Config.ReadPublicSSHKey()