Fix#0
Signed-off-by: Parthvi Vala <[email protected]>
valaparthvi committed Oct 25, 2024
1 parent b4158f5 commit ffbd374
Showing 3 changed files with 79 additions and 55 deletions.
31 changes: 21 additions & 10 deletions hosted/aks/helper/helper_cluster.go
@@ -462,8 +462,11 @@ func UpdateCluster(cluster *management.Cluster, client *rancher.Client, updateFu

// ====================================================================Azure CLI (start)=================================
// Create Azure AKS cluster using AZ CLI
func CreateAKSClusterOnAzure(location string, clusterName string, k8sVersion string, nodes string, tags map[string]string, clusterCreateArgs ...string) error {
func CreateAKSClusterOnAzure(location string, clusterName string, k8sVersion string, nodes string, tags map[string]string, extraArgs ...string) error {
formattedTags := convertMapToAKSString(tags)
// append tags
tagargs := []string{"--tags"}
tagargs = append(tagargs, formattedTags...)
fmt.Println("Creating AKS resource group ...")
rgargs := []string{"group", "create", "--location", location, "--resource-group", clusterName, "--subscription", subscriptionID}
fmt.Printf("Running command: az %v\n", rgargs)
@@ -474,8 +477,14 @@ func CreateAKSClusterOnAzure(location string, clusterName string, k8sVersion str
}

fmt.Println("Creating AKS cluster ...")
args := []string{"aks", "create", "--resource-group", clusterName, "--no-ssh-key", "--kubernetes-version", k8sVersion, "--enable-managed-identity", "--name", clusterName, "--subscription", subscriptionID, "--node-count", nodes, "--tags", formattedTags, "--location", location}
args = append(args, clusterCreateArgs...)
args := []string{"aks", "create", "--resource-group", clusterName, "--no-ssh-key", "--kubernetes-version", k8sVersion, "--enable-managed-identity", "--name", clusterName, "--subscription", subscriptionID, "--node-count", nodes, "--location", location}
// add tags
args = append(args, formattedTags...)

if len(extraArgs) > 0 {
args = append(args, extraArgs...)
}

fmt.Printf("Running command: az %v\n", args)
out, err = proc.RunW("az", args...)
if err != nil {
@@ -529,17 +538,20 @@ func ScaleNodePoolOnAzure(npName, clusterName, resourceGroupName, nodeCount stri
fmt.Printf("Running command: az %v\n", args)
out, err := proc.RunW("az", args...)
if err != nil {
return errors.Wrap(err, "Failed to add node pool: "+out)
return errors.Wrap(err, "Failed to scale node pool: "+out)
}
fmt.Println("Added node pool: ", npName)
fmt.Println("Scaled node pool: ", npName)
return nil
}

// UpdateClusterTagOnAzure updates the tags of an existing AKS cluster via CLI
func UpdateClusterTagOnAzure(tags map[string]string, clusterName, resourceGroupName string, extraArgs ...string) error {
formattedTags := convertMapToAKSString(tags)
tagsarg := []string{"--tags"}
tagsarg = append(tagsarg, formattedTags...)
fmt.Println("Adding tags on Azure ...")
args := []string{"aks", "update", "--tags", formattedTags, "--resource-group", resourceGroupName, "--cluster-name", clusterName, "--subscription", subscriptionID}
args := []string{"aks", "update", "--resource-group", resourceGroupName, "--name", clusterName, "--subscription", subscriptionID}
args = append(args, tagsarg...)
if len(extraArgs) > 0 {
args = append(args, extraArgs...)
}
@@ -587,11 +599,10 @@ func UpgradeAKSOnAzure(clusterName, resourceGroup, upgradeToVersion string, addi
}

// convertMapToAKSString converts the map of labels to a string format acceptable by azure CLI
// acceptable format: `--tags "owner=hostedproviders" "testname=sometest"`
func convertMapToAKSString(tags map[string]string) string {
var convertedString string
func convertMapToAKSString(tags map[string]string) []string {
var convertedString []string
for key, value := range tags {
convertedString += fmt.Sprintf("\"%s=%s\" ", key, value)
convertedString = append(convertedString, fmt.Sprintf("%s=%s", key, value))
}
return convertedString
}
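For context, a minimal sketch of how the slice-based helper is consumed after this change: each tag is passed to the Azure CLI as its own `key=value` argument following `--tags`, rather than as one quoted, space-joined string. The resource group, cluster name, and tag values below are illustrative, not taken from the test suite.

```go
package main

import "fmt"

// convertMapToAKSString mirrors the reworked helper above: each tag becomes
// its own "key=value" element instead of one space-joined, quoted string.
func convertMapToAKSString(tags map[string]string) []string {
	var converted []string
	for key, value := range tags {
		converted = append(converted, fmt.Sprintf("%s=%s", key, value))
	}
	return converted
}

func main() {
	// Illustrative tag values.
	tags := map[string]string{"owner": "hostedproviders", "testname": "sometest"}

	args := []string{"aks", "update", "--resource-group", "demo-rg", "--name", "demo-cluster"}
	args = append(args, "--tags")
	args = append(args, convertMapToAKSString(tags)...)

	// Each tag is now a separate argv entry, e.g.:
	// az aks update --resource-group demo-rg --name demo-cluster --tags owner=hostedproviders testname=sometest
	fmt.Println(args)
}
```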
102 changes: 58 additions & 44 deletions hosted/aks/p1/p1_suite_test.go
@@ -2,7 +2,6 @@ package p1_test

import (
"fmt"
"strconv"
"strings"
"testing"

@@ -587,25 +586,29 @@ func azureSyncCheck(cluster *management.Cluster, client *rancher.Client, upgrade
}
return allUpgraded
}, "6m", "10s").Should(BeTrue())
Expect(*cluster.AKSConfig.KubernetesVersion).To(Equal(upgradeToVersion))
for _, nodepool := range cluster.AKSConfig.NodePools {
Expect(*nodepool.OrchestratorVersion).To(Equal(upgradeToVersion))

// Check AKSConfig if the cluster is Rancher-provisioned
if !helpers.IsImport {
Expect(*cluster.AKSConfig.KubernetesVersion).To(Equal(upgradeToVersion))
for _, nodepool := range cluster.AKSConfig.NodePools {
Expect(*nodepool.OrchestratorVersion).To(Equal(upgradeToVersion))
}
}
})

const (
npName = "syncnodepool"
nodeCount = 1
npName = "syncnodepool"
nodeCount int64 = 1
)
currentNPCount := len(cluster.AKSConfig.NodePools)
By("adding a nodepool", func() {
err := helper.AddNodePoolOnAzure(npName, cluster.AKSConfig.ClusterName, cluster.AKSConfig.ResourceGroup, strconv.Itoa(nodeCount))
By("Adding a nodepool", func() {
err := helper.AddNodePoolOnAzure(npName, cluster.AKSConfig.ClusterName, cluster.AKSConfig.ResourceGroup, fmt.Sprint(nodeCount))
Expect(err).To(BeNil())
Eventually(func() bool {
cluster, err = client.Management.Cluster.ByID(cluster.ID)
Expect(err).NotTo(HaveOccurred())
if len(cluster.AKSStatus.UpstreamSpec.NodePools) != currentNPCount+1 {
// Return early
if len(cluster.AKSStatus.UpstreamSpec.NodePools) == currentNPCount {
// Return early if the nodepool count hasn't changed.
return false
}
for _, nodepool := range cluster.AKSStatus.UpstreamSpec.NodePools {
@@ -614,14 +617,17 @@ func azureSyncCheck(cluster *management.Cluster, client *rancher.Client, upgrade
}
}
return false
}, "6m", "10s").Should(BeTrue(), "Timed out while waiting for new nodepool to appear in UpstreamSpec...")
}, "7m", "10s").Should(BeTrue(), "Timed out while waiting for new nodepool to appear in UpstreamSpec...")

Expect(cluster.AKSConfig.NodePools).To(HaveLen(currentNPCount + 1))
// Check AKSConfig if the cluster is Rancher-provisioned
if !helpers.IsImport {
Expect(cluster.AKSConfig.NodePools).To(HaveLen(currentNPCount + 1))
}
})

By("Scaling the nodepool", func() {
const scaleCount = nodeCount + 2
err := helper.ScaleNodePoolOnAzure(npName, cluster.AKSConfig.ClusterName, cluster.AKSConfig.ResourceGroup, strconv.Itoa(scaleCount))
err := helper.ScaleNodePoolOnAzure(npName, cluster.AKSConfig.ClusterName, cluster.AKSConfig.ResourceGroup, fmt.Sprint(scaleCount))
Expect(err).To(BeNil())
Eventually(func() bool {
cluster, err = client.Management.Cluster.ByID(cluster.ID)
@@ -632,22 +638,26 @@ func azureSyncCheck(cluster *management.Cluster, client *rancher.Client, upgrade
}
}
return false
}, "6m", "10s").Should(BeTrue(), "Timed out while waiting for Scale up to appear in UpstreamSpec...")
for _, nodepool := range cluster.AKSConfig.NodePools {
if *nodepool.Name == npName {
Expect(*nodepool.Count).To(Equal(scaleCount))
}, "7m", "10s").Should(BeTrue(), "Timed out while waiting for Scale up to appear in UpstreamSpec...")

// Check AKSConfig if the cluster is Rancher-provisioned
if !helpers.IsImport {
for _, nodepool := range cluster.AKSConfig.NodePools {
if *nodepool.Name == npName {
Expect(*nodepool.Count).To(Equal(scaleCount))
}
}
}
})

By("deleting nodepool", func() {
By("Deleting a nodepool", func() {
err := helper.DeleteNodePoolOnAzure(npName, cluster.AKSConfig.ClusterName, cluster.AKSConfig.ResourceGroup)
Expect(err).To(BeNil())
Eventually(func() bool {
cluster, err = client.Management.Cluster.ByID(cluster.ID)
Expect(err).NotTo(HaveOccurred())
if len(cluster.AKSStatus.UpstreamSpec.NodePools) != currentNPCount {
// Return early
// Return early if the nodepool count is not back to its original state
return false
}
for _, nodepool := range cluster.AKSStatus.UpstreamSpec.NodePools {
@@ -656,25 +666,31 @@ func azureSyncCheck(cluster *management.Cluster, client *rancher.Client, upgrade
}
}
return true
}, "6m", "10s").Should(BeTrue(), "Timed out while waiting for nodepool deletion to appear in UpstreamSpec...")
Expect(cluster.AKSConfig.NodePools).To(HaveLen(currentNPCount))
}, "7m", "10s").Should(BeTrue(), "Timed out while waiting for nodepool deletion to appear in UpstreamSpec...")

// Check AKSConfig if the cluster is Rancher-provisioned
if !helpers.IsImport {
Expect(cluster.AKSConfig.NodePools).To(HaveLen(currentNPCount))
}
})

var originalTags map[string]string
var originalTags = map[string]string{}
for key, value := range cluster.AKSConfig.Tags {
originalTags[key] = value
}

By("Adding tags to cluster", func() {
updatedTags := cluster.AKSConfig.Tags
updatedTags["foo"] = "bar"
updatedTags["empty-tag"] = ""
updatedTags["empty-tags"] = ""

err := helper.UpdateClusterTagOnAzure(updatedTags, cluster.AKSConfig.ClusterName, cluster.AKSConfig.ResourceGroup)
Expect(err).To(BeNil())
Eventually(func() bool {
cluster, err = client.Management.Cluster.ByID(cluster.ID)
Expect(err).NotTo(HaveOccurred())
if len(cluster.AKSStatus.UpstreamSpec.Tags) != len(updatedTags) {
// Return early
// Return early if tag length in Rancher hasn't changed; new tags haven't synced back to Rancher
return false
}
for key, value := range updatedTags {
@@ -683,11 +699,14 @@ func azureSyncCheck(cluster *management.Cluster, client *rancher.Client, upgrade
}
}
return true
}, "6m", "10s").Should(BeTrue(), "Timed out while waiting for tags addition to appear in UpstreamSpec...")
}, "7m", "10s").Should(BeTrue(), "Timed out while waiting for tags addition to appear in UpstreamSpec...")

Expect(len(cluster.AKSConfig.Tags)).To(Equal(len(updatedTags)))
for key, value := range updatedTags {
Expect(cluster.AKSConfig.Tags).To(HaveKeyWithValue(key, value))
// Check AKSConfig if the cluster is Rancher-provisioned
if !helpers.IsImport {
Expect(len(cluster.AKSConfig.Tags)).To(Equal(len(updatedTags)))
for key, value := range updatedTags {
Expect(cluster.AKSConfig.Tags).To(HaveKeyWithValue(key, value))
}
}
})

@@ -697,23 +716,18 @@ func azureSyncCheck(cluster *management.Cluster, client *rancher.Client, upgrade
Eventually(func() bool {
cluster, err = client.Management.Cluster.ByID(cluster.ID)
Expect(err).NotTo(HaveOccurred())
if len(cluster.AKSStatus.UpstreamSpec.Tags) != len(originalTags) {
// Return early
return false
}
for key := range originalTags {
if _, exists := cluster.AKSStatus.UpstreamSpec.Tags[key]; exists {
return false
}
}
return true
}, "6m", "10s").Should(BeTrue(), "Timed out while waiting for tags deletion to appear in UpstreamSpec...")
return len(cluster.AKSStatus.UpstreamSpec.Tags) == len(originalTags)
}, "7m", "10s").Should(BeTrue(), "Timed out while waiting for tags deletion to appear in UpstreamSpec...")

Expect(len(cluster.AKSConfig.Tags)).To(Equal(len(originalTags)))
for key, value := range originalTags {
Expect(cluster.AKSConfig.Tags).ToNot(HaveKeyWithValue(key, value))
for key, value := range cluster.AKSStatus.UpstreamSpec.Tags {
Expect(cluster.AKSStatus.UpstreamSpec.Tags).To(HaveKeyWithValue(key, value))
}
// Check AKSConfig if the cluster is Rancher-provisioned
if !helpers.IsImport {
Expect(len(cluster.AKSConfig.Tags)).To(Equal(len(originalTags)))
for key, value := range originalTags {
Expect(cluster.AKSConfig.Tags).ToNot(HaveKeyWithValue(key, value))
}
}

})

}
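A rough sketch of the guard these sync checks now share, under the assumption that imported clusters never populate AKSConfig while changes made directly on Azure always surface in AKSStatus.UpstreamSpec. The types and helper below are simplified stand-ins, not the suite's real structs; `isImport` plays the role of `helpers.IsImport`.

```go
package main

import "fmt"

// Simplified stand-ins for the Rancher cluster structs used in the suite.
type aksSpec struct {
	Tags map[string]string
}

type cluster struct {
	AKSConfig    aksSpec // maintained only for Rancher-provisioned clusters
	UpstreamSpec aksSpec // always synced back from Azure
}

// verifyTags always checks UpstreamSpec, but skips AKSConfig for imported clusters.
func verifyTags(c cluster, want map[string]string, isImport bool) error {
	for k, v := range want {
		if c.UpstreamSpec.Tags[k] != v {
			return fmt.Errorf("tag %s=%s not synced to UpstreamSpec", k, v)
		}
	}
	if isImport {
		return nil // imported cluster: AKSConfig is not populated
	}
	for k, v := range want {
		if c.AKSConfig.Tags[k] != v {
			return fmt.Errorf("tag %s=%s missing from AKSConfig", k, v)
		}
	}
	return nil
}

func main() {
	c := cluster{
		AKSConfig:    aksSpec{Tags: map[string]string{"foo": "bar"}},
		UpstreamSpec: aksSpec{Tags: map[string]string{"foo": "bar"}},
	}
	fmt.Println(verifyTags(c, map[string]string{"foo": "bar"}, false)) // prints <nil>
}
```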
1 change: 0 additions & 1 deletion hosted/eks/p1/sync_importing_test.go
@@ -48,7 +48,6 @@ var _ = Describe("SyncImport", func() {

By("upgrading the ControlPlane & NodeGroup", func() {
syncK8sVersionUpgradeCheck(cluster, ctx.RancherAdminClient, true)

})
})

