Skip to content

Commit

Permalink
Automate Qase 210, 211, 212, 213
Browse files Browse the repository at this point in the history
Signed-off-by: Parthvi Vala <[email protected]>
  • Loading branch information
valaparthvi committed Nov 7, 2024
1 parent fa3f279 commit 93bbee5
Show file tree
Hide file tree
Showing 4 changed files with 68 additions and 1 deletion.
2 changes: 2 additions & 0 deletions .github/workflows/main.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,8 @@ env:
AKS_CLIENT_SECRET: ${{ secrets.AKS_CLIENT_SECRET }}
AKS_SUBSCRIPTION_ID: ${{ secrets.AKS_SUBSCRIPTION_ID }}
AKS_TENANT_ID: ${{ secrets.AKS_TENANT_ID }}
AKS_VNET: ${{ secrets.AKS_VNET }}
AKS_VNET_RG: ${{ secrets.AKS_VNET_RG }}
GKE_PROJECT_ID: ${{ secrets.GKE_PROJECT_ID }}
PROVIDER: ${{ inputs.hosted_provider }}
RANCHER_PASSWORD: ${{ secrets.RANCHER_PASSWORD }}
Expand Down
4 changes: 4 additions & 0 deletions cattle-config-provisioning.yaml
Original file line number Diff line number Diff line change
@@ -1,10 +1,14 @@
aksClusterConfig:
azureCredentialSecret: ""
dnsPrefix: ""
dnsServiceIp: 10.0.0.10
dockerBridgeCidr: 172.17.0.1/16
serviceCidr: 10.0.0.0/16
kubernetesVersion: 1.29.7
linuxAdminUsername: azureuser
loadBalancerSku: Standard
networkPlugin: kubenet
outboundType: LoadBalancer
nodePools:
- availabilityZones:
- "1"
Expand Down
2 changes: 1 addition & 1 deletion hosted/aks/helper/helper_cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -547,7 +547,7 @@ func convertMapToAKSString(tags map[string]string) string {
return convertedString
}

// Complete cleanup steps for Azure AKS
// DeleteAKSClusteronAzure: Complete cleanup steps for Azure AKS
func DeleteAKSClusteronAzure(clusterName string) error {

fmt.Println("Deleting AKS resource group which will delete cluster too ...")
Expand Down
61 changes: 61 additions & 0 deletions hosted/aks/p1/p1_provisioning_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -656,4 +656,65 @@ var _ = Describe("P1Provisioning", func() {
updateNodePoolModeCheck(cluster, ctx.RancherAdminClient)
})
})
// Covers Qase test cases 210-213: provisioning AKS clusters with the
// supported combinations of network plugin (kubenet / azure) and network
// policy (calico / azure). The azure-plugin cases attach the cluster to a
// pre-existing virtual network supplied via environment variables.
Context("Network Policy and plugin", func() {
	var (
		calicoPolicy  = "calico"
		kubenetPlugin = "kubenet"
		azure         = "azure"
		// AKS_VNET / AKS_VNET_RG identify an existing Azure virtual
		// network and its resource group; required for the azure-plugin
		// cases below. NOTE(review): no guard here — if these env vars are
		// unset the vnet fields are silently skipped; confirm that is the
		// intended fallback rather than a hard failure.
		vnet   = os.Getenv("AKS_VNET")
		vnetRG = os.Getenv("AKS_VNET_RG")
		// Subnet name within the vnet; assumes a subnet literally named
		// "default" exists — TODO confirm against the test environment.
		subnet = "default"
	)

	// Table-driven spec: each entry maps one plugin/policy combination to
	// its Qase test-case ID.
	for _, data := range []struct {
		networkPlugin, networkPolicy, vnet string
		testCaseID                         int64
	}{
		{
			// Qase 210: kubenet plugin with calico policy (no vnet needed).
			networkPlugin: kubenetPlugin,
			networkPolicy: calicoPolicy,
			testCaseID:    210,
		},
		{
			// Qase 211: azure plugin with calico policy on an existing vnet.
			networkPlugin: azure,
			networkPolicy: calicoPolicy,
			vnet:          vnet,
			testCaseID:    211,
		},
		{
			// Qase 212: azure plugin with no network policy set.
			networkPlugin: azure,
			testCaseID:    212,
			vnet:          vnet,
		},
		{
			// Qase 213: azure plugin with azure network policy.
			networkPlugin: azure,
			networkPolicy: azure,
			vnet:          vnet,
			testCaseID:    213,
		},
	} {
		// Capture the loop variable for the closure below (required for
		// correctness on Go versions before 1.22).
		data := data
		It(fmt.Sprintf("Create cluster with NetworkPolicy %s & Network plugin %s", data.networkPolicy, data.networkPlugin), func() {
			testCaseID = data.testCaseID
			// Mutate the cluster config before creation: always set the
			// plugin; set policy and vnet details only when provided.
			createFunc := func(clusterConfig *aks.ClusterConfig) {
				clusterConfig.NetworkPlugin = &data.networkPlugin
				if data.networkPolicy != "" {
					clusterConfig.NetworkPolicy = &data.networkPolicy
				}
				if data.vnet != "" {
					clusterConfig.VirtualNetwork = &data.vnet
					clusterConfig.Subnet = &subnet
					clusterConfig.VirtualNetworkResourceGroup = pointer.String(vnetRG)
				}
			}
			var err error
			// Provision the hosted AKS cluster, wait for it to become
			// ready, then run the standard readiness checks.
			cluster, err = helper.CreateAKSHostedCluster(ctx.RancherAdminClient, clusterName, ctx.CloudCredID, k8sVersion, location, createFunc)
			Expect(err).To(BeNil())
			cluster, err = helpers.WaitUntilClusterIsReady(cluster, ctx.RancherAdminClient)
			Expect(err).To(BeNil())
			helpers.ClusterIsReadyChecks(cluster, ctx.RancherAdminClient, clusterName)
		})
	}
})

})

0 comments on commit 93bbee5

Please sign in to comment.