diff --git a/.gitignore b/.gitignore index 6c6ffa9ab9d0..247ea4cb6fdc 100644 --- a/.gitignore +++ b/.gitignore @@ -42,3 +42,6 @@ examples/**/test.tf examples/**/test.tfvars examples/**/terraform examples/**/terraform.zip + +#never upload the build to git +terraform-provider-azurerm diff --git a/azurerm/data_source_kubernetes_cluster.go b/azurerm/data_source_kubernetes_cluster.go index 0edf806433d9..ef8cc091d5d7 100644 --- a/azurerm/data_source_kubernetes_cluster.go +++ b/azurerm/data_source_kubernetes_cluster.go @@ -87,6 +87,29 @@ func dataSourceArmKubernetesCluster() *schema.Resource { Computed: true, }, + "max_count": { + Type: schema.TypeInt, + Computed: true, + }, + + "min_count": { + Type: schema.TypeInt, + Computed: true, + }, + + "enable_auto_scaling": { + Type: schema.TypeBool, + Computed: true, + }, + + "availability_zones": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + // TODO: remove this in a future version "dns_prefix": { Type: schema.TypeString, @@ -565,6 +588,20 @@ func flattenKubernetesClusterDataSourceAgentPoolProfiles(input *[]containerservi agentPoolProfile["count"] = int(*profile.Count) } + if profile.MinCount != nil { + agentPoolProfile["min_count"] = int(*profile.MinCount) + } + + if profile.MaxCount != nil { + agentPoolProfile["max_count"] = int(*profile.MaxCount) + } + + if profile.EnableAutoScaling != nil { + agentPoolProfile["enable_auto_scaling"] = *profile.EnableAutoScaling + } + + agentPoolProfile["availability_zones"] = utils.FlattenStringSlice(profile.AvailabilityZones) + if profile.Name != nil { agentPoolProfile["name"] = *profile.Name } diff --git a/azurerm/data_source_kubernetes_cluster_test.go b/azurerm/data_source_kubernetes_cluster_test.go index 1d093bbdad27..314b9a4f92ce 100644 --- a/azurerm/data_source_kubernetes_cluster_test.go +++ b/azurerm/data_source_kubernetes_cluster_test.go @@ -417,6 +417,64 @@ func 
TestAccDataSourceAzureRMKubernetesCluster_addOnProfileRouting(t *testing.T) }) } +func TestAccDataSourceAzureRMKubernetesCluster_autoscalingNoAvailabilityZones(t *testing.T) { + dataSourceName := "data.azurerm_kubernetes_cluster.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + + config := testAccDataSourceAzureRMKubernetesCluster_autoScalingNoAvailabilityZones(ri, clientId, clientSecret, testLocation()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(dataSourceName), + resource.TestCheckResourceAttr(dataSourceName, "agent_pool_profile.0.min_count", "1"), + resource.TestCheckResourceAttr(dataSourceName, "agent_pool_profile.0.max_count", "2"), + resource.TestCheckResourceAttr(dataSourceName, "agent_pool_profile.0.type", "VirtualMachineScaleSets"), + resource.TestCheckResourceAttr(dataSourceName, "agent_pool_profile.0.enable_auto_scaling", "true"), + resource.TestCheckNoResourceAttr(dataSourceName, "agent_pool_profile.0.availability_zones"), + ), + }, + }, + }) +} + +func TestAccDataSourceAzureRMKubernetesCluster_autoscalingWithAvailabilityZones(t *testing.T) { + dataSourceName := "data.azurerm_kubernetes_cluster.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + + config := testAccDataSourceAzureRMKubernetesCluster_autoScalingWithAvailabilityZones(ri, clientId, clientSecret, testLocation()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( 
+ testCheckAzureRMKubernetesClusterExists(dataSourceName), + resource.TestCheckResourceAttr(dataSourceName, "agent_pool_profile.0.min_count", "1"), + resource.TestCheckResourceAttr(dataSourceName, "agent_pool_profile.0.max_count", "2"), + resource.TestCheckResourceAttr(dataSourceName, "agent_pool_profile.0.type", "VirtualMachineScaleSets"), + resource.TestCheckResourceAttr(dataSourceName, "agent_pool_profile.0.enable_auto_scaling", "true"), + resource.TestCheckResourceAttr(dataSourceName, "agent_pool_profile.0.availability_zones.#", "2"), + resource.TestCheckResourceAttr(dataSourceName, "agent_pool_profile.0.availability_zones.0", "1"), + resource.TestCheckResourceAttr(dataSourceName, "agent_pool_profile.0.availability_zones.1", "2"), + ), + }, + }, + }) +} + func testAccDataSourceAzureRMKubernetesCluster_basic(rInt int, clientId string, clientSecret string, location string) string { r := testAccAzureRMKubernetesCluster_basic(rInt, clientId, clientSecret, location) return fmt.Sprintf(` @@ -584,3 +642,27 @@ data "azurerm_kubernetes_cluster" "test" { } `, r) } + +func testAccDataSourceAzureRMKubernetesCluster_autoScalingNoAvailabilityZones(rInt int, clientId string, clientSecret string, location string) string { + r := testAccAzureRMKubernetesCluster_autoscaleNoAvailabilityZones(rInt, clientId, clientSecret, location) + return fmt.Sprintf(` +%s + +data "azurerm_kubernetes_cluster" "test" { + name = "${azurerm_kubernetes_cluster.test.name}" + resource_group_name = "${azurerm_kubernetes_cluster.test.resource_group_name}" +} +`, r) +} + +func testAccDataSourceAzureRMKubernetesCluster_autoScalingWithAvailabilityZones(rInt int, clientId string, clientSecret string, location string) string { + r := testAccAzureRMKubernetesCluster_autoscaleWithAvailabilityZones(rInt, clientId, clientSecret, location) + return fmt.Sprintf(` +%s + +data "azurerm_kubernetes_cluster" "test" { + name = "${azurerm_kubernetes_cluster.test.name}" + resource_group_name = 
"${azurerm_kubernetes_cluster.test.resource_group_name}" +} +`, r) +} diff --git a/azurerm/resource_arm_kubernetes_cluster.go b/azurerm/resource_arm_kubernetes_cluster.go index 6a0524b3c689..fbb075590a0d 100644 --- a/azurerm/resource_arm_kubernetes_cluster.go +++ b/azurerm/resource_arm_kubernetes_cluster.go @@ -119,6 +119,31 @@ func resourceArmKubernetesCluster() *schema.Resource { ValidateFunc: validation.IntBetween(1, 100), }, + "max_count": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(1, 100), + }, + + "min_count": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(1, 100), + }, + + "enable_auto_scaling": { + Type: schema.TypeBool, + Optional: true, + }, + + "availability_zones": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + // TODO: remove this field in the next major version "dns_prefix": { Type: schema.TypeString, @@ -563,7 +588,11 @@ func resourceArmKubernetesClusterCreateUpdate(d *schema.ResourceData, meta inter kubernetesVersion := d.Get("kubernetes_version").(string) linuxProfile := expandKubernetesClusterLinuxProfile(d) - agentProfiles := expandKubernetesClusterAgentPoolProfiles(d) + agentProfiles, err := expandKubernetesClusterAgentPoolProfiles(d) + if err != nil { + return err + } + servicePrincipalProfile := expandAzureRmKubernetesClusterServicePrincipal(d) networkProfile := expandKubernetesClusterNetworkProfile(d) addonProfiles := expandKubernetesClusterAddonProfiles(d) @@ -902,7 +931,8 @@ func flattenKubernetesClusterAddonProfiles(profile map[string]*containerservice. 
return []interface{}{values} } -func expandKubernetesClusterAgentPoolProfiles(d *schema.ResourceData) []containerservice.ManagedClusterAgentPoolProfile { +func expandKubernetesClusterAgentPoolProfiles(d *schema.ResourceData) ([]containerservice.ManagedClusterAgentPoolProfile, error) { + configs := d.Get("agent_pool_profile").([]interface{}) profiles := make([]containerservice.ManagedClusterAgentPoolProfile, 0) @@ -933,10 +963,37 @@ func expandKubernetesClusterAgentPoolProfiles(d *schema.ResourceData) []containe if vnetSubnetID != "" { profile.VnetSubnetID = utils.String(vnetSubnetID) } + + if maxCount := int32(config["max_count"].(int)); maxCount > 0 { + profile.MaxCount = utils.Int32(maxCount) + } + + if minCount := int32(config["min_count"].(int)); minCount > 0 { + profile.MinCount = utils.Int32(minCount) + } + + if enableAutoScalingItf := config["enable_auto_scaling"]; enableAutoScalingItf != nil { + profile.EnableAutoScaling = utils.Bool(enableAutoScalingItf.(bool)) + + // Auto scaling will change the number of nodes, but the original count number should not be sent again. + // This avoid the cluster being resized after creation. 
+ if *profile.EnableAutoScaling && !d.IsNewResource() { + profile.Count = nil + } + } + + if availavilityZones := utils.ExpandStringSlice(config["availability_zones"].([]interface{})); len(*availavilityZones) > 0 { + profile.AvailabilityZones = availavilityZones + } + + if *profile.EnableAutoScaling && (profile.MinCount == nil || profile.MaxCount == nil) { + return nil, fmt.Errorf("Can't create an AKS cluster with autoscaling enabled but not setting min_count or max_count") + } + profiles = append(profiles, profile) } - return profiles + return profiles, nil } func flattenKubernetesClusterAgentPoolProfiles(profiles *[]containerservice.ManagedClusterAgentPoolProfile, fqdn *string) []interface{} { @@ -957,6 +1014,20 @@ func flattenKubernetesClusterAgentPoolProfiles(profiles *[]containerservice.Mana agentPoolProfile["count"] = int(*profile.Count) } + if profile.MinCount != nil { + agentPoolProfile["min_count"] = int(*profile.MinCount) + } + + if profile.MaxCount != nil { + agentPoolProfile["max_count"] = int(*profile.MaxCount) + } + + if profile.EnableAutoScaling != nil { + agentPoolProfile["enable_auto_scaling"] = *profile.EnableAutoScaling + } + + agentPoolProfile["availability_zones"] = utils.FlattenStringSlice(profile.AvailabilityZones) + if fqdn != nil { // temporarily persist the parent FQDN here until `fqdn` is removed from the `agent_pool_profile` agentPoolProfile["fqdn"] = *fqdn diff --git a/azurerm/resource_arm_kubernetes_cluster_test.go b/azurerm/resource_arm_kubernetes_cluster_test.go index 9858ca7ed1fe..83e862fabcf5 100644 --- a/azurerm/resource_arm_kubernetes_cluster_test.go +++ b/azurerm/resource_arm_kubernetes_cluster_test.go @@ -628,6 +628,71 @@ func TestAccAzureRMKubernetesCluster_virtualMachineScaleSets(t *testing.T) { }) } +func TestAccAzureRMKubernetesCluster_autoScalingNoAvailabilityZones(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := 
os.Getenv("ARM_CLIENT_SECRET") + config := testAccAzureRMKubernetesCluster_autoscaleNoAvailabilityZones(ri, clientId, clientSecret, testLocation()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.type", "VirtualMachineScaleSets"), + resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.min_count", "1"), + resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.max_count", "2"), + resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.enable_auto_scaling", "true"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAzureRMKubernetesCluster_autoScalingWithAvailabilityZones(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + config := testAccAzureRMKubernetesCluster_autoscaleWithAvailabilityZones(ri, clientId, clientSecret, testLocation()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.type", "VirtualMachineScaleSets"), + resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.min_count", "1"), + resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.max_count", "2"), + resource.TestCheckResourceAttr(resourceName, 
"agent_pool_profile.0.enable_auto_scaling", "true"), + resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.availability_zones.#", "2"), + resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.availability_zones.0", "1"), + resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.availability_zones.1", "2"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccAzureRMKubernetesCluster_multipleAgents(t *testing.T) { resourceName := "azurerm_kubernetes_cluster.test" ri := tf.AccRandTimeInt() @@ -1557,3 +1622,64 @@ resource "azurerm_kubernetes_cluster" "test" { } `, rInt, location, rInt, rInt, clientId, clientSecret) } + +func testAccAzureRMKubernetesCluster_autoscaleNoAvailabilityZones(rInt int, clientId string, clientSecret string, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + dns_prefix = "acctestaks%d" + + agent_pool_profile { + name = "pool1" + min_count = "1" + max_count = "2" + enable_auto_scaling = "true" + type = "VirtualMachineScaleSets" + vm_size = "Standard_DS2_v2" + } + + service_principal { + client_id = "%s" + client_secret = "%s" + } +} +`, rInt, location, rInt, rInt, clientId, clientSecret) +} + +func testAccAzureRMKubernetesCluster_autoscaleWithAvailabilityZones(rInt int, clientId string, clientSecret string, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + dns_prefix = "acctestaks%d" + + 
agent_pool_profile { + name = "pool1" + min_count = "1" + max_count = "2" + enable_auto_scaling = "true" + type = "VirtualMachineScaleSets" + vm_size = "Standard_DS2_v2" + availability_zones = ["1", "2"] + } + + service_principal { + client_id = "%s" + client_secret = "%s" + } +} +`, rInt, location, rInt, rInt, clientId, clientSecret) +} diff --git a/website/docs/d/kubernetes_cluster.html.markdown b/website/docs/d/kubernetes_cluster.html.markdown index 16e42b3afe8e..27a812d85f82 100644 --- a/website/docs/d/kubernetes_cluster.html.markdown +++ b/website/docs/d/kubernetes_cluster.html.markdown @@ -86,6 +86,16 @@ A `agent_pool_profile` block exports the following: * `max_pods` - The maximum number of pods that can run on each agent. +* `availability_zones` - The availability zones used for the nodes. + +* `enable_auto_scaling` - If the auto-scaler is enabled. + +* `min_count` - Minimum number of nodes for auto-scaling + +* `min_count` - Minimum number of nodes for auto-scaling + +* `max_count` - Maximum number of nodes for auto-scaling + * `name` - The name assigned to this pool of agents. * `os_disk_size_gb` - The size of the Agent VM's Operating System Disk in GB. diff --git a/website/docs/r/kubernetes_cluster.html.markdown b/website/docs/r/kubernetes_cluster.html.markdown index a66b29e7000f..c5e507e99478 100644 --- a/website/docs/r/kubernetes_cluster.html.markdown +++ b/website/docs/r/kubernetes_cluster.html.markdown @@ -120,6 +120,14 @@ A `agent_pool_profile` block supports the following: * `vm_size` - (Required) The size of each VM in the Agent Pool (e.g. `Standard_F1`). Changing this forces a new resource to be created. * `max_pods` - (Optional) The maximum number of pods that can run on each agent. + +* `availability_zones` - (Optional) Availability zones for nodes. The property `type` of the `agent_pool_profile` must be set to `VirtualMachineScaleSets` in order to use availability zones. 
+* `enable_auto_scaling` - (Optional) Whether to enable the [auto-scaler](https://docs.microsoft.com/en-us/azure/aks/cluster-autoscaler). Note that the auto-scaling feature requires that `type` is set to `VirtualMachineScaleSets`.
+
+* `min_count` - (Optional) Minimum number of nodes for auto-scaling.
+
+* `max_count` - (Optional) Maximum number of nodes for auto-scaling.