Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

azurerm_kubernetes_cluster: Support for empty_bulk_delete_max in auto_scaler_profile block #11060

Merged
merged 4 commits into the base branch
Apr 20, 2021
Merged
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -171,6 +171,11 @@ func resourceKubernetesCluster() *schema.Resource {
Optional: true,
Computed: true,
},
"max_empty_bulk_delete": {
liammoat marked this conversation as resolved.
Show resolved Hide resolved
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"skip_nodes_with_local_storage": {
Type: schema.TypeBool,
Optional: true,
Expand Down Expand Up @@ -2090,6 +2095,11 @@ func flattenKubernetesClusterAutoScalerProfile(profile *containerservice.Managed
scaleDownUtilizationThreshold = *profile.ScaleDownUtilizationThreshold
}

maxEmptyBulkDelete := ""
if profile.MaxEmptyBulkDelete != nil {
maxEmptyBulkDelete = *profile.MaxEmptyBulkDelete
}

scanInterval := ""
if profile.ScanInterval != nil {
scanInterval = *profile.ScanInterval
Expand Down Expand Up @@ -2117,6 +2127,7 @@ func flattenKubernetesClusterAutoScalerProfile(profile *containerservice.Managed
"scale_down_unneeded": scaleDownUnneededTime,
"scale_down_unready": scaleDownUnreadyTime,
"scale_down_utilization_threshold": scaleDownUtilizationThreshold,
"max_empty_bulk_delete": maxEmptyBulkDelete,
"scan_interval": scanInterval,
"skip_nodes_with_local_storage": skipNodesWithLocalStorage,
"skip_nodes_with_system_pods": skipNodesWithSystemPods,
Expand All @@ -2141,6 +2152,7 @@ func expandKubernetesClusterAutoScalerProfile(input []interface{}) *containerser
scaleDownUnneededTime := config["scale_down_unneeded"].(string)
scaleDownUnreadyTime := config["scale_down_unready"].(string)
scaleDownUtilizationThreshold := config["scale_down_utilization_threshold"].(string)
maxEmptyBulkDelete := config["max_empty_bulk_delete"].(string)
scanInterval := config["scan_interval"].(string)
skipNodesWithLocalStorage := config["skip_nodes_with_local_storage"].(bool)
skipNodesWithSystemPods := config["skip_nodes_with_system_pods"].(bool)
Expand All @@ -2156,6 +2168,7 @@ func expandKubernetesClusterAutoScalerProfile(input []interface{}) *containerser
ScaleDownUnneededTime: utils.String(scaleDownUnneededTime),
ScaleDownUnreadyTime: utils.String(scaleDownUnreadyTime),
ScaleDownUtilizationThreshold: utils.String(scaleDownUtilizationThreshold),
MaxEmptyBulkDelete: utils.String(maxEmptyBulkDelete),
ScanInterval: utils.String(scanInterval),
SkipNodesWithLocalStorage: utils.String(strconv.FormatBool(skipNodesWithLocalStorage)),
SkipNodesWithSystemPods: utils.String(strconv.FormatBool(skipNodesWithSystemPods)),
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -297,6 +297,7 @@ func testAccKubernetesCluster_autoScalingProfile(t *testing.T) {
check.That(data.ResourceName).Key("auto_scaler_profile.0.scale_down_unneeded").HasValue("15m"),
check.That(data.ResourceName).Key("auto_scaler_profile.0.scale_down_unready").HasValue("15m"),
check.That(data.ResourceName).Key("auto_scaler_profile.0.scale_down_utilization_threshold").HasValue("0.5"),
check.That(data.ResourceName).Key("auto_scaler_profile.0.max_empty_bulk_delete").HasValue("50"),
check.That(data.ResourceName).Key("auto_scaler_profile.0.scan_interval").HasValue("10s"),
check.That(data.ResourceName).Key("auto_scaler_profile.0.skip_nodes_with_local_storage").HasValue("false"),
check.That(data.ResourceName).Key("auto_scaler_profile.0.skip_nodes_with_system_pods").HasValue("false"),
Expand Down Expand Up @@ -558,6 +559,7 @@ resource "azurerm_kubernetes_cluster" "test" {
scale_down_unneeded = "15m"
scale_down_unready = "15m"
scale_down_utilization_threshold = "0.5"
max_empty_bulk_delete = "50"
skip_nodes_with_local_storage = false
skip_nodes_with_system_pods = false
}
Expand Down
2 changes: 2 additions & 0 deletions website/docs/r/kubernetes_cluster.html.markdown
Original file line number Diff line number Diff line change
Expand Up @@ -241,6 +241,8 @@ A `auto_scaler_profile` block supports the following:

* `scale_down_utilization_threshold` - Node utilization level, defined as sum of requested resources divided by capacity, below which a node can be considered for scale down. Defaults to `0.5`.

* `max_empty_bulk_delete` - The maximum number of empty nodes that can be deleted at the same time. Defaults to `10`.

* `skip_nodes_with_local_storage` - If `true` cluster autoscaler will never delete nodes with pods with local storage, for example, EmptyDir or HostPath. Defaults to `true`.

* `skip_nodes_with_system_pods` - If `true` cluster autoscaler will never delete nodes with pods from kube-system (except for DaemonSet or mirror pods). Defaults to `true`.
Expand Down