diff --git a/.changelog/8412.txt b/.changelog/8412.txt
new file mode 100644
index 00000000000..4a010c54f30
--- /dev/null
+++ b/.changelog/8412.txt
@@ -0,0 +1,3 @@
+```release-note:enhancement
+compute: added field `instance_lifecycle_policy` to `google_compute_instance_group_manager` and `google_compute_region_instance_group_manager` (ga)
+```
diff --git a/google/resource_compute_instance_group_manager_test.go b/google/resource_compute_instance_group_manager_test.go
index 8282a6acc9b..aa74393fc23 100644
--- a/google/resource_compute_instance_group_manager_test.go
+++ b/google/resource_compute_instance_group_manager_test.go
@@ -662,6 +662,9 @@ resource "google_compute_instance_group_manager" "igm-update" {
     port = 8080
   }
 
+  instance_lifecycle_policy {
+    force_update_on_repair = "YES"
+  }
 }
 `, template, target, description, igm)
 }
@@ -755,6 +758,9 @@ resource "google_compute_instance_group_manager" "igm-update" {
   }
 
+  instance_lifecycle_policy {
+    force_update_on_repair = "NO"
+  }
 }
 `, template1, target1, target2, template2, description, igm)
 }
 
@@ -1733,6 +1739,9 @@ resource "google_compute_instance_group_manager" "igm-basic" {
     max_surge_fixed = 0
     max_unavailable_percent = 50
   }
+  instance_lifecycle_policy {
+    force_update_on_repair = "YES"
+  }
   wait_for_instances = true
   wait_for_instances_status = "UPDATED"
 }
diff --git a/google/resource_compute_region_instance_group_manager_test.go b/google/resource_compute_region_instance_group_manager_test.go
index faff0181046..921ccd73a82 100644
--- a/google/resource_compute_region_instance_group_manager_test.go
+++ b/google/resource_compute_region_instance_group_manager_test.go
@@ -538,6 +538,9 @@ resource "google_compute_region_instance_group_manager" "igm-update" {
   }
 
+  instance_lifecycle_policy {
+    force_update_on_repair = "YES"
+  }
 }
 `, template, target, igm)
 }
@@ -631,6 +634,9 @@ resource "google_compute_region_instance_group_manager" "igm-update" {
   }
 
+  instance_lifecycle_policy {
+    force_update_on_repair = "NO"
+  }
 }
 `, template1, target1, target2, template2, igm)
 }
diff --git a/google/services/compute/resource_compute_instance_group_manager.go b/google/services/compute/resource_compute_instance_group_manager.go
index da96e5fdf3f..ab9675e571d 100644
--- a/google/services/compute/resource_compute_instance_group_manager.go
+++ b/google/services/compute/resource_compute_instance_group_manager.go
@@ -275,6 +275,25 @@ func ResourceComputeInstanceGroupManager() *schema.Resource {
 				},
 			},
 
+			"instance_lifecycle_policy": {
+				Computed:    true,
+				Type:        schema.TypeList,
+				Optional:    true,
+				MaxItems:    1,
+				Description: `The instance lifecycle policy for this managed instance group.`,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"force_update_on_repair": {
+							Type:         schema.TypeString,
+							Default:      "NO",
+							Optional:     true,
+							ValidateFunc: validation.StringInSlice([]string{"YES", "NO"}, false),
+							Description:  `Specifies whether to apply the group's latest configuration when repairing a VM. Valid options are: YES, NO. If YES and you updated the group's instance template or per-instance configurations after the VM was created, then these changes are applied when VM is repaired. If NO (default), then updates are applied in accordance with the group's update policy type.`,
+						},
+					},
+				},
+			},
+
 			"wait_for_instances": {
 				Type:     schema.TypeBool,
 				Optional: true,
@@ -459,6 +478,7 @@ func resourceComputeInstanceGroupManagerCreate(d *schema.ResourceData, meta inte
 		AutoHealingPolicies:     expandAutoHealingPolicies(d.Get("auto_healing_policies").([]interface{})),
 		Versions:                expandVersions(d.Get("version").([]interface{})),
 		UpdatePolicy:            expandUpdatePolicy(d.Get("update_policy").([]interface{})),
+		InstanceLifecyclePolicy: expandInstanceLifecyclePolicy(d.Get("instance_lifecycle_policy").([]interface{})),
 		StatefulPolicy:          expandStatefulPolicy(d),
 
 		// Force send TargetSize to allow a value of 0.
@@ -670,6 +690,9 @@ func resourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interf
 	if err = d.Set("update_policy", flattenUpdatePolicy(manager.UpdatePolicy)); err != nil {
 		return fmt.Errorf("Error setting update_policy in state: %s", err.Error())
 	}
+	if err = d.Set("instance_lifecycle_policy", flattenInstanceLifecyclePolicy(manager.InstanceLifecyclePolicy)); err != nil {
+		return fmt.Errorf("Error setting instance lifecycle policy in state: %s", err.Error())
+	}
 	if err = d.Set("status", flattenStatus(manager.Status)); err != nil {
 		return fmt.Errorf("Error setting status in state: %s", err.Error())
 	}
@@ -735,6 +758,11 @@ func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta inte
 		change = true
 	}
 
+	if d.HasChange("instance_lifecycle_policy") {
+		updatedManager.InstanceLifecyclePolicy = expandInstanceLifecyclePolicy(d.Get("instance_lifecycle_policy").([]interface{}))
+		change = true
+	}
+
 	if d.HasChange("stateful_disk") {
 		updatedManager.StatefulPolicy = expandStatefulPolicy(d)
 		change = true
@@ -1002,6 +1030,16 @@ func expandFixedOrPercent(configured []interface{}) *compute.FixedOrPercent {
 	return fixedOrPercent
 }
 
+func expandInstanceLifecyclePolicy(configured []interface{}) *compute.InstanceGroupManagerInstanceLifecyclePolicy {
+	instanceLifecyclePolicy := &compute.InstanceGroupManagerInstanceLifecyclePolicy{}
+
+	for _, raw := range configured {
+		data := raw.(map[string]interface{})
+		instanceLifecyclePolicy.ForceUpdateOnRepair = data["force_update_on_repair"].(string)
+	}
+	return instanceLifecyclePolicy
+}
+
 func expandUpdatePolicy(configured []interface{}) *compute.InstanceGroupManagerUpdatePolicy {
 	updatePolicy := &compute.InstanceGroupManagerUpdatePolicy{}
 
@@ -1106,6 +1144,16 @@ func flattenUpdatePolicy(updatePolicy *compute.InstanceGroupManagerUpdatePolicy)
 	return results
 }
 
+func flattenInstanceLifecyclePolicy(instanceLifecyclePolicy *compute.InstanceGroupManagerInstanceLifecyclePolicy) []map[string]interface{} {
+	results := []map[string]interface{}{}
+	if instanceLifecyclePolicy != nil {
+		ilp := map[string]interface{}{}
+		ilp["force_update_on_repair"] = instanceLifecyclePolicy.ForceUpdateOnRepair
+		results = append(results, ilp)
+	}
+	return results
+}
+
 func flattenStatus(status *compute.InstanceGroupManagerStatus) []map[string]interface{} {
 	results := []map[string]interface{}{}
 	data := map[string]interface{}{
diff --git a/google/services/compute/resource_compute_region_instance_group_manager.go b/google/services/compute/resource_compute_region_instance_group_manager.go
index 058b5387297..df570b5e1d0 100644
--- a/google/services/compute/resource_compute_region_instance_group_manager.go
+++ b/google/services/compute/resource_compute_region_instance_group_manager.go
@@ -243,6 +243,25 @@ func ResourceComputeRegionInstanceGroupManager() *schema.Resource {
 				Description: `The shape to which the group converges either proactively or on resize events (depending on the value set in updatePolicy.instanceRedistributionType).`,
 			},
 
+			"instance_lifecycle_policy": {
+				Computed:    true,
+				Type:        schema.TypeList,
+				Optional:    true,
+				MaxItems:    1,
+				Description: `The instance lifecycle policy for this managed instance group.`,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"force_update_on_repair": {
+							Type:         schema.TypeString,
+							Default:      "NO",
+							Optional:     true,
+							ValidateFunc: validation.StringInSlice([]string{"YES", "NO"}, false),
+							Description:  `Specifies whether to apply the group's latest configuration when repairing a VM. Valid options are: YES, NO. If YES and you updated the group's instance template or per-instance configurations after the VM was created, then these changes are applied when VM is repaired. If NO (default), then updates are applied in accordance with the group's update policy type.`,
+						},
+					},
+				},
+			},
+
 			"update_policy": {
 				Type:     schema.TypeList,
 				Computed: true,
@@ -433,6 +452,7 @@ func resourceComputeRegionInstanceGroupManagerCreate(d *schema.ResourceData, met
 		AutoHealingPolicies:     expandAutoHealingPolicies(d.Get("auto_healing_policies").([]interface{})),
 		Versions:                expandVersions(d.Get("version").([]interface{})),
 		UpdatePolicy:            expandRegionUpdatePolicy(d.Get("update_policy").([]interface{})),
+		InstanceLifecyclePolicy: expandInstanceLifecyclePolicy(d.Get("instance_lifecycle_policy").([]interface{})),
 		DistributionPolicy:      expandDistributionPolicy(d),
 		StatefulPolicy:          expandStatefulPolicy(d),
 		// Force send TargetSize to allow size of 0.
@@ -616,6 +636,9 @@ func resourceComputeRegionInstanceGroupManagerRead(d *schema.ResourceData, meta
 	if err := d.Set("update_policy", flattenRegionUpdatePolicy(manager.UpdatePolicy)); err != nil {
 		return fmt.Errorf("Error setting update_policy in state: %s", err.Error())
 	}
+	if err = d.Set("instance_lifecycle_policy", flattenInstanceLifecyclePolicy(manager.InstanceLifecyclePolicy)); err != nil {
+		return fmt.Errorf("Error setting instance lifecycle policy in state: %s", err.Error())
+	}
 	if err = d.Set("stateful_disk", flattenStatefulPolicy(manager.StatefulPolicy)); err != nil {
 		return fmt.Errorf("Error setting stateful_disk in state: %s", err.Error())
 	}
@@ -677,6 +700,11 @@ func resourceComputeRegionInstanceGroupManagerUpdate(d *schema.ResourceData, met
 		change = true
 	}
 
+	if d.HasChange("instance_lifecycle_policy") {
+		updatedManager.InstanceLifecyclePolicy = expandInstanceLifecyclePolicy(d.Get("instance_lifecycle_policy").([]interface{}))
+		change = true
+	}
+
 	if d.HasChange("stateful_disk") {
 		updatedManager.StatefulPolicy = expandStatefulPolicy(d)
 		change = true
diff --git a/website/docs/r/compute_instance_group_manager.html.markdown b/website/docs/r/compute_instance_group_manager.html.markdown
index 8312acd499d..a38d999c59e 100644
--- a/website/docs/r/compute_instance_group_manager.html.markdown
+++ b/website/docs/r/compute_instance_group_manager.html.markdown
@@ -208,7 +208,7 @@ instance_lifecycle_policy {
 }
 ```
 
-* `force_update_on_repair` - (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)), Specifies whether to apply the group's latest configuration when repairing a VM. Valid options are: `YES`, `NO`. If `YES` and you updated the group's instance template or per-instance configurations after the VM was created, then these changes are applied when VM is repaired. If `NO` (default), then updates are applied in accordance with the group's update policy type.
+* `force_update_on_repair` - (Optional), Specifies whether to apply the group's latest configuration when repairing a VM. Valid options are: `YES`, `NO`. If `YES` and you updated the group's instance template or per-instance configurations after the VM was created, then these changes are applied when VM is repaired. If `NO` (default), then updates are applied in accordance with the group's update policy type.
 
 - - -
 
diff --git a/website/docs/r/compute_region_instance_group_manager.html.markdown b/website/docs/r/compute_region_instance_group_manager.html.markdown
index 45e90554be0..4b5bca0017e 100644
--- a/website/docs/r/compute_region_instance_group_manager.html.markdown
+++ b/website/docs/r/compute_region_instance_group_manager.html.markdown
@@ -218,7 +218,7 @@ instance_lifecycle_policy {
 }
 ```
 
-* `force_update_on_repair` - (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)), Specifies whether to apply the group's latest configuration when repairing a VM. Valid options are: YES, NO. If YES and you updated the group's instance template or per-instance configurations after the VM was created, then these changes are applied when VM is repaired. If NO (default), then updates are applied in accordance with the group's update policy type.
+* `force_update_on_repair` - (Optional), Specifies whether to apply the group's latest configuration when repairing a VM. Valid options are: YES, NO. If YES and you updated the group's instance template or per-instance configurations after the VM was created, then these changes are applied when VM is repaired. If NO (default), then updates are applied in accordance with the group's update policy type.
 
 - - -
 The `all_instances_config` block supports:
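
For quick reference, a minimal configuration exercising the new block might look like the following sketch. The resource names, zone, target size, and the referenced instance template are placeholders for illustration, not anything defined in this change:

```hcl
# Hypothetical usage sketch: opt a managed instance group into applying its
# latest configuration whenever a VM is repaired.
resource "google_compute_instance_group_manager" "example" {
  name               = "example-igm"    # placeholder name
  base_instance_name = "example"
  zone               = "us-central1-a"  # placeholder zone
  target_size        = 2

  version {
    # Assumes an instance template defined elsewhere in the configuration.
    instance_template = google_compute_instance_template.example.id
  }

  # New in this change: "YES" applies the group's latest configuration when a
  # VM is repaired; "NO" (the default) defers to the group's update policy.
  instance_lifecycle_policy {
    force_update_on_repair = "YES"
  }
}
```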