From 23f1dc0de144188bb08434bbc248a2fa8966f38d Mon Sep 17 00:00:00 2001
From: The Magician
Date: Mon, 18 Sep 2023 14:59:03 -0400
Subject: [PATCH] promote node_pool_auto_config field to GA provider (#8951) (#15884)

Signed-off-by: Modular Magician
---
 .changelog/8951.txt                          |   3 +
 .../container/resource_container_cluster.go  | 124 ++++++++++++++++++
 .../resource_container_cluster_test.go       |  52 ++++++++
 .../docs/r/container_cluster.html.markdown   |   6 +-
 4 files changed, 182 insertions(+), 3 deletions(-)
 create mode 100644 .changelog/8951.txt

diff --git a/.changelog/8951.txt b/.changelog/8951.txt
new file mode 100644
index 00000000000..358212d0967
--- /dev/null
+++ b/.changelog/8951.txt
@@ -0,0 +1,3 @@
+```release-note:enhancement
+container: promoted `node_pool_auto_config` field in `google_container_cluster` from beta provider to GA provider. (ga)
+```
diff --git a/google/services/container/resource_container_cluster.go b/google/services/container/resource_container_cluster.go
index 80ef9537aa2..4634ac78989 100644
--- a/google/services/container/resource_container_cluster.go
+++ b/google/services/container/resource_container_cluster.go
@@ -1246,6 +1246,34 @@ func ResourceContainerCluster() *schema.Resource {
 
             "node_pool_defaults": clusterSchemaNodePoolDefaults(),
 
+            "node_pool_auto_config": {
+                Type:        schema.TypeList,
+                Optional:    true,
+                Computed:    true,
+                MaxItems:    1,
+                Description: `Node pool configs that apply to all auto-provisioned node pools in autopilot clusters and node auto-provisioning enabled clusters.`,
+                Elem: &schema.Resource{
+                    Schema: map[string]*schema.Schema{
+                        "network_tags": {
+                            Type:        schema.TypeList,
+                            Optional:    true,
+                            MaxItems:    1,
+                            Description: `Collection of Compute Engine network tags that can be applied to a node's underlying VM instance.`,
+                            Elem: &schema.Resource{
+                                Schema: map[string]*schema.Schema{
+                                    "tags": {
+                                        Type:        schema.TypeList,
+                                        Optional:    true,
+                                        Elem:        &schema.Schema{Type: schema.TypeString},
+                                        Description: `List of network tags applied to auto-provisioned node pools.`,
+                                    },
+                                },
+                            },
+                        },
+                    },
+                },
+            },
+
             "node_version": {
                 Type:     schema.TypeString,
                 Optional: true,
@@ -1912,6 +1940,7 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er
         NotificationConfig:   expandNotificationConfig(d.Get("notification_config")),
         ConfidentialNodes:    expandConfidentialNodes(d.Get("confidential_nodes")),
         ResourceLabels:       tpgresource.ExpandStringMap(d, "resource_labels"),
+        NodePoolAutoConfig:   expandNodePoolAutoConfig(d.Get("node_pool_auto_config")),
         CostManagementConfig: expandCostManagementConfig(d.Get("cost_management_config")),
         EnableK8sBetaApis:    expandEnableK8sBetaApis(d.Get("enable_k8s_beta_apis"), nil),
     }
@@ -2034,6 +2063,10 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er
         cluster.MonitoringConfig = expandMonitoringConfig(v)
     }
 
+    if err := validateNodePoolAutoConfig(cluster); err != nil {
+        return err
+    }
+
     if err := validatePrivateClusterConfig(cluster); err != nil {
         return err
     }
@@ -2492,6 +2525,10 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) erro
         return err
     }
 
+    if err := d.Set("node_pool_auto_config", flattenNodePoolAutoConfig(cluster.NodePoolAutoConfig)); err != nil {
+        return err
+    }
+
     if err := d.Set("node_pool_defaults", flattenNodePoolDefaults(cluster.NodePoolDefaults)); err != nil {
         return err
     }
@@ -3596,6 +3633,27 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er
         log.Printf("[INFO] GKE cluster %s Security Posture Config has been updated to %#v", d.Id(), req.Update.DesiredSecurityPostureConfig)
     }
 
+    if d.HasChange("node_pool_auto_config.0.network_tags.0.tags") {
+        tags := d.Get("node_pool_auto_config.0.network_tags.0.tags").([]interface{})
+
+        req := &container.UpdateClusterRequest{
+            Update: &container.ClusterUpdate{
+                DesiredNodePoolAutoConfigNetworkTags: &container.NetworkTags{
+                    Tags:            tpgresource.ConvertStringArr(tags),
+                    ForceSendFields: []string{"Tags"},
+                },
+            },
+        }
+
+        updateF := updateFunc(req, "updating GKE cluster node pool auto config network tags")
+        // Call update serially.
+        if err := transport_tpg.LockedCall(lockKey, updateF); err != nil {
+            return err
+        }
+
+        log.Printf("[INFO] GKE cluster %s node pool auto config network tags have been updated", d.Id())
+    }
+
     d.Partial(false)
 
     if _, err := containerClusterAwaitRestingState(config, project, location, clusterName, userAgent, d.Timeout(schema.TimeoutUpdate)); err != nil {
@@ -4674,6 +4732,34 @@ func flattenNodePoolDefaults(c *container.NodePoolDefaults) []map[string]interfa
     return []map[string]interface{}{result}
 }
 
+func expandNodePoolAutoConfig(configured interface{}) *container.NodePoolAutoConfig {
+    l := configured.([]interface{})
+    if len(l) == 0 || l[0] == nil {
+        return nil
+    }
+    npac := &container.NodePoolAutoConfig{}
+    config := l[0].(map[string]interface{})
+
+    if v, ok := config["network_tags"]; ok && len(v.([]interface{})) > 0 {
+        npac.NetworkTags = expandNodePoolAutoConfigNetworkTags(v)
+    }
+    return npac
+}
+
+func expandNodePoolAutoConfigNetworkTags(configured interface{}) *container.NetworkTags {
+    l := configured.([]interface{})
+    if len(l) == 0 || l[0] == nil {
+        return nil
+    }
+    nt := &container.NetworkTags{}
+    config := l[0].(map[string]interface{})
+
+    if v, ok := config["tags"]; ok && len(v.([]interface{})) > 0 {
+        nt.Tags = tpgresource.ConvertStringArr(v.([]interface{}))
+    }
+    return nt
+}
+
 func flattenNotificationConfig(c *container.NotificationConfig) []map[string]interface{} {
     if c == nil {
         return nil
@@ -5331,6 +5417,31 @@ func flattenManagedPrometheusConfig(c *container.ManagedPrometheusConfig) []map[
     }
 }
 
+func flattenNodePoolAutoConfig(c *container.NodePoolAutoConfig) []map[string]interface{} {
+    if c == nil {
+        return nil
+    }
+
+    result := make(map[string]interface{})
+    if c.NetworkTags != nil {
+        result["network_tags"] = flattenNodePoolAutoConfigNetworkTags(c.NetworkTags)
+    }
+
+    return []map[string]interface{}{result}
+}
+
+func flattenNodePoolAutoConfigNetworkTags(c *container.NetworkTags) []map[string]interface{} {
+    if c == nil {
+        return nil
+    }
+
+    result := make(map[string]interface{})
+    if c.Tags != nil {
+        result["tags"] = c.Tags
+    }
+    return []map[string]interface{}{result}
+}
+
 func resourceContainerClusterStateImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
     config := meta.(*transport_tpg.Config)
 
@@ -5537,6 +5648,19 @@ func BinaryAuthorizationDiffSuppress(k, old, new string, r *schema.ResourceData)
     return false
 }
 
+func validateNodePoolAutoConfig(cluster *container.Cluster) error {
+    if cluster == nil || cluster.NodePoolAutoConfig == nil {
+        return nil
+    }
+    if cluster.NodePoolAutoConfig != nil && cluster.NodePoolAutoConfig.NetworkTags != nil && len(cluster.NodePoolAutoConfig.NetworkTags.Tags) > 0 {
+        if (cluster.Autopilot == nil || !cluster.Autopilot.Enabled) && (cluster.Autoscaling == nil || !cluster.Autoscaling.EnableNodeAutoprovisioning) {
+            return fmt.Errorf("node_pool_auto_config network tags can only be set if enable_autopilot or cluster_autoscaling is enabled")
+        }
+    }
+
+    return nil
+}
+
 func containerClusterSurgeSettingsCustomizeDiff(_ context.Context, d *schema.ResourceDiff, meta interface{}) error {
     if v, ok := d.GetOk("cluster_autoscaling.0.auto_provisioning_defaults.0.upgrade_settings.0.strategy"); ok {
         if v != "SURGE" {
diff --git a/google/services/container/resource_container_cluster_test.go b/google/services/container/resource_container_cluster_test.go
index f218b90db76..66a6094806b 100644
--- a/google/services/container/resource_container_cluster_test.go
+++ b/google/services/container/resource_container_cluster_test.go
@@ -2156,6 +2156,33 @@ func TestAccContainerCluster_autoprovisioningDefaultsUpgradeSettings(t *testing.
     })
 }
 
+func TestAccContainerCluster_nodeAutoprovisioningNetworkTags(t *testing.T) {
+    t.Parallel()
+
+    clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10))
+
+    acctest.VcrTest(t, resource.TestCase{
+        PreCheck:                 func() { acctest.AccTestPreCheck(t) },
+        ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
+        CheckDestroy:             testAccCheckContainerClusterDestroyProducer(t),
+        Steps: []resource.TestStep{
+            {
+                Config: testAccContainerCluster_autoprovisioning(clusterName, true, true),
+                Check: resource.ComposeTestCheckFunc(
+                    resource.TestCheckResourceAttr("google_container_cluster.with_autoprovisioning",
+                        "node_pool_auto_config.0.network_tags.0.tags.0", "test-network-tag"),
+                ),
+            },
+            {
+                ResourceName:            "google_container_cluster.with_autoprovisioning",
+                ImportState:             true,
+                ImportStateVerify:       true,
+                ImportStateVerifyIgnore: []string{"min_master_version"},
+            },
+        },
+    })
+}
+
 func TestAccContainerCluster_withShieldedNodes(t *testing.T) {
     t.Parallel()
 
@@ -2266,6 +2293,31 @@ func TestAccContainerCluster_errorAutopilotLocation(t *testing.T) {
     })
 }
 
+func TestAccContainerCluster_withAutopilotNetworkTags(t *testing.T) {
+    t.Parallel()
+
+    pid := envvar.GetTestProjectFromEnv()
+    containerNetName := fmt.Sprintf("tf-test-container-net-%s", acctest.RandString(t, 10))
+    clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10))
+
+    acctest.VcrTest(t, resource.TestCase{
+        PreCheck:                 func() { acctest.AccTestPreCheck(t) },
+        ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
+        CheckDestroy:             testAccCheckContainerClusterDestroyProducer(t),
+        Steps: []resource.TestStep{
+            {
+                Config: testAccContainerCluster_withAutopilot(pid, containerNetName, clusterName, "us-central1", true, true, ""),
+            },
+            {
+                ResourceName:            "google_container_cluster.with_autopilot",
+                ImportState:             true,
+                ImportStateVerify:       true,
+                ImportStateVerifyIgnore: []string{"min_master_version"},
+            },
+        },
+    })
+}
+
 func TestAccContainerCluster_withWorkloadIdentityConfig(t *testing.T) {
     t.Parallel()
 
diff --git a/website/docs/r/container_cluster.html.markdown b/website/docs/r/container_cluster.html.markdown
index 3a12ce9213e..c75c9a6371c 100644
--- a/website/docs/r/container_cluster.html.markdown
+++ b/website/docs/r/container_cluster.html.markdown
@@ -268,7 +268,7 @@ region are guaranteed to support the same version.
 to say "these are the _only_ node pools associated with this cluster", use the
 [google_container_node_pool](container_node_pool.html) resource instead of this property.
 
-* `node_pool_auto_config` - (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) Node pool configs that apply to auto-provisioned node pools in
+* `node_pool_auto_config` - (Optional) Node pool configs that apply to auto-provisioned node pools in
 [autopilot](https://cloud.google.com/kubernetes-engine/docs/concepts/autopilot-overview#comparison) clusters and
 [node auto-provisioning](https://cloud.google.com/kubernetes-engine/docs/how-to/node-auto-provisioning)-enabled clusters. Structure is [documented below](#nested_node_pool_auto_config).
 
@@ -1027,11 +1027,11 @@ workload_identity_config {
 
 The `node_pool_auto_config` block supports:
 
-* `network_tags` (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) - The network tag config for the cluster's automatically provisioned node pools.
+* `network_tags` (Optional) - The network tag config for the cluster's automatically provisioned node pools.
 
 The `network_tags` block supports:
 
-* `tags` (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) - List of network tags applied to auto-provisioned node pools.
+* `tags` (Optional) - List of network tags applied to auto-provisioned node pools.
 
 ```hcl
 node_pool_auto_config {
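
For readers evaluating the change, here is a minimal usage sketch of the newly promoted block against the GA provider. It is illustrative only: the resource name, location, and tag values are placeholders, and `enable_autopilot = true` is set because the `validateNodePoolAutoConfig` check added above rejects these network tags unless Autopilot or node auto-provisioning is enabled.

```hcl
# Illustrative sketch only; name, location, and tag values are placeholders.
resource "google_container_cluster" "example" {
  name     = "example-autopilot-cluster"
  location = "us-central1"

  # node_pool_auto_config network tags require Autopilot or node auto-provisioning.
  enable_autopilot = true

  # Tags declared here are applied to every auto-provisioned node pool in the cluster.
  node_pool_auto_config {
    network_tags {
      tags = ["allow-health-checks", "allow-internal"]
    }
  }
}
```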