diff --git a/mmv1/templates/terraform/post_create_failure/go/delete_on_failure.go.tmpl b/mmv1/templates/terraform/post_create_failure/go/delete_on_failure.go.tmpl new file mode 100644 index 000000000000..c837747bffba --- /dev/null +++ b/mmv1/templates/terraform/post_create_failure/go/delete_on_failure.go.tmpl @@ -0,0 +1,14 @@ +log.Printf("[WARN] Attempt to clean up {{$.Name}} if it still exists") +var cleanErr error +if cleanErr = resource{{$.ResourceName}}Read(d, meta); cleanErr == nil { + if d.Id() != "" { + log.Printf("[WARN] {{$.Name}} %q still exists, attempting to delete...", d.Id()) + if cleanErr = resource{{$.ResourceName}}Delete(d, meta); cleanErr == nil { + log.Printf("[WARN] Invalid {{$.Name}} was successfully deleted") + d.SetId("") + } + } +} +if cleanErr != nil { + log.Printf("[WARN] Could not confirm cleanup of {{$.Name}} if created in error state: %v", cleanErr) +} diff --git a/mmv1/templates/terraform/post_delete/go/netapp_volume_replication_delete_destination_volume.go.tmpl b/mmv1/templates/terraform/post_delete/go/netapp_volume_replication_delete_destination_volume.go.tmpl new file mode 100644 index 000000000000..caab159c485f --- /dev/null +++ b/mmv1/templates/terraform/post_delete/go/netapp_volume_replication_delete_destination_volume.go.tmpl @@ -0,0 +1,34 @@ +// A replication CREATE also created a destination volume +// A user can chooses to delete the destination volume after deleting the replication +if d.Get("delete_destination_volume").(bool) == true { + log.Printf("[DEBUG] delete_destination_volume is true. Deleting destination volume %v", d.Get("destination_volume")) + destination_volume := d.Get("destination_volume").(string) + del_url, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}NetappBasePath{{"}}"}}"+destination_volume+"?force=true") + if err != nil { + return err + } + + var obj map[string]interface{} + res_del, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: del_url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Volume") + } + + err = NetappOperationWaitTime( + config, res_del, project, "Deleting destination volume", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting destination Volume %q: %#v", destination_volume, res_del) +} \ No newline at end of file diff --git a/mmv1/templates/terraform/post_delete/go/network_services_gateway.go.tmpl b/mmv1/templates/terraform/post_delete/go/network_services_gateway.go.tmpl new file mode 100644 index 000000000000..90b06bc46791 --- /dev/null +++ b/mmv1/templates/terraform/post_delete/go/network_services_gateway.go.tmpl @@ -0,0 +1,15 @@ +if d.Get("delete_swg_autogen_router_on_destroy").(bool) { + log.Print("[DEBUG] The field delete_swg_autogen_router_on_destroy is true. 
Deleting swg_autogen_router.") + gateways, err := gatewaysSameLocation(d, config, billingProject, userAgent) + if err != nil { + return err + } + + network := d.Get("network").(string) + if isLastSWGGateway(gateways, network) { + err := deleteSWGAutoGenRouter(d, config, billingProject, userAgent) + if err != nil { + return err + } + } +} \ No newline at end of file diff --git a/mmv1/templates/terraform/post_delete/go/org_security_policy.go.tmpl b/mmv1/templates/terraform/post_delete/go/org_security_policy.go.tmpl new file mode 100644 index 000000000000..ad5cd2f70cf9 --- /dev/null +++ b/mmv1/templates/terraform/post_delete/go/org_security_policy.go.tmpl @@ -0,0 +1,10 @@ +parent := d.Get("parent").(string) +var opRes map[string]interface{} +err = ComputeOrgOperationWaitTimeWithResponse( + config, res, &opRes, parent, "Creating OrganizationSecurityPolicy", userAgent, + d.Timeout(schema.TimeoutCreate)) + +if err != nil { + // The resource didn't actually delete + return fmt.Errorf("Error waiting to delete OrganizationSecurityPolicy: %s", err) +} diff --git a/mmv1/templates/terraform/post_delete/go/private_cloud.go.tmpl b/mmv1/templates/terraform/post_delete/go/private_cloud.go.tmpl new file mode 100644 index 000000000000..c2011666a8b2 --- /dev/null +++ b/mmv1/templates/terraform/post_delete/go/private_cloud.go.tmpl @@ -0,0 +1,39 @@ +privateCloudPollRead := func(d *schema.ResourceData, meta interface{}) transport_tpg.PollReadFunc { + return func() (map[string]interface{}, error) { + config := meta.(*transport_tpg.Config) + url, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}{{$.ProductMetadata.Name}}BasePath{{"}}"}}{{$.SelfLinkUri}}") + if err != nil { + return nil, err + } + billingProject := "" + project, err := tpgresource.GetProject(d, config) + if err != nil { + return nil, fmt.Errorf("Error fetching project for PrivateCloud: %s", err) + } + billingProject = project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return res, err + } + return res, nil + } + } + +err = transport_tpg.PollingWaitTime(privateCloudPollRead(d, meta), transport_tpg.PollCheckForAbsence, "Deleting {{$.Name}}", d.Timeout(schema.TimeoutDelete), 10) +if err != nil { + return fmt.Errorf("Error waiting to delete PrivateCloud: %s", err) +} \ No newline at end of file diff --git a/mmv1/templates/terraform/post_delete/private_cloud.go.erb b/mmv1/templates/terraform/post_delete/private_cloud.go.erb index b9413ba82a72..669377a2f9ab 100644 --- a/mmv1/templates/terraform/post_delete/private_cloud.go.erb +++ b/mmv1/templates/terraform/post_delete/private_cloud.go.erb @@ -1,7 +1,7 @@ privateCloudPollRead := func(d *schema.ResourceData, meta interface{}) transport_tpg.PollReadFunc { return func() (map[string]interface{}, error) { config := meta.(*transport_tpg.Config) - url, err := tpgresource.ReplaceVars(d, config, "<%= "{{#{object.__product.name}BasePath}}#{object.self_link_uri}" -%>") + url, err := tpgresource.ReplaceVars(d, config, "{{<%=object.__product.name-%>BasePath}}<%=object.self_link_uri-%>") if err != nil { return nil, err } diff --git 
a/mmv1/templates/terraform/post_import/go/agent_pool.go.tmpl b/mmv1/templates/terraform/post_import/go/agent_pool.go.tmpl new file mode 100644 index 000000000000..d32a8563bbbb --- /dev/null +++ b/mmv1/templates/terraform/post_import/go/agent_pool.go.tmpl @@ -0,0 +1,3 @@ +if err := waitForAgentPoolReady(d, config, d.Timeout(schema.TimeoutCreate) - time.Minute); err != nil { + return nil, fmt.Errorf("Error waiting for AgentPool %q to be CREATED during importing: %q", d.Get("name").(string), err) +} \ No newline at end of file diff --git a/mmv1/templates/terraform/post_import/go/cloud_identity_group_membership.go.tmpl b/mmv1/templates/terraform/post_import/go/cloud_identity_group_membership.go.tmpl new file mode 100644 index 000000000000..f0ffc54d9aee --- /dev/null +++ b/mmv1/templates/terraform/post_import/go/cloud_identity_group_membership.go.tmpl @@ -0,0 +1,17 @@ +{{- /* + The license inside this block applies to this file + Copyright 2024 Google Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/}} + // Configure "group" property, which does not appear in the response body. + group := regexp.MustCompile(`groups/[^/]+`).FindString(id) + if err := d.Set("group", group); err != nil { + return nil, fmt.Errorf("Error setting group property: %s", err) + } diff --git a/mmv1/templates/terraform/post_import/go/cloudbuild_trigger.go.tmpl b/mmv1/templates/terraform/post_import/go/cloudbuild_trigger.go.tmpl new file mode 100644 index 000000000000..c1e1115f2ea9 --- /dev/null +++ b/mmv1/templates/terraform/post_import/go/cloudbuild_trigger.go.tmpl @@ -0,0 +1,20 @@ +{{- /* + The license inside this block applies to this file + Copyright 2024 Google Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/}} + // Force legacy id format for global triggers. + id = strings.ReplaceAll(id, "/locations//", "/") + id = strings.ReplaceAll(id, "/locations/global/", "/") + d.SetId(id) + if d.Get("location") == "" { + // Happens when imported with legacy import format. + d.Set("location", "global") + } diff --git a/mmv1/templates/terraform/post_import/go/datastream_stream.go.tmpl b/mmv1/templates/terraform/post_import/go/datastream_stream.go.tmpl new file mode 100644 index 000000000000..ac64571d3a21 --- /dev/null +++ b/mmv1/templates/terraform/post_import/go/datastream_stream.go.tmpl @@ -0,0 +1,15 @@ +{{- /* + The license inside this block applies to this file + Copyright 2024 Google Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/}} +if err := waitForDatastreamStreamReady(d, config, d.Timeout(schema.TimeoutCreate) - time.Minute); err != nil { + return nil, fmt.Errorf("Error waiting for Stream %q to be NOT_STARTED, RUNNING, or PAUSED during import: %q", d.Get("name").(string), err) +} \ No newline at end of file diff --git a/mmv1/templates/terraform/post_import/go/integration_connectors_connection.go.tmpl b/mmv1/templates/terraform/post_import/go/integration_connectors_connection.go.tmpl new file mode 100644 index 000000000000..c78d42a26036 --- /dev/null +++ b/mmv1/templates/terraform/post_import/go/integration_connectors_connection.go.tmpl @@ -0,0 +1,3 @@ +if err := waitforConnectionReady(d, config, d.Timeout(schema.TimeoutCreate) - time.Minute); err != nil { + return nil, fmt.Errorf("Error waiting for Connection %q to be in a stable state: %q", d.Get("name").(string), err) +} \ No newline at end of file diff --git a/mmv1/templates/terraform/post_import/go/lien_import.tmpl b/mmv1/templates/terraform/post_import/go/lien_import.tmpl new file mode 100644 index 000000000000..3bd7626ef78c --- /dev/null +++ b/mmv1/templates/terraform/post_import/go/lien_import.tmpl @@ -0,0 +1,7 @@ +parent, err := tpgresource.ReplaceVars(d, config, "projects/{{"{{"}}parent{{"}}"}}") +if err != nil { + return nil, err +} +if err := d.Set("parent", parent); err != nil { + return nil, fmt.Errorf("Error setting parent: %s", err) +} diff --git a/mmv1/templates/terraform/post_import/go/private_connection.go.tmpl b/mmv1/templates/terraform/post_import/go/private_connection.go.tmpl new file mode 100644 index 000000000000..cba72c4e2289 --- /dev/null +++ b/mmv1/templates/terraform/post_import/go/private_connection.go.tmpl @@ -0,0 +1,3 @@ +if err := waitForPrivateConnectionReady(d, config, d.Timeout(schema.TimeoutCreate) - time.Minute); err != nil { + return nil, fmt.Errorf("Error waiting for PrivateConnection %q to be CREATED during importing: %q", d.Get("name").(string), err) +} diff --git a/mmv1/templates/terraform/post_import/go/privateca_import.go.tmpl b/mmv1/templates/terraform/post_import/go/privateca_import.go.tmpl new file mode 100644 index 000000000000..a023ec96530b --- /dev/null +++ b/mmv1/templates/terraform/post_import/go/privateca_import.go.tmpl @@ -0,0 +1,3 @@ +if err := d.Set("ignore_active_certificates_on_deletion", false); err != nil { + return nil, err +} diff --git a/mmv1/templates/terraform/post_update/go/cloud_scheduler.go.tmpl b/mmv1/templates/terraform/post_update/go/cloud_scheduler.go.tmpl new file mode 100644 index 000000000000..7bdceda67aa1 --- /dev/null +++ b/mmv1/templates/terraform/post_update/go/cloud_scheduler.go.tmpl @@ -0,0 +1,33 @@ +if d.HasChange("paused") { + endpoint := "resume" // Default to enabled + logSuccessMsg := "Job state has been set to ENABLED" + if paused, pausedOk := d.GetOk("paused"); pausedOk { + if paused.(bool) { + endpoint = "pause" + logSuccessMsg = "Job state has been set to PAUSED" + } + } + + linkTmpl := fmt.Sprintf("{{"{{"}}CloudSchedulerBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/locations/{{"{{"}}region{{"}}"}}/jobs/{{"{{"}}name{{"}}"}}:%s", endpoint) + url, err = 
tpgresource.ReplaceVars(d, config, linkTmpl) + if err != nil { + return err + } + + emptyReqBody := make(map[string]interface{}) + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: emptyReqBody, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error setting Cloud Scheduler Job status: %s", err) + } + + log.Printf("[DEBUG] Finished updating Job %q status: %s", d.Id(), logSuccessMsg) +} \ No newline at end of file diff --git a/mmv1/templates/terraform/post_update/go/cloudbuild_bitbucketserver_config.go.tmpl b/mmv1/templates/terraform/post_update/go/cloudbuild_bitbucketserver_config.go.tmpl new file mode 100644 index 000000000000..5bd9b8cfe9ca --- /dev/null +++ b/mmv1/templates/terraform/post_update/go/cloudbuild_bitbucketserver_config.go.tmpl @@ -0,0 +1,86 @@ +if d.HasChange("connected_repositories") { + o, n := d.GetChange("connected_repositories") + oReposSet, ok := o.(*schema.Set) + if !ok { + return fmt.Errorf("Error reading old connected repositories") + } + nReposSet, ok := n.(*schema.Set) + if !ok { + return fmt.Errorf("Error reading new connected repositories") + } + + removeRepos := oReposSet.Difference(nReposSet).List() + createRepos := nReposSet.Difference(oReposSet).List() + + url, err = tpgresource.ReplaceVars(d, config, "{{"{{"}}CloudBuildBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/locations/{{"{{"}}location{{"}}"}}/bitbucketServerConfigs/{{"{{"}}config_id{{"}}"}}:removeBitbucketServerConnectedRepository") + if err != nil { + return err + } + + // send remove repo requests. + for _, repo := range removeRepos { + obj := make(map[string]interface{}) + obj["connectedRepository"] = repo + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error removing connected_repositories: %s", err) + } + } + + // if repos to create, prepare and send batchCreate request + if len(createRepos) > 0 { + parent, err := tpgresource.ReplaceVars(d, config, "projects/{{"{{"}}project{{"}}"}}/locations/{{"{{"}}location{{"}}"}}/bitbucketServerConfigs/{{"{{"}}config_id{{"}}"}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + var requests []interface{} + for _, repo := range createRepos { + connectedRepo := make(map[string]interface{}) + connectedRepo["parent"] = parent + connectedRepo["repo"] = repo + + connectedRepoRequest := make(map[string]interface{}) + connectedRepoRequest["parent"] = parent + connectedRepoRequest["bitbucketServerConnectedRepository"] = connectedRepo + + requests = append(requests, connectedRepoRequest) + } + obj = make(map[string]interface{}) + obj["requests"] = requests + + url, err = tpgresource.ReplaceVars(d, config, "{{"{{"}}CloudBuildBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/locations/{{"{{"}}location{{"}}"}}/bitbucketServerConfigs/{{"{{"}}config_id{{"}}"}}/connectedRepositories:batchCreate") + if err != nil { + return err + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating connected_repositories: %s", err) + } + 
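// Illustrative sketch (not part of the generated template): the removeRepos/createRepos
// split computed earlier in this hunk comes from diffing the old and new
// connected_repositories sets in both directions. A minimal, self-contained recap of
// that pattern, with made-up repository names:

package main

import (
    "fmt"

    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

func main() {
    oldRepos := schema.NewSet(schema.HashString, []interface{}{"repos/a", "repos/b"})
    newRepos := schema.NewSet(schema.HashString, []interface{}{"repos/b", "repos/c"})

    removeRepos := oldRepos.Difference(newRepos).List() // only in old state -> removeBitbucketServerConnectedRepository
    createRepos := newRepos.Difference(oldRepos).List() // only in new state -> connectedRepositories:batchCreate

    fmt.Println(removeRepos) // [repos/a]
    fmt.Println(createRepos) // [repos/c]
}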
+ err = CloudBuildOperationWaitTime( + config, res, project, "Updating connected_repositories on BitbucketServerConfig", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return fmt.Errorf("Error waiting to create connected_repositories: %s", err) + } + } +} else { + log.Printf("[DEBUG] connected_repositories have no changes") +} diff --git a/mmv1/templates/terraform/post_update/go/compute_per_instance_config.go.tmpl b/mmv1/templates/terraform/post_update/go/compute_per_instance_config.go.tmpl new file mode 100644 index 000000000000..b8b89436aac5 --- /dev/null +++ b/mmv1/templates/terraform/post_update/go/compute_per_instance_config.go.tmpl @@ -0,0 +1,48 @@ +// Instance name in applyUpdatesToInstances request must include zone +instanceName, err := tpgresource.ReplaceVars(d, config, "zones/{{"{{"}}zone{{"}}"}}/instances/{{"{{"}}name{{"}}"}}") +if err != nil { + return err +} + +obj = make(map[string]interface{}) +obj["instances"] = []string{instanceName} + +minAction := d.Get("minimal_action") +if minAction == "" { + minAction = "NONE" +} +obj["minimalAction"] = minAction + +mostDisruptiveAction := d.Get("most_disruptive_allowed_action") +if tpgresource.IsEmptyValue(reflect.ValueOf(mostDisruptiveAction)) { + mostDisruptiveAction = "REPLACE" +} +obj["mostDisruptiveAllowedAction"] = mostDisruptiveAction + +url, err = tpgresource.ReplaceVars(d, config, "{{"{{"}}ComputeBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/zones/{{"{{"}}zone{{"}}"}}/instanceGroupManagers/{{"{{"}}instance_group_manager{{"}}"}}/applyUpdatesToInstances") +if err != nil { + return err +} + +log.Printf("[DEBUG] Applying updates to PerInstanceConfig %q: %#v", d.Id(), obj) +res, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), +}) + +if err != nil { + return fmt.Errorf("Error updating PerInstanceConfig %q: %s", d.Id(), err) +} + +err = ComputeOperationWaitTime( + config, res, project, "Applying update to PerInstanceConfig", userAgent, + d.Timeout(schema.TimeoutUpdate)) + +if err != nil { + return err +} \ No newline at end of file diff --git a/mmv1/templates/terraform/post_update/go/compute_region_per_instance_config.go.tmpl b/mmv1/templates/terraform/post_update/go/compute_region_per_instance_config.go.tmpl new file mode 100644 index 000000000000..54c1dc2bd63b --- /dev/null +++ b/mmv1/templates/terraform/post_update/go/compute_region_per_instance_config.go.tmpl @@ -0,0 +1,48 @@ +// Instance name in applyUpdatesToInstances request must include zone +instanceName, err := findInstanceName(d, config) +if err != nil { + return err +} + +obj = make(map[string]interface{}) +obj["instances"] = []string{instanceName} + +minAction := d.Get("minimal_action") +if minAction == "" { + minAction = "NONE" +} +obj["minimalAction"] = minAction + +mostDisruptiveAction := d.Get("most_disruptive_allowed_action") +if tpgresource.IsEmptyValue(reflect.ValueOf(mostDisruptiveAction)) { + mostDisruptiveAction = "REPLACE" +} +obj["mostDisruptiveAllowedAction"] = mostDisruptiveAction + +url, err = tpgresource.ReplaceVars(d, config, "{{"{{"}}ComputeBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/regions/{{"{{"}}region{{"}}"}}/instanceGroupManagers/{{"{{"}}region_instance_group_manager{{"}}"}}/applyUpdatesToInstances") +if err != nil { + return err +} + +log.Printf("[DEBUG] Applying updates to PerInstanceConfig %q: %#v", d.Id(), obj) +res, err = 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), +}) + +if err != nil { + return fmt.Errorf("Error updating PerInstanceConfig %q: %s", d.Id(), err) +} + +err = ComputeOperationWaitTime( + config, res, project, "Applying update to PerInstanceConfig", userAgent, + d.Timeout(schema.TimeoutUpdate)) + +if err != nil { + return err +} \ No newline at end of file diff --git a/mmv1/templates/terraform/post_update/go/datastream_stream.go.tmpl b/mmv1/templates/terraform/post_update/go/datastream_stream.go.tmpl new file mode 100644 index 000000000000..921d99698e31 --- /dev/null +++ b/mmv1/templates/terraform/post_update/go/datastream_stream.go.tmpl @@ -0,0 +1,15 @@ +{{- /* + The license inside this block applies to this file + Copyright 2024 Google Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/}} +if err := waitForDatastreamStreamReady(d, config, d.Timeout(schema.TimeoutCreate) - time.Minute); err != nil { + return fmt.Errorf("Error waiting for Stream %q to be NOT_STARTED, RUNNING, or PAUSED during update: %q", d.Get("name").(string), err) +} \ No newline at end of file diff --git a/mmv1/templates/terraform/post_update/go/iam_workforce_pool_provider.go.tmpl b/mmv1/templates/terraform/post_update/go/iam_workforce_pool_provider.go.tmpl new file mode 100644 index 000000000000..f82d1d27a253 --- /dev/null +++ b/mmv1/templates/terraform/post_update/go/iam_workforce_pool_provider.go.tmpl @@ -0,0 +1,19 @@ +if d.HasChange("oidc") { + updatedClientSecret := d.Get("oidc.0.client_secret.0.value.0.plain_text") + if updatedClientSecret != nil && updatedClientSecret != "" { + // After the update, reading from the API returns a different thumbprint + // for the client secret value, which clears the plain_text. We set the plain_text since + // this case should not warrant a diff. 
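// Illustrative sketch (not part of the generated template): the statements below this
// note re-set oidc.0.client_secret.0.value.0.plain_text by walking the SDK's
// nested-block representation ([]interface{} of map[string]interface{}) with unchecked
// type assertions. Under that same assumed layout, a defensive variant of the walk
// could look like this (helper name and error texts are made up):

package main

import "fmt"

func setNestedPlainText(oidc, plainText interface{}) error {
    oidcList, ok := oidc.([]interface{})
    if !ok || len(oidcList) == 0 {
        return fmt.Errorf("oidc block is missing or has an unexpected shape: %T", oidc)
    }
    oidcMap, ok := oidcList[0].(map[string]interface{})
    if !ok {
        return fmt.Errorf("unexpected oidc element: %T", oidcList[0])
    }
    secretList, ok := oidcMap["client_secret"].([]interface{})
    if !ok || len(secretList) == 0 {
        return fmt.Errorf("client_secret block is missing")
    }
    secretMap, ok := secretList[0].(map[string]interface{})
    if !ok {
        return fmt.Errorf("unexpected client_secret element: %T", secretList[0])
    }
    valueList, ok := secretMap["value"].([]interface{})
    if !ok || len(valueList) == 0 {
        return fmt.Errorf("client_secret.value block is missing")
    }
    valueMap, ok := valueList[0].(map[string]interface{})
    if !ok {
        return fmt.Errorf("unexpected value element: %T", valueList[0])
    }
    valueMap["plain_text"] = plainText
    return nil
}

func main() {
    oidc := []interface{}{map[string]interface{}{
        "client_secret": []interface{}{map[string]interface{}{
            "value": []interface{}{map[string]interface{}{"plain_text": ""}},
        }},
    }}
    fmt.Println(setNestedPlainText(oidc, "example-secret"), oidc)
}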
+ if err := resourceIAMWorkforcePoolWorkforcePoolProviderRead(d, meta); err != nil { + return err + } + oidc := d.Get("oidc") + clientSecret := oidc.([]interface{})[0].(map[string]interface{})["client_secret"] + clientSecretValue := clientSecret.([]interface{})[0].(map[string]interface{})["value"] + clientSecretValue.([]interface{})[0].(map[string]interface{})["plain_text"] = updatedClientSecret + if err := d.Set("oidc", oidc); err != nil { + return err + } + return nil + } +} diff --git a/mmv1/templates/terraform/post_update/go/integration_connectors_connection.go.tmpl b/mmv1/templates/terraform/post_update/go/integration_connectors_connection.go.tmpl new file mode 100644 index 000000000000..db66cc890e3e --- /dev/null +++ b/mmv1/templates/terraform/post_update/go/integration_connectors_connection.go.tmpl @@ -0,0 +1,3 @@ +if err := waitforConnectionReady(d, config, d.Timeout(schema.TimeoutCreate) - time.Minute); err != nil { + return fmt.Errorf("Error waiting for Connection %q to finish being in UPDATING state during updation: %q", d.Get("name").(string), err) +} \ No newline at end of file diff --git a/mmv1/templates/terraform/post_update/go/netapp_volume_replication_mirror_state.go.tmpl b/mmv1/templates/terraform/post_update/go/netapp_volume_replication_mirror_state.go.tmpl new file mode 100644 index 000000000000..5cac7eebb9b2 --- /dev/null +++ b/mmv1/templates/terraform/post_update/go/netapp_volume_replication_mirror_state.go.tmpl @@ -0,0 +1,89 @@ +// Manage stopping and resuming a mirror + +var obj2 map[string]interface{} +do_change := false +var action string +var targetState string +// state transitions +// there can be a glitch is a transfer starts/ends between reading mirrorState +// and sending the action. This will be very rare. No workaround. +if d.Get("replication_enabled").(bool) == true { + switch d.Get("mirror_state").(string) { + case "STOPPED": + // replication_enabled==true, mirrorState==STOPPED -> resume + action = "resume" + targetState = "MIRRORED" + do_change = true + default: + // replication_enabled==true, mirrorState!=STOPPED -> NOOP + do_change = false + } +} else { + switch d.Get("mirror_state").(string) { + case "MIRRORED": + // replication_enabled==false, mirrorState==MIRRORED -> stop + action = "stop" + targetState = "STOPPED" + do_change = true + case "TRANSFERRING": + // replication_enabled==false, mirrorState==TRANSFERRING -> force stop + // User needs to add force_stopping = true, otherwise will receive error + action = "stop" + targetState = "STOPPED" + do_change = true + case "PREPARING": + // replication_enabled==false, mirrorState==PREPARING -> stop + // Currently cannot be stopped. User will receive following error: + // Error code 3, message: invalid request error: "Replication in preparing state. Please wait until replication is in 'READY' STATE and try again later.". 
+ // User needs to wait until mirrorState=MIRRORED + action = "stop" + targetState = "STOPPED" + do_change = true + default: + // replication_enabled==false, mirrorState==STOPPED -> NOOP + do_change = false + } + + if do_change == true && d.Get("force_stopping").(bool) == true { + obj2 = map[string]interface{}{ + "force": true, + } + } +} + +if do_change { + // We need to send STOP/RESUME API calls + rawurl, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}NetappBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/locations/{{"{{"}}location{{"}}"}}/volumes/{{"{{"}}volume_name{{"}}"}}/replications/{{"{{"}}name{{"}}"}}:"+action) + if err != nil { + return err + } + + res2, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: rawurl, + UserAgent: userAgent, + Body: obj2, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error stopping/resuming replication %q: %s", d.Id(), err) + } + + err = NetappOperationWaitTime( + config, res2, project, "volume replication "+action, userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + // If user specified to wait for mirror operations, wait to reach target state + if d.Get("wait_for_mirror").(bool) == true { + err = NetAppVolumeReplicationWaitForMirror(d, meta, targetState) + if err != nil { + return fmt.Errorf("Error waiting for volume replication to reach mirror_state==%s: %s", targetState, err) + } + } +} diff --git a/mmv1/templates/terraform/post_update/go/notebooks_instance.go.tmpl b/mmv1/templates/terraform/post_update/go/notebooks_instance.go.tmpl new file mode 100644 index 000000000000..819cff2d1dd2 --- /dev/null +++ b/mmv1/templates/terraform/post_update/go/notebooks_instance.go.tmpl @@ -0,0 +1,21 @@ +name := d.Get("name").(string) +state := d.Get("state").(string) +desired_state := d.Get("desired_state").(string) + +if state != desired_state { + verb := "start" + if desired_state == "STOPPED" { + verb = "stop" + } + pRes, err := modifyNotebooksInstanceState(config, d, project, billingProject, userAgent, verb) + if err != nil { + return err + } + + if err := waitForNotebooksOperation(config, d, project, billingProject, userAgent, pRes); err != nil { + return fmt.Errorf("Error waiting to modify Notebook Instance state: %s", err) + } + +} else { + log.Printf("[DEBUG] Notebook Instance %q has state %q.", name, state) +} diff --git a/mmv1/templates/terraform/post_update/go/org_security_policy.go.tmpl b/mmv1/templates/terraform/post_update/go/org_security_policy.go.tmpl new file mode 100644 index 000000000000..8c3f8b47dd17 --- /dev/null +++ b/mmv1/templates/terraform/post_update/go/org_security_policy.go.tmpl @@ -0,0 +1,10 @@ +parent := d.Get("parent").(string) +var opRes map[string]interface{} +err = ComputeOrgOperationWaitTimeWithResponse( + config, res, &opRes, parent, "Creating OrganizationSecurityPolicy", userAgent, + d.Timeout(schema.TimeoutCreate)) + +if err != nil { + // The resource didn't actually update + return fmt.Errorf("Error waiting to update OrganizationSecurityPolicy: %s", err) +} diff --git a/mmv1/templates/terraform/post_update/go/private_cloud.go.tmpl b/mmv1/templates/terraform/post_update/go/private_cloud.go.tmpl new file mode 100644 index 000000000000..a20adb96e5fc --- /dev/null +++ b/mmv1/templates/terraform/post_update/go/private_cloud.go.tmpl @@ -0,0 +1,54 @@ +mgmtClusterProp, err := expandVmwareenginePrivateCloudManagementCluster(d.Get("management_cluster"), d, config) 
+if v, ok := d.GetOkExists("management_cluster"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, mgmtClusterProp)) { + obj["managementCluster"] = mgmtClusterProp +} + +mgmtMap := mgmtClusterProp.(map[string]interface{}) +parentUrl, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}VmwareengineBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/locations/{{"{{"}}location{{"}}"}}/privateClouds/{{"{{"}}name{{"}}"}}") +if err != nil { + return err +} + +clusterUrl := fmt.Sprintf("%s/clusters/%s", parentUrl, mgmtMap["clusterId"]) +clusterUpdateMask := []string{} +clusterObj := make(map[string]interface{}) + +if v, ok := d.GetOkExists("management_cluster"); !tpgresource.IsEmptyValue(reflect.ValueOf(mgmtClusterProp)) && (ok || !reflect.DeepEqual(v, mgmtClusterProp)) { + clusterObj["nodeTypeConfigs"] = mgmtMap["nodeTypeConfigs"] +} + +if d.HasChange("management_cluster") { + clusterUpdateMask = append(clusterUpdateMask, "nodeTypeConfigs.*.nodeCount") +} + +clusterPatchUrl, err := transport_tpg.AddQueryParams(clusterUrl, map[string]string{"updateMask": strings.Join(clusterUpdateMask, ",")}) +if err != nil { + return err +} + +// check if there is anything to update to avoid API call if not required. +if len(clusterUpdateMask) > 0 { + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: clusterPatchUrl, + UserAgent: userAgent, + Body: clusterObj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating magament cluster %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating magament cluster %q: %#v", d.Id(), res) + } + + err = VmwareengineOperationWaitTime( + config, res, project, "Updating Managment Cluster", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } +} \ No newline at end of file diff --git a/mmv1/templates/terraform/post_update/go/tagtemplate_fields.go.tmpl b/mmv1/templates/terraform/post_update/go/tagtemplate_fields.go.tmpl new file mode 100644 index 000000000000..fac8f72d0a98 --- /dev/null +++ b/mmv1/templates/terraform/post_update/go/tagtemplate_fields.go.tmpl @@ -0,0 +1,133 @@ +} + + +// since fields have a separate endpoint, +// we need to handle it manually + +type FieldChange struct { + Old, New map[string]interface{} +} + +o, n := d.GetChange("fields") +vals := make(map[string]*FieldChange) + +// this will create a dictionary with the value +// of field_id as the key that will contain the +// maps of old and new values +for _, raw := range o.(*schema.Set).List() { + obj := raw.(map[string]interface{}) + k := obj["field_id"].(string) + vals[k] = &FieldChange{Old: obj} +} + +for _, raw := range n.(*schema.Set).List() { + obj := raw.(map[string]interface{}) + k := obj["field_id"].(string) + if _, ok := vals[k]; !ok { + // if key is not present in the vals, + // then create an empty object to hold the new value + vals[k] = &FieldChange{} + } + vals[k].New = obj +} + +// fields schema to create schema.set below +dataCatalogTagTemplateFieldsSchema := &schema.Resource{ + Schema: ResourceDataCatalogTagTemplate().Schema["fields"].Elem.(*schema.Resource).Schema, +} + +for name, change := range vals { + // A few different situations to deal with in here: + // - change.Old is nil: create a new role + // - change.New is nil: remove an existing role + // - both are set: test if New is different than Old and update if so + + changeOldSet := 
schema.NewSet(schema.HashResource(dataCatalogTagTemplateFieldsSchema), []interface{}{}) + changeOldSet.Add(change.Old) + var changeOldProp map[string]interface{} + if len(change.Old) != 0 { + changeOldProp, _ = expandDataCatalogTagTemplateFields(changeOldSet, nil, nil) + changeOldProp = changeOldProp[name].(map[string]interface{}) + } + + changeNewSet := schema.NewSet(schema.HashResource(dataCatalogTagTemplateFieldsSchema), []interface{}{}) + changeNewSet.Add(change.New) + var changeNewProp map[string]interface{} + if len(change.New) != 0 { + changeNewProp, _ = expandDataCatalogTagTemplateFields(changeNewSet, nil, nil) + changeNewProp = changeNewProp[name].(map[string]interface{}) + } + + // if old state is empty, then we have a new field to create + if len(change.Old) == 0 { + err := createTagTemplateField(d, config, changeNewProp, name, billingProject, userAgent) + if err != nil { + return err + } + + continue + } + + // if new state is empty, then we need to delete the current field + if len(change.New) == 0 { + err := deleteTagTemplateField(d, config, name, billingProject, userAgent) + if err != nil { + return err + } + + continue + } + + // if we have old and new values, but are not equal, update with the new state + if !reflect.DeepEqual(changeOldProp, changeNewProp) { + url1, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}DataCatalogBasePath{{"}}"}}{{"{{"}}name{{"}}"}}/fields/"+name) + if err != nil { + return err + } + + oldType := changeOldProp["type"].(map[string]interface{}) + newType := changeNewProp["type"].(map[string]interface{}) + + if oldType["primitiveType"] != newType["primitiveType"] { + // As primitiveType can't be changed, it is considered as ForceNew which triggers the deletion of old field and recreation of a new field + // Before that, we need to check that is_required is True for the newType or not, as we don't have support to add new required field in the existing TagTemplate, + // So in such cases, we can simply return the error + + // Reason for checking the isRequired in changeNewProp - + // Because this changeNewProp check should be ignored when the user wants to update the primitive type and make it optional rather than keeping it required. 
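// Illustrative sketch (not part of the generated template): the changeOldSet/changeNewSet
// construction a few lines above wraps a single field in a one-element schema.Set so the
// regular set-based expander can be reused per field. A minimal, self-contained version
// of that trick, with a made-up field schema:

package main

import (
    "fmt"

    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

func main() {
    // Stand-in for ResourceDataCatalogTagTemplate().Schema["fields"].Elem
    fieldResource := &schema.Resource{
        Schema: map[string]*schema.Schema{
            "field_id":     {Type: schema.TypeString, Required: true},
            "display_name": {Type: schema.TypeString, Optional: true},
        },
    }

    // One-element set built from the same hash function the resource uses,
    // so a multi-element expander can be applied to a single field.
    single := schema.NewSet(schema.HashResource(fieldResource), []interface{}{})
    single.Add(map[string]interface{}{"field_id": "env", "display_name": "Environment"})

    fmt.Println(single.Len()) // 1
}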
+ if changeNewProp["isRequired"] != nil && changeNewProp["isRequired"].(bool) { + return fmt.Errorf("Updating the primitive type for a required field on an existing tag template is not supported as TagTemplateField %q is required", name) + } + + // delete changeOldProp + err_delete := deleteTagTemplateField(d, config, name, billingProject, userAgent) + if err_delete != nil { + return err_delete + } + + // recreate changeNewProp + err_create := createTagTemplateField(d, config, changeNewProp, name, billingProject, userAgent) + if err_create != nil { + return err_create + } + + log.Printf("[DEBUG] Finished updating TagTemplate Field %q", name) + return resourceDataCatalogTagTemplateRead(d, meta) + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url1, + UserAgent: userAgent, + Body: changeNewProp, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return fmt.Errorf("Error updating TagTemplate Field %v: %s", name, err) + } + + log.Printf("[DEBUG] Finished updating TagTemplate Field %q: %#v", name, res) + } +} \ No newline at end of file diff --git a/mmv1/templates/terraform/post_update/go/workbench_instance.go.tmpl b/mmv1/templates/terraform/post_update/go/workbench_instance.go.tmpl new file mode 100644 index 000000000000..cd216f018387 --- /dev/null +++ b/mmv1/templates/terraform/post_update/go/workbench_instance.go.tmpl @@ -0,0 +1,20 @@ +state := d.Get("state").(string) +desired_state := d.Get("desired_state").(string) + +if state != desired_state { + verb := "start" + if desired_state == "STOPPED" { + verb = "stop" + } + pRes, err := modifyWorkbenchInstanceState(config, d, project, billingProject, userAgent, verb) + if err != nil { + return err + } + + if err := waitForWorkbenchOperation(config, d, project, billingProject, userAgent, pRes); err != nil { + return fmt.Errorf("Error waiting to modify Workbench Instance state: %s", err) + } + +} else { + log.Printf("[DEBUG] Workbench Instance %q has state %q.", name, state) +} diff --git a/mmv1/templates/terraform/pre_create/access_approval_settings.go.erb b/mmv1/templates/terraform/pre_create/access_approval_settings.go.erb deleted file mode 100644 index 03f06b6edc6e..000000000000 --- a/mmv1/templates/terraform/pre_create/access_approval_settings.go.erb +++ /dev/null @@ -1 +0,0 @@ -<%= lines(compile(pwd + '/templates/terraform/update_mask.erb')) if object.update_mask -%> \ No newline at end of file diff --git a/mmv1/templates/terraform/pre_create/filestore_instance.go.erb b/mmv1/templates/terraform/pre_create/filestore_instance.go.erb index aadd513207e4..28144ef10c8e 100644 --- a/mmv1/templates/terraform/pre_create/filestore_instance.go.erb +++ b/mmv1/templates/terraform/pre_create/filestore_instance.go.erb @@ -10,7 +10,7 @@ } if strings.Contains(url, "locations//") { // re-compute url now that location must be set - url, err = tpgresource.ReplaceVars(d, config, "<%= "{{#{object.__product.name}BasePath}}#{object.create_uri}" -%>") + url, err = tpgresource.ReplaceVars(d, config, "{{<%=object.__product.name-%>BasePath}}<%=object.create_uri-%>") if err != nil { return err } diff --git a/mmv1/templates/terraform/pre_create/go/access_context_manager_service_perimeter_dry_run_resource.go.tmpl b/mmv1/templates/terraform/pre_create/go/access_context_manager_service_perimeter_dry_run_resource.go.tmpl new file mode 100644 index 000000000000..7e9f036c027b --- /dev/null +++ 
b/mmv1/templates/terraform/pre_create/go/access_context_manager_service_perimeter_dry_run_resource.go.tmpl @@ -0,0 +1 @@ +obj["use_explicit_dry_run_spec"] = true diff --git a/mmv1/templates/terraform/pre_create/go/alloydb_cluster.go.tmpl b/mmv1/templates/terraform/pre_create/go/alloydb_cluster.go.tmpl new file mode 100644 index 000000000000..87ac9c897160 --- /dev/null +++ b/mmv1/templates/terraform/pre_create/go/alloydb_cluster.go.tmpl @@ -0,0 +1,65 @@ +// Read the restore variables from obj and remove them, since they do not map to anything in the cluster +var backupSource interface{} +var continuousBackupSource interface{} +if val, ok := obj["restoreBackupSource"]; ok { + backupSource = val + delete(obj, "restoreBackupSource") +} +if val, ok := obj["restoreContinuousBackupSource"]; ok { + continuousBackupSource = val + delete(obj, "restoreContinuousBackupSource") +} + +restoreClusterRequestBody := make(map[string]interface{}) +if backupSource != nil { + // If restoring from a backup, set the backupSource + restoreClusterRequestBody["backup_source"] = backupSource +} else if continuousBackupSource != nil { + // Otherwise if restoring via PITR, set the continuousBackupSource + restoreClusterRequestBody["continuous_backup_source"] = continuousBackupSource +} + +if backupSource != nil || continuousBackupSource != nil { + // Use restore API if this is a restore instead of a create cluster call + url = strings.Replace(url, "clusters?clusterId", "clusters:restore?clusterId", 1) + + // Copy obj which contains the cluster into a cluster map + cluster := make(map[string]interface{}) + for k,v := range obj { + cluster[k] = v + } + restoreClusterRequestBody["cluster"] = cluster + obj = restoreClusterRequestBody +} + + +// Read the secondary cluster config to call the api for creating secondary cluster + +var secondaryConfig interface{} +var clusterType interface{} + +if val, ok := obj["secondaryConfig"]; ok { + secondaryConfig = val +} + +if val, ok := obj["clusterType"]; ok { + clusterType = val +} + +if clusterType == "SECONDARY" { + if secondaryConfig != nil { + // Use createsecondary API if this is a secondary cluster + url = strings.Replace(url, "clusters?clusterId", "clusters:createsecondary?cluster_id", 1) + + // Validation error if secondary_config is not defined + } else { + return fmt.Errorf("Error creating cluster. Can not create secondary cluster without secondary_config field.") + } +} + +// Validation error if secondary_config is defined but, cluster type is not secondary +if secondaryConfig != nil { + if clusterType != "SECONDARY" { + return fmt.Errorf("Error creating cluster. 
Add {cluster_type: \"SECONDARY\"} if attempting to create a secondary cluster, otherwise remove the secondary_config.") + } +} diff --git a/mmv1/templates/terraform/pre_create/go/alloydb_instance.go.tmpl b/mmv1/templates/terraform/pre_create/go/alloydb_instance.go.tmpl new file mode 100644 index 000000000000..ba3d97920cd4 --- /dev/null +++ b/mmv1/templates/terraform/pre_create/go/alloydb_instance.go.tmpl @@ -0,0 +1,5 @@ +// Read the config and call createsecondary api if instance_type is SECONDARY + +if instanceType := d.Get("instance_type"); instanceType == "SECONDARY" { + url = strings.Replace(url, "instances?instanceId", "instances:createsecondary?instanceId", 1) +} diff --git a/mmv1/templates/terraform/pre_create/go/artifact_registry_remote_repository.go.tmpl b/mmv1/templates/terraform/pre_create/go/artifact_registry_remote_repository.go.tmpl new file mode 100644 index 000000000000..bad5d0a9599b --- /dev/null +++ b/mmv1/templates/terraform/pre_create/go/artifact_registry_remote_repository.go.tmpl @@ -0,0 +1,18 @@ +// This file should be deleted in the next major terraform release, alongside +// the default values for 'publicRepository'. + +// deletePublicRepoIfCustom deletes the publicRepository key for a given +// pkg type from the remote repository config if customRepository is set. +deletePublicRepoIfCustom := func(pkgType string) { + if _, ok := d.GetOk(fmt.Sprintf("remote_repository_config.0.%s_repository.0.custom_repository", pkgType)); ok { + rrcfg := obj["remoteRepositoryConfig"].(map[string]interface{}) + repo := rrcfg[fmt.Sprintf("%sRepository", pkgType)].(map[string]interface{}) + delete(repo, "publicRepository") + } +} + +// Call above func for all pkg types that support custom remote repos. +deletePublicRepoIfCustom("docker") +deletePublicRepoIfCustom("maven") +deletePublicRepoIfCustom("npm") +deletePublicRepoIfCustom("python") \ No newline at end of file diff --git a/mmv1/templates/terraform/pre_create/go/cloud_asset_feed.go.tmpl b/mmv1/templates/terraform/pre_create/go/cloud_asset_feed.go.tmpl new file mode 100644 index 000000000000..3f6167a7f11d --- /dev/null +++ b/mmv1/templates/terraform/pre_create/go/cloud_asset_feed.go.tmpl @@ -0,0 +1,4 @@ + +// Send the project ID in the X-Goog-User-Project header. +origUserProjectOverride := config.UserProjectOverride +config.UserProjectOverride = true diff --git a/mmv1/templates/terraform/pre_create/go/compute_global_address.go.tmpl b/mmv1/templates/terraform/pre_create/go/compute_global_address.go.tmpl new file mode 100644 index 000000000000..bdf5ae5d293f --- /dev/null +++ b/mmv1/templates/terraform/pre_create/go/compute_global_address.go.tmpl @@ -0,0 +1,5 @@ +// Note: Global external IP addresses and internal IP addresses are always Premium Tier. 
+// An address with type INTERNAL cannot have a network tier +if addressTypeProp != "INTERNAL" { + obj["networkTier"] = "PREMIUM" +} diff --git a/mmv1/templates/terraform/pre_create/go/compute_node_group_url_replace.go.tmpl b/mmv1/templates/terraform/pre_create/go/compute_node_group_url_replace.go.tmpl new file mode 100644 index 000000000000..a85b927ba68a --- /dev/null +++ b/mmv1/templates/terraform/pre_create/go/compute_node_group_url_replace.go.tmpl @@ -0,0 +1,12 @@ +var sizeParam string +if v, ok := d.GetOkExists("initial_size"); ok { + sizeParam = fmt.Sprintf("%v", v) +}else{ + if _, ok := d.GetOkExists("autoscaling_policy"); ok{ + sizeParam = fmt.Sprintf("%v", d.Get("autoscaling_policy.min_nodes")) + }else{ + return errors.New("An initial_size or autoscaling_policy must be configured on node group creation.") + } +} + +url = regexp.MustCompile("PRE_CREATE_REPLACE_ME").ReplaceAllLiteralString(url, sizeParam) diff --git a/mmv1/templates/terraform/pre_create/go/compute_snapshot_precreate_url.go.tmpl b/mmv1/templates/terraform/pre_create/go/compute_snapshot_precreate_url.go.tmpl new file mode 100644 index 000000000000..32227c290959 --- /dev/null +++ b/mmv1/templates/terraform/pre_create/go/compute_snapshot_precreate_url.go.tmpl @@ -0,0 +1,3 @@ + +url = regexp.MustCompile("PRE_CREATE_REPLACE_ME").ReplaceAllLiteralString(url, sourceDiskProp.(string)) + diff --git a/mmv1/templates/terraform/pre_create/go/dialogflow_set_location.go.tmpl b/mmv1/templates/terraform/pre_create/go/dialogflow_set_location.go.tmpl new file mode 100644 index 000000000000..6fbc20413561 --- /dev/null +++ b/mmv1/templates/terraform/pre_create/go/dialogflow_set_location.go.tmpl @@ -0,0 +1,15 @@ + +// extract location from the parent +location := "" + +if parts := regexp.MustCompile(`locations\/([^\/]*)\/`).FindStringSubmatch(d.Get("parent").(string)); parts != nil { + location = parts[1] +} else { + return fmt.Errorf( + "Saw %s when the parent is expected to contains location %s", + d.Get("parent"), + "projects/{{"{{"}}project{{"}}"}}/locations/{{"{{"}}location{{"}}"}}/...", + ) +} + +url = strings.Replace(url,"-dialogflow",fmt.Sprintf("%s-dialogflow",location),1) diff --git a/mmv1/templates/terraform/pre_create/go/dialogflowcx_set_location_skip_default_obj.go.tmpl b/mmv1/templates/terraform/pre_create/go/dialogflowcx_set_location_skip_default_obj.go.tmpl new file mode 100644 index 000000000000..9de481fcc568 --- /dev/null +++ b/mmv1/templates/terraform/pre_create/go/dialogflowcx_set_location_skip_default_obj.go.tmpl @@ -0,0 +1,43 @@ + +// extract location from the parent +location := "" + +if parts := regexp.MustCompile(`locations\/([^\/]*)\/`).FindStringSubmatch(d.Get("parent").(string)); parts != nil { + location = parts[1] +} else { + return fmt.Errorf( + "Saw %s when the parent is expected to contains location %s", + d.Get("parent"), + "projects/{{"{{"}}project{{"}}"}}/locations/{{"{{"}}location{{"}}"}}/...", + ) +} + +url = strings.Replace(url,"-dialogflow",fmt.Sprintf("%s-dialogflow",location),1) + +// if it's a default object Dialogflow creates for you, "Update" instead of "Create" +// Note: below we try to access fields that aren't present in the resource, because this custom code is reused across multiple Dialogflow resources that contain different fields. When the field isn't present, we deliberately ignore the error and the boolean is false. 
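// Illustrative sketch (not part of the generated template): the isDefault* lines below
// this note rely on Go's two-value type assertion — when a field is absent from a
// resource's schema, d.Get returns nil (as the comment above notes) and the assertion
// yields the zero value false without panicking. A standalone demonstration:

package main

import "fmt"

func main() {
    var fromSchema interface{} = true // field present in the schema
    var missing interface{}           // field absent: d.Get would return nil

    present, okPresent := fromSchema.(bool)
    absent, okMissing := missing.(bool)

    fmt.Println(present, okPresent) // true true
    fmt.Println(absent, okMissing)  // false false
}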
+isDefaultStartFlow, _ := d.Get("is_default_start_flow").(bool) +isDefaultWelcomeIntent, _ := d.Get("is_default_welcome_intent").(bool) +isDefaultNegativeIntent, _ := d.Get("is_default_negative_intent").(bool) +if isDefaultStartFlow || isDefaultWelcomeIntent || isDefaultNegativeIntent { + // hardcode the default object ID: + var defaultObjName string + if isDefaultStartFlow || isDefaultWelcomeIntent { + defaultObjName = "00000000-0000-0000-0000-000000000000" + } + if isDefaultNegativeIntent { + defaultObjName = "00000000-0000-0000-0000-000000000001" + } + + // Store the ID + d.Set("name", defaultObjName) + id, err := tpgresource.ReplaceVars(d, config, "{{$.GetIdFormat}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // and defer to the Update method: + log.Printf("[DEBUG] Updating default {{$.ResourceName}}") + return resource{{$.ResourceName}}Update(d, meta) +} \ No newline at end of file diff --git a/mmv1/templates/terraform/pre_create/go/document_ai_processor_default_version_interpolate_location.go.tmpl b/mmv1/templates/terraform/pre_create/go/document_ai_processor_default_version_interpolate_location.go.tmpl new file mode 100644 index 000000000000..c9a6a3942a59 --- /dev/null +++ b/mmv1/templates/terraform/pre_create/go/document_ai_processor_default_version_interpolate_location.go.tmpl @@ -0,0 +1,5 @@ +if strings.Contains(url,"https://-"){ + location := tpgresource.GetRegionFromRegionalSelfLink(url) + url = strings.TrimPrefix(url, "https://") + url = "https://" + location + url +} \ No newline at end of file diff --git a/mmv1/templates/terraform/pre_create/go/filestore_instance.go.tmpl b/mmv1/templates/terraform/pre_create/go/filestore_instance.go.tmpl new file mode 100644 index 000000000000..937aa46ab4c8 --- /dev/null +++ b/mmv1/templates/terraform/pre_create/go/filestore_instance.go.tmpl @@ -0,0 +1,17 @@ + if d.Get("location") == "" { + zone, err := tpgresource.GetZone(d, config) + if err != nil { + return err + } + err = d.Set("location", zone) + if err != nil { + return err + } + } + if strings.Contains(url, "locations//") { + // re-compute url now that location must be set + url, err = tpgresource.ReplaceVars(d, config, "{{"{{"}}{{$.ProductMetadata.Name}}BasePath{{"}}"}}{{$.CreateUri}}") + if err != nil { + return err + } + } diff --git a/mmv1/templates/terraform/pre_create/go/firebase_project.go.tmpl b/mmv1/templates/terraform/pre_create/go/firebase_project.go.tmpl new file mode 100644 index 000000000000..22173c673173 --- /dev/null +++ b/mmv1/templates/terraform/pre_create/go/firebase_project.go.tmpl @@ -0,0 +1,11 @@ +// Check if Firebase has already been enabled +existingId, err := getExistingFirebaseProjectId(config, d, billingProject, userAgent) +if err != nil { + return fmt.Errorf("Error checking if Firebase is already enabled: %s", err) +} + +if existingId != "" { + log.Printf("[DEBUG] Firebase is already enabled for project %s", project) + d.SetId(existingId) + return resourceFirebaseProjectRead(d, meta) +} diff --git a/mmv1/templates/terraform/pre_create/go/network_endpoints.go.tmpl b/mmv1/templates/terraform/pre_create/go/network_endpoints.go.tmpl new file mode 100644 index 000000000000..c4cb461a6669 --- /dev/null +++ b/mmv1/templates/terraform/pre_create/go/network_endpoints.go.tmpl @@ -0,0 +1,7 @@ +chunkSize := 500 // API only accepts 500 endpoints at a time +lastPage, err := networkEndpointsPaginatedMutate(d, obj["networkEndpoints"].([]interface{}), config, userAgent, url, project, billingProject, chunkSize, true) +if err 
!= nil { + // networkEndpointsPaginatedMutate already adds error description + return err +} +obj["networkEndpoints"] = lastPage \ No newline at end of file diff --git a/mmv1/templates/terraform/pre_create/go/os_login_ssh_public_key.go.tmpl b/mmv1/templates/terraform/pre_create/go/os_login_ssh_public_key.go.tmpl new file mode 100644 index 000000000000..5921bb1fe108 --- /dev/null +++ b/mmv1/templates/terraform/pre_create/go/os_login_ssh_public_key.go.tmpl @@ -0,0 +1,20 @@ +{{- /* + The license inside this block applies to this file + Copyright 2024 Google Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/}} +// Don't use `GetProject()` because we only want to set the project in the URL +// if the user set it explicitly on the resource. +if p, ok := d.GetOk("project"); ok { + url, err = transport_tpg.AddQueryParams(url, map[string]string{"projectId": p.(string)}) + if err != nil { + return err + } +} \ No newline at end of file diff --git a/mmv1/templates/terraform/pre_create/go/privateca_certificate.go.tmpl b/mmv1/templates/terraform/pre_create/go/privateca_certificate.go.tmpl new file mode 100644 index 000000000000..f2a69e30a30c --- /dev/null +++ b/mmv1/templates/terraform/pre_create/go/privateca_certificate.go.tmpl @@ -0,0 +1,19 @@ +{{- /* + The license inside this block applies to this file + Copyright 2024 Google Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/}} +// Only include linked certificate authority if the user specified it +if p, ok := d.GetOk("certificate_authority"); ok { + url, err = transport_tpg.AddQueryParams(url, map[string]string{"issuingCertificateAuthorityId": p.(string)}) + if err != nil { + return err + } +} \ No newline at end of file diff --git a/mmv1/templates/terraform/pre_create/go/privateca_certificate_authority.go.tmpl b/mmv1/templates/terraform/pre_create/go/privateca_certificate_authority.go.tmpl new file mode 100644 index 000000000000..7f109cffe5ec --- /dev/null +++ b/mmv1/templates/terraform/pre_create/go/privateca_certificate_authority.go.tmpl @@ -0,0 +1,3 @@ +// Drop `subordinateConfig` as it can not be set during CA creation. +// It can be used to activate CA during post_create or pre_update. 
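// Illustrative sketch (not part of the generated template): the two pre_create hunks
// above (os_login_ssh_public_key and privateca_certificate) only append a query
// parameter when the user explicitly set the field (d.GetOk), via
// transport_tpg.AddQueryParams. A standard-library analogue of that URL manipulation,
// with made-up function name and values:

package main

import (
    "fmt"
    "net/url"
)

func addQueryParam(rawURL, key, value string) (string, error) {
    u, err := url.Parse(rawURL)
    if err != nil {
        return "", err
    }
    q := u.Query()
    q.Set(key, value)
    u.RawQuery = q.Encode()
    return u.String(), nil
}

func main() {
    u, err := addQueryParam("https://privateca.googleapis.com/v1/projects/p/locations/l/caPools/pool/certificates", "issuingCertificateAuthorityId", "my-ca")
    fmt.Println(u, err)
}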
+delete(obj, "subordinateConfig") diff --git a/mmv1/templates/terraform/pre_delete/go/alloydb_cluster.go.tmpl b/mmv1/templates/terraform/pre_delete/go/alloydb_cluster.go.tmpl new file mode 100644 index 000000000000..743d7e857912 --- /dev/null +++ b/mmv1/templates/terraform/pre_delete/go/alloydb_cluster.go.tmpl @@ -0,0 +1,4 @@ +// Forcefully delete the secondary cluster and the dependent instances because deletion of secondary instance is not supported. +if deletionPolicy := d.Get("deletion_policy"); deletionPolicy == "FORCE" { + url = url + "?force=true" +} \ No newline at end of file diff --git a/mmv1/templates/terraform/pre_delete/go/alloydb_instance.go.tmpl b/mmv1/templates/terraform/pre_delete/go/alloydb_instance.go.tmpl new file mode 100644 index 000000000000..68d0ca605bc4 --- /dev/null +++ b/mmv1/templates/terraform/pre_delete/go/alloydb_instance.go.tmpl @@ -0,0 +1,18 @@ +// Read the config and avoid calling the delete API if the instance_type is SECONDARY and instead return nil +// Returning nil is equivalent of returning a success message to the users +// This is done because deletion of secondary instance is not supported +// Instead users should be deleting the secondary cluster which will forcefully delete the associated secondary instance +// A warning message prompts the user to delete the associated secondary cluster. +// Users can always undo the delete secondary instance action by importing the deleted secondary instance by calling terraform import + +var instanceType interface{} +instanceTypeProp, err := expandAlloydbInstanceInstanceType(d.Get("instance_type"), d, config) +if err != nil { + return err +} else if v, ok := d.GetOkExists("instance_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(instanceTypeProp)) && (ok || !reflect.DeepEqual(v, instanceTypeProp)) { + instanceType = instanceTypeProp +} +if instanceType != nil && instanceType == "SECONDARY" { + log.Printf("[WARNING] This operation didn't delete the Secondary Instance %q. Please delete the associated Secondary Cluster as well to delete the entire cluster and the secondary instance.\n", d.Id()) + return nil +} diff --git a/mmv1/templates/terraform/pre_delete/go/compute_disk_resource_policies_attachment.go.tmpl b/mmv1/templates/terraform/pre_delete/go/compute_disk_resource_policies_attachment.go.tmpl new file mode 100644 index 000000000000..3eb72439e907 --- /dev/null +++ b/mmv1/templates/terraform/pre_delete/go/compute_disk_resource_policies_attachment.go.tmpl @@ -0,0 +1,24 @@ +obj = make(map[string]interface{}) + +zone, err := tpgresource.GetZone(d, config) +if err != nil { + return err +} +if zone == "" { + return fmt.Errorf("zone must be non-empty - set in resource or at provider-level") +} + +// resourcePolicies are referred to by region but affixed to zonal disks. 
+// We construct the regional name from the zone: +// projects/{project}/regions/{region}/resourcePolicies/{resourceId} +region := tpgresource.GetRegionFromZone(zone) +if region == "" { + return fmt.Errorf("invalid zone %q, unable to infer region from zone", zone) +} + +name, err := expandNestedComputeDiskResourcePolicyAttachmentName(d.Get("name"), d, config) +if err != nil { + return err +} else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(name)) && (ok || !reflect.DeepEqual(v, name)) { + obj["resourcePolicies"] = []interface{}{fmt.Sprintf("projects/%s/regions/%s/resourcePolicies/%s", project, region, name)} +} diff --git a/mmv1/templates/terraform/pre_delete/go/compute_global_network_endpoint.go.tmpl b/mmv1/templates/terraform/pre_delete/go/compute_global_network_endpoint.go.tmpl new file mode 100644 index 000000000000..53144f4d14b6 --- /dev/null +++ b/mmv1/templates/terraform/pre_delete/go/compute_global_network_endpoint.go.tmpl @@ -0,0 +1,28 @@ +toDelete := make(map[string]interface{}) +portProp, err := expandNestedComputeGlobalNetworkEndpointPort(d.Get("port"), d, config) +if err != nil { + return err +} +if portProp != "" && portProp != 0 { + toDelete["port"] = portProp +} + +ipAddressProp, err := expandNestedComputeGlobalNetworkEndpointIpAddress(d.Get("ip_address"), d, config) +if err != nil { + return err +} +if ipAddressProp != "" { + toDelete["ipAddress"] = ipAddressProp +} + +fqdnProp, err := expandNestedComputeGlobalNetworkEndpointFqdn(d.Get("fqdn"), d, config) +if err != nil { + return err +} +if fqdnProp != "" { + toDelete["fqdn"] = fqdnProp +} + +obj = map[string]interface{}{ + "networkEndpoints": []map[string]interface{}{toDelete}, +} \ No newline at end of file diff --git a/mmv1/templates/terraform/pre_delete/go/compute_instance_group_membership.go.tmpl b/mmv1/templates/terraform/pre_delete/go/compute_instance_group_membership.go.tmpl new file mode 100644 index 000000000000..e0320e2cefa7 --- /dev/null +++ b/mmv1/templates/terraform/pre_delete/go/compute_instance_group_membership.go.tmpl @@ -0,0 +1,12 @@ +toDelete := make(map[string]interface{}) + +// Instance +instanceProp := flattenNestedComputeInstanceGroupMembershipInstance(d.Get("instance"), d, config) + +if instanceProp != "" { + toDelete["instance"] = instanceProp +} + +obj = map[string]interface{}{ + "instances": []map[string]interface{}{toDelete}, +} diff --git a/mmv1/templates/terraform/pre_delete/go/compute_network_endpoint.go.tmpl b/mmv1/templates/terraform/pre_delete/go/compute_network_endpoint.go.tmpl new file mode 100644 index 000000000000..6ea4af458237 --- /dev/null +++ b/mmv1/templates/terraform/pre_delete/go/compute_network_endpoint.go.tmpl @@ -0,0 +1,26 @@ +toDelete := make(map[string]interface{}) +instanceProp, err := expandNestedComputeNetworkEndpointInstance(d.Get("instance"), d, config) +if err != nil { + return err +} +if instanceProp != "" { + toDelete["instance"] = instanceProp +} + +portProp, err := expandNestedComputeNetworkEndpointPort(d.Get("port"), d, config) +if err != nil { + return err +} +if portProp != 0 { + toDelete["port"] = portProp +} + +ipAddressProp, err := expandNestedComputeNetworkEndpointIpAddress(d.Get("ip_address"), d, config) +if err != nil { + return err +} +toDelete["ipAddress"] = ipAddressProp + +obj = map[string]interface{}{ + "networkEndpoints": []map[string]interface{}{toDelete}, +} \ No newline at end of file diff --git a/mmv1/templates/terraform/pre_delete/go/compute_network_endpoints.go.tmpl 
b/mmv1/templates/terraform/pre_delete/go/compute_network_endpoints.go.tmpl new file mode 100644 index 000000000000..d718daac7b4c --- /dev/null +++ b/mmv1/templates/terraform/pre_delete/go/compute_network_endpoints.go.tmpl @@ -0,0 +1,41 @@ +var endpointsToDelete []interface{} + +endpoints := d.Get("network_endpoints").(*schema.Set).List() + +for _, e := range(endpoints) { + endpoint := e.(map[string]interface{}) + toDelete := make(map[string]interface{}) + instanceProp, err := expandNestedComputeNetworkEndpointInstance(endpoint["instance"], d, config) + if err != nil { + return err + } + if instanceProp != "" { + toDelete["instance"] = instanceProp + } + + portProp, err := expandNestedComputeNetworkEndpointPort(endpoint["port"], d, config) + if err != nil { + return err + } + if portProp != 0 { + toDelete["port"] = portProp + } + + ipAddressProp, err := expandNestedComputeNetworkEndpointIpAddress(endpoint["ip_address"], d, config) + if err != nil { + return err + } + toDelete["ipAddress"] = ipAddressProp + endpointsToDelete = append(endpointsToDelete, toDelete) +} + +chunkSize := 500 // API only accepts 500 endpoints at a time +lastPage, err := networkEndpointsPaginatedMutate(d, endpointsToDelete, config, userAgent, url, project, billingProject, chunkSize, true) +if err != nil { + // networkEndpointsPaginatedMutate already adds error description + return err +} + +obj = map[string]interface{}{ + "networkEndpoints": lastPage, +} diff --git a/mmv1/templates/terraform/pre_delete/go/compute_region_disk_resource_policies_attachment.go.tmpl b/mmv1/templates/terraform/pre_delete/go/compute_region_disk_resource_policies_attachment.go.tmpl new file mode 100644 index 000000000000..e1358b1ee4f7 --- /dev/null +++ b/mmv1/templates/terraform/pre_delete/go/compute_region_disk_resource_policies_attachment.go.tmpl @@ -0,0 +1,16 @@ +obj = make(map[string]interface{}) + +region, err := tpgresource.GetRegion(d, config) +if err != nil { + return err +} +if region == "" { + return fmt.Errorf("region must be non-empty - set in resource or at provider-level") +} + +name, err := expandNestedComputeDiskResourcePolicyAttachmentName(d.Get("name"), d, config) +if err != nil { + return err +} else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(name)) && (ok || !reflect.DeepEqual(v, name)) { + obj["resourcePolicies"] = []interface{}{fmt.Sprintf("projects/%s/regions/%s/resourcePolicies/%s", project, region, name)} +} diff --git a/mmv1/templates/terraform/pre_delete/go/compute_region_network_endpoint.go.tmpl b/mmv1/templates/terraform/pre_delete/go/compute_region_network_endpoint.go.tmpl new file mode 100644 index 000000000000..732a63059365 --- /dev/null +++ b/mmv1/templates/terraform/pre_delete/go/compute_region_network_endpoint.go.tmpl @@ -0,0 +1,32 @@ +toDelete := make(map[string]interface{}) + +// Port +portProp, err := expandNestedComputeRegionNetworkEndpointPort(d.Get("port"), d, config) +if err != nil { + return err +} +if portProp != 0 { + toDelete["port"] = portProp +} + +// IP address +ipAddressProp, err := expandNestedComputeRegionNetworkEndpointIpAddress(d.Get("ip_address"), d, config) +if err != nil { + return err +} +if ipAddressProp != "" { + toDelete["ipAddress"] = ipAddressProp +} + +// FQDN +fqdnProp, err := expandNestedComputeRegionNetworkEndpointFqdn(d.Get("fqdn"), d, config) +if err != nil { + return err +} +if fqdnProp != "" { + toDelete["fqdn"] = fqdnProp +} + +obj = map[string]interface{}{ + "networkEndpoints": []map[string]interface{}{toDelete}, +} \ No newline at end 
of file diff --git a/mmv1/templates/terraform/pre_delete/go/container_attached_deletion_policy.go.tmpl b/mmv1/templates/terraform/pre_delete/go/container_attached_deletion_policy.go.tmpl new file mode 100644 index 000000000000..a1dc101783f4 --- /dev/null +++ b/mmv1/templates/terraform/pre_delete/go/container_attached_deletion_policy.go.tmpl @@ -0,0 +1,8 @@ +if v, ok := d.GetOk("deletion_policy"); ok { + if v == "DELETE_IGNORE_ERRORS" { + url, err = transport_tpg.AddQueryParams(url, map[string]string{"ignore_errors": "true"}) + if err != nil { + return err + } + } +} diff --git a/mmv1/templates/terraform/pre_delete/go/detach_disk.tmpl b/mmv1/templates/terraform/pre_delete/go/detach_disk.tmpl new file mode 100644 index 000000000000..5af9ef6a2274 --- /dev/null +++ b/mmv1/templates/terraform/pre_delete/go/detach_disk.tmpl @@ -0,0 +1,67 @@ +readRes, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, +{{- if $.ErrorRetryPredicates }} + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{ {{- join $.ErrorRetryPredicates "," -}} }, +{{- end }} +{{- if $.ErrorAbortPredicates }} + ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{ {{- join $.ErrorAbortPredicates "," -}} }, +{{- end }} +}) +if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeDisk %q", d.Id())) +} + +// if disks are attached to instances, they must be detached before the disk can be deleted +if v, ok := readRes["users"].([]interface{}); ok { + type detachArgs struct{ project, zone, instance, deviceName string } + var detachCalls []detachArgs + + for _, instance := range tpgresource.ConvertStringArr(v) { + self := d.Get("self_link").(string) + instanceProject, instanceZone, instanceName, err := tpgresource.GetLocationalResourcePropertiesFromSelfLinkString(instance) + if err != nil { + return err + } + + i, err := config.NewComputeClient(userAgent).Instances.Get(instanceProject, instanceZone, instanceName).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] instance %q not found, not bothering to detach disks", instance) + continue + } + return fmt.Errorf("Error retrieving instance %s: %s", instance, err.Error()) + } + for _, disk := range i.Disks { + if tpgresource.CompareSelfLinkOrResourceName("", disk.Source, self, nil) { + detachCalls = append(detachCalls, detachArgs{ + project: instanceProject, + zone: tpgresource.GetResourceNameFromSelfLink(i.Zone), + instance: i.Name, + deviceName: disk.DeviceName, + }) + } + } + } + + for _, call := range detachCalls { + op, err := config.NewComputeClient(userAgent).Instances.DetachDisk(call.project, call.zone, call.instance, call.deviceName).Do() + if err != nil { + return fmt.Errorf("Error detaching disk %s from instance %s/%s/%s: %s", call.deviceName, call.project, + call.zone, call.instance, err.Error()) + } + err = ComputeOperationWaitTime(config, op, call.project, + fmt.Sprintf("Detaching disk from %s/%s/%s", call.project, call.zone, call.instance), userAgent, d.Timeout(schema.TimeoutDelete)) + if err != nil { + var opErr ComputeOperationError + if errors.As(err, &opErr) && len(opErr.Errors) == 1 && opErr.Errors[0].Code == "RESOURCE_NOT_FOUND" { + log.Printf("[WARN] instance %q was deleted while awaiting detach", call.instance) + continue + } + return err + } + } +} diff --git a/mmv1/templates/terraform/pre_delete/go/detach_network.tmpl 
b/mmv1/templates/terraform/pre_delete/go/detach_network.tmpl new file mode 100644 index 000000000000..efe93514b428 --- /dev/null +++ b/mmv1/templates/terraform/pre_delete/go/detach_network.tmpl @@ -0,0 +1,29 @@ +// if networks are attached, they need to be detached before the policy can be deleted +if d.Get("networks.#").(int) > 0 { + patched := make(map[string]interface{}) + patched["networks"] = nil + + url, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}DNSBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/policies/{{"{{"}}name{{"}}"}}") + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: patched, + Timeout: d.Timeout(schema.TimeoutUpdate), +{{- if $.ErrorRetryPredicates }} + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{ {{- join $.ErrorRetryPredicates "," -}} }, +{{- end }} +{{- if $.ErrorAbortPredicates }} + ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{ {{- join $.ErrorAbortPredicates "," -}} }, +{{- end }} + }) + if err != nil { + return fmt.Errorf("Error updating Policy %q: %s", d.Id(), err) + } +} diff --git a/mmv1/templates/terraform/pre_delete/go/dialogflowcx_set_location_skip_default_obj.go.tmpl b/mmv1/templates/terraform/pre_delete/go/dialogflowcx_set_location_skip_default_obj.go.tmpl new file mode 100644 index 000000000000..2e887423d246 --- /dev/null +++ b/mmv1/templates/terraform/pre_delete/go/dialogflowcx_set_location_skip_default_obj.go.tmpl @@ -0,0 +1,26 @@ + +// extract location from the parent +location := "" + +if parts := regexp.MustCompile(`locations\/([^\/]*)\/`).FindStringSubmatch(d.Get("parent").(string)); parts != nil { + location = parts[1] +} else { + return fmt.Errorf( + "Saw %s when the parent is expected to contains location %s", + d.Get("parent"), + "projects/{{"{{"}}project{{"}}"}}/locations/{{"{{"}}location{{"}}"}}/...", + ) +} + +url = strings.Replace(url, "-dialogflow", fmt.Sprintf("%s-dialogflow", location), 1) + +// if it's a default object Dialogflow creates for you, skip deletion +// Note: below we try to access fields that aren't present in the resource, because this custom code is reused across multiple Dialogflow resources that contain different fields. When the field isn't present, we deliberately ignore the error and the boolean is false. +isDefaultStartFlow, _ := d.Get("is_default_start_flow").(bool) +isDefaultWelcomeIntent, _ := d.Get("is_default_welcome_intent").(bool) +isDefaultNegativeIntent, _ := d.Get("is_default_negative_intent").(bool) +if isDefaultStartFlow || isDefaultWelcomeIntent || isDefaultNegativeIntent { + // we can't delete these resources so do nothing + log.Printf("[DEBUG] Not deleting default {{$.ResourceName}}") + return nil +} \ No newline at end of file diff --git a/mmv1/templates/terraform/pre_delete/go/firebase_database_instance.go.tmpl b/mmv1/templates/terraform/pre_delete/go/firebase_database_instance.go.tmpl new file mode 100644 index 000000000000..379f351dc1c5 --- /dev/null +++ b/mmv1/templates/terraform/pre_delete/go/firebase_database_instance.go.tmpl @@ -0,0 +1,24 @@ +{{- /* + The license inside this block applies to this file + Copyright 2024 Google Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/}} + +// start of customized code +if d.Get("state").(string) == "ACTIVE" { + if err := disableRTDB(config, d, project, billingProject, userAgent); err != nil { + return err + } +} +if d.Get("type").(string) == "DEFAULT_DATABASE" { + log.Printf("[WARN] Default Firebase Database Instance %q cannot be deleted, left disabled", d.Id()) + return nil +} +// end of customized code diff --git a/mmv1/templates/terraform/pre_delete/go/firestore_database.go.tmpl b/mmv1/templates/terraform/pre_delete/go/firestore_database.go.tmpl new file mode 100644 index 000000000000..2f396d07df3d --- /dev/null +++ b/mmv1/templates/terraform/pre_delete/go/firestore_database.go.tmpl @@ -0,0 +1,7 @@ +if deletionPolicy := d.Get("deletion_policy"); deletionPolicy != "DELETE" { + log.Printf("[WARN] Firestore database %q deletion_policy is not set to 'DELETE', skipping deletion", d.Get("name").(string)) + return nil +} +if deleteProtection := d.Get("delete_protection_state"); deleteProtection == "DELETE_PROTECTION_ENABLED" { + return fmt.Errorf("Cannot delete Firestore database %s: Delete Protection is enabled. Set delete_protection_state to DELETE_PROTECTION_DISABLED for this resource and run \"terraform apply\" before attempting to delete it.", d.Get("name").(string)) +} diff --git a/mmv1/templates/terraform/pre_delete/go/interconnect_attachment.go.tmpl b/mmv1/templates/terraform/pre_delete/go/interconnect_attachment.go.tmpl new file mode 100644 index 000000000000..03e41d0e5da3 --- /dev/null +++ b/mmv1/templates/terraform/pre_delete/go/interconnect_attachment.go.tmpl @@ -0,0 +1,3 @@ +if err := waitForAttachmentToBeProvisioned(d, config, d.Timeout(schema.TimeoutCreate)); err != nil { + return fmt.Errorf("Error waiting for InterconnectAttachment %q to be provisioned: %q", d.Get("name").(string), err) +} diff --git a/mmv1/templates/terraform/pre_delete/go/managed_dns_zone.go.tmpl b/mmv1/templates/terraform/pre_delete/go/managed_dns_zone.go.tmpl new file mode 100644 index 000000000000..d5f4c64a7a51 --- /dev/null +++ b/mmv1/templates/terraform/pre_delete/go/managed_dns_zone.go.tmpl @@ -0,0 +1,70 @@ +if d.Get("force_destroy").(bool) { + zone := d.Get("name").(string) + token := "" + for paginate := true; paginate; { + var resp *dns.ResourceRecordSetsListResponse + if token == "" { + resp, err = config.NewDnsClient(userAgent).ResourceRecordSets.List(project, zone).Do() + if err != nil { + return fmt.Errorf("Error reading ResourceRecordSets: %s", err) + } + } else { + resp, err = config.NewDnsClient(userAgent).ResourceRecordSets.List(project, zone).PageToken(token).Do() + if err != nil { + return fmt.Errorf("Error reading ResourceRecordSets: %s", err) + } + } + + for _, rr := range resp.Rrsets { + // Build the change + chg := &dns.Change{ + Deletions: []*dns.ResourceRecordSet{ + { + Name: rr.Name, + Type: rr.Type, + Ttl: rr.Ttl, + Rrdatas: rr.Rrdatas, + }, + }, + } + + if rr.Type == "NS" { + mz, err := config.NewDnsClient(userAgent).ManagedZones.Get(project, zone).Do() + if err != nil { + return fmt.Errorf("Error retrieving managed zone %q from %q: %s", zone, project, err) + } + domain := mz.DnsName + + if domain == rr.Name { + 
log.Println("[DEBUG] NS records can't be deleted due to API restrictions, so they're being left in place. See https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/dns_record_set for more information.") + continue + } + } + + if rr.Type == "SOA" { + log.Println("[DEBUG] SOA records can't be deleted due to API restrictions, so they're being left in place.") + continue + } + + log.Printf("[DEBUG] DNS Record delete request via MZ: %#v", chg) + chg, err = config.NewDnsClient(userAgent).Changes.Create(project, zone, chg).Do() + if err != nil { + return fmt.Errorf("Unable to delete ResourceRecordSets: %s", err) + } + + w := &DnsChangeWaiter{ + Service: config.NewDnsClient(userAgent), + Change: chg, + Project: project, + ManagedZone: zone, + } + _, err = w.Conf().WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for Google DNS change: %s", err) + } + } + + token = resp.NextPageToken + paginate = token != "" + } +} \ No newline at end of file diff --git a/mmv1/templates/terraform/pre_delete/go/modify_delete_url.tmpl b/mmv1/templates/terraform/pre_delete/go/modify_delete_url.tmpl new file mode 100644 index 000000000000..331772bb0b17 --- /dev/null +++ b/mmv1/templates/terraform/pre_delete/go/modify_delete_url.tmpl @@ -0,0 +1,8 @@ +// log the old URL to make the ineffassign linter happy +// in theory, we should find a way to disable the default URL and not construct +// both, but that's a problem for another day. Today, we cheat. +log.Printf("[DEBUG] replacing URL %q with a custom delete URL", url) +url, err = tpgresource.ReplaceVars(d, config, "{{"{{"}}{{$.ProductMetadata.Name}}BasePath{{"}}"}}{{$.BaseUrl}}/{{"{{"}}name{{"}}"}}") +if err != nil { + return err +} diff --git a/mmv1/templates/terraform/pre_delete/go/netapp_volume_force_delete.go.tmpl b/mmv1/templates/terraform/pre_delete/go/netapp_volume_force_delete.go.tmpl new file mode 100644 index 000000000000..d8fd2927332f --- /dev/null +++ b/mmv1/templates/terraform/pre_delete/go/netapp_volume_force_delete.go.tmpl @@ -0,0 +1,4 @@ +// Delete volume even when nested snapshots do exist +if deletionPolicy := d.Get("deletion_policy"); deletionPolicy == "FORCE" { + url = url + "?force=true" +} \ No newline at end of file diff --git a/mmv1/templates/terraform/pre_delete/go/netapp_volume_replication_stop_before_delete.go.tmpl b/mmv1/templates/terraform/pre_delete/go/netapp_volume_replication_stop_before_delete.go.tmpl new file mode 100644 index 000000000000..c609365ff492 --- /dev/null +++ b/mmv1/templates/terraform/pre_delete/go/netapp_volume_replication_stop_before_delete.go.tmpl @@ -0,0 +1,33 @@ +// A replication can only be deleted if mirrorState==STOPPED +// We are about to delete the replication and need to stop the mirror before. +// FYI: Stopping a PREPARING mirror currently doesn't work. User have to wait until +// mirror reaches MIRRORED. 
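// Illustrative sketch (editorial note): the pre-delete flow implemented below, reduced to its states,
// assuming the mirror_state field and the :stop action used in this template:
//
//	PREPARING -> (wait; stop currently doesn't work)  -> MIRRORED
//	MIRRORED  -> POST ...:stop {"force": true}        -> STOPPED (after the returned operation completes)
//	STOPPED   -> proceed with the normal DELETE request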
+if d.Get("mirror_state") != "STOPPED" { + rawurl, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}NetappBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/locations/{{"{{"}}location{{"}}"}}/volumes/{{"{{"}}volume_name{{"}}"}}/replications/{{"{{"}}name{{"}}"}}:stop") + if err != nil { + return err + } + + reso, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: rawurl, + UserAgent: userAgent, + // We delete anyway, so lets always use force stop + Body: map[string]interface{}{ + "force": true, + }, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error stopping volume replication %q before deleting it: %s", d.Id(), err) + } + + err = NetappOperationWaitTime( + config, reso, project, "Deleting volume replication", userAgent, + d.Timeout(schema.TimeoutDelete)) + if err != nil { + return err + } +} \ No newline at end of file diff --git a/mmv1/templates/terraform/pre_delete/go/privateca_authority_disable.go.tmpl b/mmv1/templates/terraform/pre_delete/go/privateca_authority_disable.go.tmpl new file mode 100644 index 000000000000..09bb198ab02b --- /dev/null +++ b/mmv1/templates/terraform/pre_delete/go/privateca_authority_disable.go.tmpl @@ -0,0 +1,31 @@ +if d.Get("deletion_protection").(bool) { + return fmt.Errorf("cannot destroy CertificateAuthority without setting deletion_protection=false and running `terraform apply`") +} + +if d.Get("state").(string) == "ENABLED" { + disableUrl, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}PrivatecaBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/locations/{{"{{"}}location{{"}}"}}/caPools/{{"{{"}}pool{{"}}"}}/certificateAuthorities/{{"{{"}}certificate_authority_id{{"}}"}}:disable") + if err != nil { + return err + } + + log.Printf("[DEBUG] Disabling CertificateAuthority: %#v", obj) + + dRes, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: disableUrl, + UserAgent: userAgent, + }) + if err != nil { + return fmt.Errorf("Error disabling CertificateAuthority: %s", err) + } + + var opRes map[string]interface{} + err = PrivatecaOperationWaitTimeWithResponse( + config, dRes, &opRes, project, "Disabling CertificateAuthority", userAgent, + d.Timeout(schema.TimeoutDelete)) + if err != nil { + return fmt.Errorf("Error waiting to disable CertificateAuthority: %s", err) + } +} diff --git a/mmv1/templates/terraform/pre_delete/go/response_policy_detach_network_gke.tmpl b/mmv1/templates/terraform/pre_delete/go/response_policy_detach_network_gke.tmpl new file mode 100644 index 000000000000..6358cd83bac8 --- /dev/null +++ b/mmv1/templates/terraform/pre_delete/go/response_policy_detach_network_gke.tmpl @@ -0,0 +1,59 @@ +// if gke clusters are attached, they need to be detached before the response policy can be deleted +if d.Get("gke_clusters.#").(int) > 0 { + patched := make(map[string]interface{}) + patched["gkeClusters"] = nil + + url, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}DNSBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/responsePolicies/{{"{{"}}response_policy_name{{"}}"}}") + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: patched, + Timeout: d.Timeout(schema.TimeoutUpdate), +{{- if $.ErrorRetryPredicates }} + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{ {{- 
join $.ErrorRetryPredicates "," -}} }, +{{- end }} +{{- if $.ErrorAbortPredicates }} + ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{ {{- join $.ErrorAbortPredicates "," -}} }, +{{- end }} + }) + if err != nil { + return fmt.Errorf("Error updating Policy %q: %s", d.Id(), err) + } +} + +// if networks are attached, they need to be detached before the response policy can be deleted +if d.Get("networks.#").(int) > 0 { + patched := make(map[string]interface{}) + patched["networks"] = nil + + url, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}DNSBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/responsePolicies/{{"{{"}}response_policy_name{{"}}"}}") + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: patched, + Timeout: d.Timeout(schema.TimeoutUpdate), +{{- if $.ErrorRetryPredicates }} + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{ {{- join $.ErrorRetryPredicates "," -}} }, +{{- end }} +{{- if $.ErrorAbortPredicates }} + ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{ {{- join $.ErrorAbortPredicates "," -}} }, +{{- end }} + }) + if err != nil { + return fmt.Errorf("Error updating Policy %q: %s", d.Id(), err) + } +} diff --git a/mmv1/templates/terraform/pre_delete/go/restore_default_binaryauthorization_policy.tmpl b/mmv1/templates/terraform/pre_delete/go/restore_default_binaryauthorization_policy.tmpl new file mode 100644 index 000000000000..f011481771c8 --- /dev/null +++ b/mmv1/templates/terraform/pre_delete/go/restore_default_binaryauthorization_policy.tmpl @@ -0,0 +1,13 @@ +{{- /* + The license inside this block applies to this file + Copyright 2024 Google Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/}} +obj = DefaultBinaryAuthorizationPolicy(d.Get("project").(string)) diff --git a/mmv1/templates/terraform/pre_delete/go/secret_version_deletion_policy.go.tmpl b/mmv1/templates/terraform/pre_delete/go/secret_version_deletion_policy.go.tmpl new file mode 100644 index 000000000000..d4beea4f6326 --- /dev/null +++ b/mmv1/templates/terraform/pre_delete/go/secret_version_deletion_policy.go.tmpl @@ -0,0 +1,22 @@ +{{- /* + The license inside this block applies to this file + Copyright 2024 Google Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/}} +deletionPolicy := d.Get("deletion_policy"); + +if deletionPolicy == "ABANDON" { + return nil +} else if deletionPolicy == "DISABLE" { + url, err = tpgresource.ReplaceVars(d, config, "{{"{{"}}SecretManagerBasePath{{"}}"}}{{"{{"}}name{{"}}"}}:disable") + if err != nil { + return err + } +} diff --git a/mmv1/templates/terraform/pre_delete/go/spanner_instance.go.tmpl b/mmv1/templates/terraform/pre_delete/go/spanner_instance.go.tmpl new file mode 100644 index 000000000000..c4319d1d2ad8 --- /dev/null +++ b/mmv1/templates/terraform/pre_delete/go/spanner_instance.go.tmpl @@ -0,0 +1,24 @@ + +if d.Get("force_destroy").(bool) { + backupsUrl, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}SpannerBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/instances/{{"{{"}}name{{"}}"}}/backups") + if err != nil { + return err + } + + resp, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: backupsUrl, + UserAgent: userAgent, + }) + if err != nil { + // API returns 200 if no backups exist but the instance still exists, hence the error check. + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("SpannerInstance %q", d.Id())) + } + + err = deleteSpannerBackups(d, config, resp, billingProject, userAgent) + if err != nil { + return err + } +} \ No newline at end of file diff --git a/mmv1/templates/terraform/pre_delete/go/sql_database_deletion_policy.tmpl b/mmv1/templates/terraform/pre_delete/go/sql_database_deletion_policy.tmpl new file mode 100644 index 000000000000..c111f346b167 --- /dev/null +++ b/mmv1/templates/terraform/pre_delete/go/sql_database_deletion_policy.tmpl @@ -0,0 +1,5 @@ +if deletionPolicy := d.Get("deletion_policy"); deletionPolicy == "ABANDON" { + // Allows for database to be abandoned without deletion to avoid deletion failing + // for Postgres databases in some circumstances due to existing SQL users + return nil +} \ No newline at end of file diff --git a/mmv1/templates/terraform/pre_delete/go/storage_hmac_key.go.tmpl b/mmv1/templates/terraform/pre_delete/go/storage_hmac_key.go.tmpl new file mode 100644 index 000000000000..cfcf3bb185ff --- /dev/null +++ b/mmv1/templates/terraform/pre_delete/go/storage_hmac_key.go.tmpl @@ -0,0 +1,40 @@ +getUrl, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}StorageBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/hmacKeys/{{"{{"}}access_id{{"}}"}}") +if err != nil { + return err +} + +getRes, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: project, + RawURL: getUrl, + UserAgent: userAgent, +}) +if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("StorageHmacKey %q", d.Id())) +} + +// HmacKeys need to be INACTIVE to be deleted and the API doesn't accept noop +// updates +if v := getRes["state"]; v == "ACTIVE" { + getRes["state"] = "INACTIVE" + updateUrl, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}StorageBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/hmacKeys/{{"{{"}}access_id{{"}}"}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Deactivating HmacKey %q: %#v", d.Id(), getRes) + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PUT", + Project: project, + RawURL: updateUrl, + UserAgent: userAgent, + Body: getRes, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error deactivating HmacKey %q: %s", d.Id(), err) + } +} + diff --git 
a/mmv1/templates/terraform/pre_delete/go/vertex_ai_force_delete.go.tmpl b/mmv1/templates/terraform/pre_delete/go/vertex_ai_force_delete.go.tmpl new file mode 100644 index 000000000000..08b164d8136f --- /dev/null +++ b/mmv1/templates/terraform/pre_delete/go/vertex_ai_force_delete.go.tmpl @@ -0,0 +1,7 @@ + +if v, ok := d.GetOk("force_destroy"); ok { + url, err = transport_tpg.AddQueryParams(url, map[string]string{"force": fmt.Sprintf("%v", v)}) + if err != nil { + return err + } +} diff --git a/mmv1/templates/terraform/pre_delete/modify_delete_url.erb b/mmv1/templates/terraform/pre_delete/modify_delete_url.erb index 657dc39012fa..738e5a9ad755 100644 --- a/mmv1/templates/terraform/pre_delete/modify_delete_url.erb +++ b/mmv1/templates/terraform/pre_delete/modify_delete_url.erb @@ -2,7 +2,7 @@ // in theory, we should find a way to disable the default URL and not construct // both, but that's a problem for another day. Today, we cheat. log.Printf("[DEBUG] replacing URL %q with a custom delete URL", url) -url, err = tpgresource.ReplaceVars(d, config, "<%= "{{#{object.__product.name}BasePath}}" -%><%=object.base_url-%>/{{name}}") +url, err = tpgresource.ReplaceVars(d, config, "{{<%=object.__product.name-%>BasePath}}<%=object.base_url-%>/{{name}}") if err != nil { return err } diff --git a/mmv1/templates/terraform/pre_read/go/cloudbuild_trigger.go.tmpl b/mmv1/templates/terraform/pre_read/go/cloudbuild_trigger.go.tmpl new file mode 100644 index 000000000000..861b31854090 --- /dev/null +++ b/mmv1/templates/terraform/pre_read/go/cloudbuild_trigger.go.tmpl @@ -0,0 +1,14 @@ +{{- /* + The license inside this block applies to this file + Copyright 2024 Google Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/}} + // To support import with the legacy id format. + url = strings.ReplaceAll(url, "/locations//", "/locations/global/") diff --git a/mmv1/templates/terraform/pre_read/go/monitoring_monitored_project.go.tmpl b/mmv1/templates/terraform/pre_read/go/monitoring_monitored_project.go.tmpl new file mode 100644 index 000000000000..b86bd083636d --- /dev/null +++ b/mmv1/templates/terraform/pre_read/go/monitoring_monitored_project.go.tmpl @@ -0,0 +1,22 @@ +{{- /* + The license inside this block applies to this file + Copyright 2024 Google Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/}} +name := d.Get("name").(string) +name = tpgresource.GetResourceNameFromSelfLink(name) +d.Set("name", name) +metricsScope := d.Get("metrics_scope").(string) +metricsScope = tpgresource.GetResourceNameFromSelfLink(metricsScope) +d.Set("metrics_scope", metricsScope) +url, err = tpgresource.ReplaceVars(d, config, "{{"{{"}}MonitoringBasePath{{"}}"}}v1/locations/global/metricsScopes/{{"{{"}}metrics_scope{{"}}"}}") +if err != nil { + return err +} diff --git a/mmv1/templates/terraform/pre_read/go/secret_version_is_secret_data_base64.go.tmpl b/mmv1/templates/terraform/pre_read/go/secret_version_is_secret_data_base64.go.tmpl new file mode 100644 index 000000000000..3f343555c57a --- /dev/null +++ b/mmv1/templates/terraform/pre_read/go/secret_version_is_secret_data_base64.go.tmpl @@ -0,0 +1,18 @@ +{{- /* + The license inside this block applies to this file + Copyright 2024 Google Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/}} +// Explicitly set the field to default value if unset +if _, ok := d.GetOkExists("is_secret_data_base64"); !ok { + if err := d.Set("is_secret_data_base64", false); err != nil { + return fmt.Errorf("Error setting is_secret_data_base64: %s", err) + } +} diff --git a/mmv1/templates/terraform/pre_update/go/agent_pool.go.tmpl b/mmv1/templates/terraform/pre_update/go/agent_pool.go.tmpl new file mode 100644 index 000000000000..0a62105efaf3 --- /dev/null +++ b/mmv1/templates/terraform/pre_update/go/agent_pool.go.tmpl @@ -0,0 +1,3 @@ +if err := waitForAgentPoolReady(d, config, d.Timeout(schema.TimeoutCreate) - time.Minute); err != nil { + return fmt.Errorf("Error waiting for AgentPool %q to be CREATED before updating: %q", d.Get("name").(string), err) +} \ No newline at end of file diff --git a/mmv1/templates/terraform/pre_update/go/alloydb_cluster.go.tmpl b/mmv1/templates/terraform/pre_update/go/alloydb_cluster.go.tmpl new file mode 100644 index 000000000000..0c893e92f2cd --- /dev/null +++ b/mmv1/templates/terraform/pre_update/go/alloydb_cluster.go.tmpl @@ -0,0 +1,72 @@ +// Restrict modification of cluster_type from PRIMARY to SECONDARY as it is an invalid operation +if d.HasChange("cluster_type") && d.Get("cluster_type") == "SECONDARY" { + return fmt.Errorf("Can not convert a primary cluster to a secondary cluster.") +} + +// Restrict setting secondary_config if cluster_type is PRIMARY +if d.Get("cluster_type") == "PRIMARY" && !tpgresource.IsEmptyValue(reflect.ValueOf(d.Get("secondary_config"))) { + return fmt.Errorf("Can not set secondary config for primary cluster.") +} + +// Implementation for cluster promotion +if d.HasChange("cluster_type") && d.Get("cluster_type") == "PRIMARY" { + + if !d.HasChange("secondary_config") || !tpgresource.IsEmptyValue(reflect.ValueOf(d.Get("secondary_config"))) { + return fmt.Errorf("Remove the secondary_config field to promote the cluster to primary cluster.") + } + + // If necassary precondition checks for cluster promotion is fine ONLY then + // Promote cluster as a separate implementation within the update logic + + promoteUrl := strings.Split(url, 
"?updateMask=")[0] + ":promote" + emptyObj := make(map[string]interface{}) + + // Remove promote changes from obj and updateMask + delete(obj, "clusterType") + delete(obj, "secondaryConfig") + + index := 0 + for _, label := range updateMask { + if label != "clusterType" && label != "secondaryConfig" { + updateMask[index] = label + index++ + } + } + updateMask = updateMask[:index] + + // Update url with the new updateMask + url := strings.Split(url, "?updateMask=")[0] + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: promoteUrl, + UserAgent: userAgent, + Body: emptyObj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error promoting Cluster: %s", err) + } + + err = AlloydbOperationWaitTime( + config, res, project, "Promoting Cluster", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + return fmt.Errorf("Error waiting to promote Cluster: %s", err) + } + + log.Printf("[DEBUG] Finished promoting Cluster %q: %#v", d.Id(), res) + +} \ No newline at end of file diff --git a/mmv1/templates/terraform/pre_update/go/bigquerydatatransfer_config.tmpl b/mmv1/templates/terraform/pre_update/go/bigquerydatatransfer_config.tmpl new file mode 100644 index 000000000000..fc28a618d867 --- /dev/null +++ b/mmv1/templates/terraform/pre_update/go/bigquerydatatransfer_config.tmpl @@ -0,0 +1,52 @@ +{{- /* + The license inside this block applies to this file + Copyright 2024 Google Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/}} +updateMask := []string{} +if v, ok := d.GetOk("service_account_name"); ok { + if v != nil && d.HasChange("service_account_name") { + updateMask = append(updateMask, "serviceAccountName") + } +} +if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") +} +if d.HasChange("destination_dataset_id") { + updateMask = append(updateMask, "destinationDatasetId") +} +if d.HasChange("schedule") { + updateMask = append(updateMask, "schedule") +} +if d.HasChange("schedule_options") { + updateMask = append(updateMask, "scheduleOptions") +} +if d.HasChange("email_preferences") { + updateMask = append(updateMask, "emailPreferences") +} +if d.HasChange("notification_pubsub_topic") { + updateMask = append(updateMask, "notificationPubsubTopic") +} +if d.HasChange("data_refresh_window_days") { + updateMask = append(updateMask, "dataRefreshWindowDays") +} +if d.HasChange("disabled") { + updateMask = append(updateMask, "disabled") +} +if d.HasChange("params") { + updateMask = append(updateMask, "params") +} + +// updateMask is a URL parameter but not present in the schema, so ReplaceVars +// won't set it +url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) +if err != nil { + return err +} diff --git a/mmv1/templates/terraform/pre_update/go/bigtable_app_profile.go.tmpl b/mmv1/templates/terraform/pre_update/go/bigtable_app_profile.go.tmpl new file mode 100644 index 000000000000..db10a1ed9146 --- /dev/null +++ b/mmv1/templates/terraform/pre_update/go/bigtable_app_profile.go.tmpl @@ -0,0 +1,38 @@ +{{- /* + The license inside this block applies to this file + Copyright 2024 Google Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/}} + +if d.HasChange("multi_cluster_routing_cluster_ids") && !tpgresource.StringInSlice(updateMask, "multiClusterRoutingUseAny") { + updateMask = append(updateMask, "multiClusterRoutingUseAny") +} + +// this api requires the body to define something for all values passed into +// the update mask, however, multi-cluster routing and single-cluster routing +// are conflicting, so we can't have them both in the update mask, despite +// both of them registering as changing. thus, we need to remove whichever +// one is not defined. +newRouting, oldRouting := d.GetChange("multi_cluster_routing_use_any") +if newRouting != oldRouting { + for i, val := range updateMask { + if val == "multiClusterRoutingUseAny" && newRouting.(bool) || + val == "singleClusterRouting" && oldRouting.(bool) { + updateMask = append(updateMask[0:i], updateMask[i+1:]...) 
+ break + } + } +} +// updateMask is a URL parameter but not present in the schema, so ReplaceVars +// won't set it +url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) +if err != nil { + return err +} \ No newline at end of file diff --git a/mmv1/templates/terraform/pre_update/go/certificate_manager_trust_config.go.tmpl b/mmv1/templates/terraform/pre_update/go/certificate_manager_trust_config.go.tmpl new file mode 100644 index 000000000000..a966dd00aa40 --- /dev/null +++ b/mmv1/templates/terraform/pre_update/go/certificate_manager_trust_config.go.tmpl @@ -0,0 +1,4 @@ +url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": "*" }) +if err != nil { + return err +} diff --git a/mmv1/templates/terraform/pre_update/go/cloudbuild_bitbucketserver_config.go.tmpl b/mmv1/templates/terraform/pre_update/go/cloudbuild_bitbucketserver_config.go.tmpl new file mode 100644 index 000000000000..48660dc6a80b --- /dev/null +++ b/mmv1/templates/terraform/pre_update/go/cloudbuild_bitbucketserver_config.go.tmpl @@ -0,0 +1,16 @@ +// remove connectedRepositories from updateMask +for i, field := range updateMask { + if field == "connectedRepositories" { + updateMask = append(updateMask[:i], updateMask[i+1:]...) + break + } +} +// reconstruct url +url, err = tpgresource.ReplaceVars(d, config, "{{"{{"}}CloudBuildBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/locations/{{"{{"}}location{{"}}"}}/bitbucketServerConfigs/{{"{{"}}config_id{{"}}"}}") +if err != nil { + return err +} +url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) +if err != nil { + return err +} diff --git a/mmv1/templates/terraform/pre_update/go/cloudbuild_trigger.go.tmpl b/mmv1/templates/terraform/pre_update/go/cloudbuild_trigger.go.tmpl new file mode 100644 index 000000000000..26b262783e00 --- /dev/null +++ b/mmv1/templates/terraform/pre_update/go/cloudbuild_trigger.go.tmpl @@ -0,0 +1,13 @@ +{{- /* + The license inside this block applies to this file + Copyright 2024 Google Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/}} + obj["id"] = d.Get("trigger_id") diff --git a/mmv1/templates/terraform/pre_update/go/containeranalysis_note.tmpl b/mmv1/templates/terraform/pre_update/go/containeranalysis_note.tmpl new file mode 100644 index 000000000000..49a07d0251c7 --- /dev/null +++ b/mmv1/templates/terraform/pre_update/go/containeranalysis_note.tmpl @@ -0,0 +1,22 @@ +{{- /* + The license inside this block applies to this file + Copyright 2024 Google Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/}} +updateMask := []string{} +if d.HasChange("attestation_authority.0.hint.0.human_readable_name") { + updateMask = append(updateMask, "attestationAuthority.hint.humanReadableName") +} +// updateMask is a URL parameter but not present in the schema, so ReplaceVars +// won't set it +url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) +if err != nil { + return err +} diff --git a/mmv1/templates/terraform/pre_update/go/containerattached_update.go.tmpl b/mmv1/templates/terraform/pre_update/go/containerattached_update.go.tmpl new file mode 100644 index 000000000000..a9c403919777 --- /dev/null +++ b/mmv1/templates/terraform/pre_update/go/containerattached_update.go.tmpl @@ -0,0 +1,33 @@ +// The generated code sets the wrong masks for the following fields. +newUpdateMask := []string{} +if d.HasChange("authorization.0.admin_users") { + newUpdateMask = append(newUpdateMask, "authorization.admin_users") +} +if d.HasChange("authorization.0.admin_groups") { + newUpdateMask = append(newUpdateMask, "authorization.admin_groups") +} +if d.HasChange("logging_config") { + newUpdateMask = append(newUpdateMask, "logging_config.component_config.enable_components") +} +if d.HasChange("monitoring_config") { + newUpdateMask = append(newUpdateMask, "monitoring_config.managed_prometheus_config.enabled") +} +if d.HasChange("binary_authorization") { + newUpdateMask = append(newUpdateMask, "binary_authorization.evaluation_mode") +} +if d.HasChange("proxy_config") { + newUpdateMask = append(newUpdateMask, "proxy_config.kubernetes_secret.name") + newUpdateMask = append(newUpdateMask, "proxy_config.kubernetes_secret.namespace") +} +// Pull out any other set fields from the generated mask. +for _, mask := range updateMask { + if mask == "authorization" || mask == "loggingConfig" || mask == "monitoringConfig" || mask == "binaryAuthorization" || mask == "proxyConfig" { + continue + } + newUpdateMask = append(newUpdateMask, mask) +} +// Overwrite the previously set mask. +url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(newUpdateMask, ",")}) +if err != nil { + return err +} diff --git a/mmv1/templates/terraform/pre_update/go/datafusion_instance_update.go.tmpl b/mmv1/templates/terraform/pre_update/go/datafusion_instance_update.go.tmpl new file mode 100644 index 000000000000..a92086dcfffc --- /dev/null +++ b/mmv1/templates/terraform/pre_update/go/datafusion_instance_update.go.tmpl @@ -0,0 +1,33 @@ +{{- /* + The license inside this block applies to this file + Copyright 2024 Google Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/}} +updateMask := []string{} + +if d.HasChange("enable_stackdriver_logging") { + updateMask = append(updateMask, "enableStackdriverLogging") +} + +if d.HasChange("enable_stackdriver_monitoring") { + updateMask = append(updateMask, "enableStackdriverMonitoring") +} + +if d.HasChange("enable_rbac") { + updateMask = append(updateMask, "enableRbac") +} + +// updateMask is a URL parameter but not present in the schema, so ReplaceVars +// won't set it + +url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) +if err != nil { + return err +} \ No newline at end of file diff --git a/mmv1/templates/terraform/pre_update/go/datastream_stream.go.tmpl b/mmv1/templates/terraform/pre_update/go/datastream_stream.go.tmpl new file mode 100644 index 000000000000..8d9e5710b201 --- /dev/null +++ b/mmv1/templates/terraform/pre_update/go/datastream_stream.go.tmpl @@ -0,0 +1,39 @@ +{{- /* + The license inside this block applies to this file + Copyright 2024 Google Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/}} + +if d.HasChange("desired_state") { + updateMask = append(updateMask, "state") +} + +// Override the previous setting of updateMask to include state. +// updateMask is a URL parameter but not present in the schema, so ReplaceVars +// won't set it +url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) +if err != nil { + return err +} + +// lables and terraform_labels fields are overriden with the labels before updating inside the function waitForDatastreamStreamReady +labels := d.Get("labels") +terraformLabels := d.Get("terraform_labels") + +if err := waitForDatastreamStreamReady(d, config, d.Timeout(schema.TimeoutCreate) - time.Minute); err != nil { + return fmt.Errorf("Error waiting for Stream %q to be NOT_STARTED, RUNNING, or PAUSED before updating: %q", d.Get("name").(string), err) +} + +if err := d.Set("labels", labels); err != nil { + return fmt.Errorf("Error setting back labels field: %s", err) +} +if err := d.Set("terraform_labels", terraformLabels); err != nil { + return fmt.Errorf("Error setting back terraform_labels field: %s", err) +} \ No newline at end of file diff --git a/mmv1/templates/terraform/pre_update/go/firebase_database_instance.go.tmpl b/mmv1/templates/terraform/pre_update/go/firebase_database_instance.go.tmpl new file mode 100644 index 000000000000..628abd321399 --- /dev/null +++ b/mmv1/templates/terraform/pre_update/go/firebase_database_instance.go.tmpl @@ -0,0 +1,33 @@ +{{- /* + The license inside this block applies to this file + Copyright 2024 Google Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/}} + +// start of customized code +if d.HasChange("desired_state") { + if p, ok := d.GetOk("desired_state"); ok && p.(string) != d.Get("state").(string) { + switch p.(string) { + case "ACTIVE": + if err := enableRTDB(config, d, project, billingProject, userAgent); err != nil { + return err + } + case "DISABLED": + if err := disableRTDB(config, d, project, billingProject, userAgent); err != nil { + return err + } + default: + return fmt.Errorf("Unsupported value in field `desired_state`: %v", p) + } + } + // firebasedatabase does not update UpdateDatabaseInstance endpoint now. + return nil +} +// end of customized code diff --git a/mmv1/templates/terraform/pre_update/go/kms_crypto_key_version.go.tmpl b/mmv1/templates/terraform/pre_update/go/kms_crypto_key_version.go.tmpl new file mode 100644 index 000000000000..7db52d15e778 --- /dev/null +++ b/mmv1/templates/terraform/pre_update/go/kms_crypto_key_version.go.tmpl @@ -0,0 +1,20 @@ +// The generated code does not support conditional update masks. +newUpdateMask := []string{} +if d.HasChange("state") { + newUpdateMask = append(newUpdateMask, "state") +} + +// Validate updated fields based on protection level (EXTERNAL vs EXTERNAL_VPC) +if d.HasChange("external_protection_level_options") { + if d.Get("protection_level") == "EXTERNAL" { + newUpdateMask = append(newUpdateMask, "externalProtectionLevelOptions.externalKeyUri") + } else if d.Get("protection_level") == "EXTERNAL_VPC" { + newUpdateMask = append(newUpdateMask, "externalProtectionLevelOptions.ekmConnectionKeyPath") + } +} +// updateMask is a URL parameter but not present in the schema, so ReplaceVars +// won't set it +url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(newUpdateMask, ",")}) +if err != nil { + return err +} diff --git a/mmv1/templates/terraform/pre_update/go/network_endpoints.go.tmpl b/mmv1/templates/terraform/pre_update/go/network_endpoints.go.tmpl new file mode 100644 index 000000000000..72ed12375f8a --- /dev/null +++ b/mmv1/templates/terraform/pre_update/go/network_endpoints.go.tmpl @@ -0,0 +1,62 @@ +detachUrl, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}ComputeBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/zones/{{"{{"}}zone{{"}}"}}/networkEndpointGroups/{{"{{"}}network_endpoint_group{{"}}"}}/detachNetworkEndpoints") +o, n := d.GetChange("network_endpoints") + +oldEndpoints := make(map[NetworkEndpointsNetworkEndpoint]struct{}) +newEndpoints := make(map[NetworkEndpointsNetworkEndpoint]struct{}) + +for _, e := range o.(*schema.Set).List() { + oldEndpoints[NetworkEndpointsNetworkEndpointConvertToStruct(e)] = struct{}{} +} + +for _, e := range n.(*schema.Set).List() { + newEndpoints[NetworkEndpointsNetworkEndpointConvertToStruct(e)] = struct{}{} +} + +// We want to ignore any endpoints that are shared between the two. +endpointsToKeep := make(map[NetworkEndpointsNetworkEndpoint]struct{}) +for e := range oldEndpoints { + if _, ok := newEndpoints[e]; ok { + endpointsToKeep[e] = struct{}{} + } +} +log.Printf("number of old endpoints: %v\n", len(oldEndpoints)) +log.Printf("number of new endpoints: %v\n", len(newEndpoints)) +log.Printf("number of shared endpoints: %v\n", len(endpointsToKeep)) + +for e := range endpointsToKeep { + // Removing all shared endpoints from the old endpoints yields the list of endpoints to detach. 
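// Worked micro-example (editorial note): old = {A, B, C}, new = {B, C, D}
// => endpointsToKeep = {B, C}, so oldEndpoints shrinks to {A} (to detach)
// and newEndpoints shrinks to {D} (to attach).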
+ delete(oldEndpoints, e) + // Removing all shared endpoints from the new endpoints yields the list of endpoints to attach. + delete(newEndpoints, e) +} + +var endpointsToDetach []interface{} +for e := range oldEndpoints { + endpointsToDetach = append(endpointsToDetach, NetworkEndpointsNetworkEndpointConvertToAny(e)) +} +var endpointsToAttach []interface{} +for e := range newEndpoints { + endpointsToAttach = append(endpointsToAttach, NetworkEndpointsNetworkEndpointConvertToAny(e)) +} + +log.Printf("number of endpoints to detach: %v\n", len(endpointsToDetach)) +log.Printf("number of endpoints to attach: %v\n", len(endpointsToAttach)) + + +chunkSize := 500 // API only accepts 500 endpoints at a time + +_, err = networkEndpointsPaginatedMutate(d, endpointsToDetach, config, userAgent, detachUrl, project, billingProject, chunkSize, false) +if err != nil { + // networkEndpointsPaginatedMutate already adds error description + return err +} + +lastPage, err := networkEndpointsPaginatedMutate(d, endpointsToAttach, config, userAgent, url, project, billingProject, chunkSize, true) +if err != nil { + // networkEndpointsPaginatedMutate already adds error description + return err +} + +obj = map[string]interface{}{ + "networkEndpoints": lastPage, +} \ No newline at end of file diff --git a/mmv1/templates/terraform/pre_update/go/notebooks_runtime.go.tmpl b/mmv1/templates/terraform/pre_update/go/notebooks_runtime.go.tmpl new file mode 100644 index 000000000000..34d7c37fd14d --- /dev/null +++ b/mmv1/templates/terraform/pre_update/go/notebooks_runtime.go.tmpl @@ -0,0 +1,67 @@ +// remove virtualMachine from updateMask +callSwitch := false +for i, field := range updateMask { + if field == "virtualMachine" { + callSwitch = true + updateMask = append(updateMask[:i], updateMask[i+1:]...)
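+ // The virtualMachine change is applied through the separate runtimes ":switch" call below, so the entry is dropped from the generated updateMask here.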
+ break + } +} + +if callSwitch { + // reconstruct url + url, err = tpgresource.ReplaceVars(d, config, "{{"{{"}}NotebooksBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/locations/{{"{{"}}location{{"}}"}}/runtimes/{{"{{"}}name{{"}}"}}") + if err != nil { + return err + } + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + state := d.Get("state").(string) + if state == "INITIALIZING" { + time.Sleep(300 * time.Second) + } + + switchURL, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}NotebooksBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/locations/{{"{{"}}location{{"}}"}}/runtimes/{{"{{"}}name{{"}}"}}:switch") + if err != nil { + return err + } + log.Printf("[DEBUG] Switching Runtime: %q", d.Id()) + + switchObj := make(map[string]interface{}) + machineType := d.Get("virtual_machine.0.virtual_machine_config.0.machine_type") + switchObj["machineType"] = machineType + + acceleratorConfigInterface := make(map[string]interface{}) + _, ok := d.GetOk("virtual_machine.0.virtual_machine_config.0.accelerator_config") + if ok { + acceleratorConfigInterface["coreCount"] = d.Get("virtual_machine.0.virtual_machine_config.0.accelerator_config.0.core_count") + acceleratorConfigInterface["type"] = d.Get("virtual_machine.0.virtual_machine_config.0.accelerator_config.0.type") + } + switchObj["acceleratorConfig"] = acceleratorConfigInterface + + + dRes, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + RawURL: switchURL, + UserAgent: userAgent, + Body: switchObj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error switching Runtime: %s", err) + } + + var opRes map[string]interface{} + err = NotebooksOperationWaitTimeWithResponse( + config, dRes, &opRes, project, "Switching runtime", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return fmt.Errorf("Error switching runtime: %s", err) + } + +} diff --git a/mmv1/templates/terraform/pre_update/go/privateca_certificate_authority.go.tmpl b/mmv1/templates/terraform/pre_update/go/privateca_certificate_authority.go.tmpl new file mode 100644 index 000000000000..a921cf480800 --- /dev/null +++ b/mmv1/templates/terraform/pre_update/go/privateca_certificate_authority.go.tmpl @@ -0,0 +1,43 @@ +if d.HasChange("subordinate_config") { + if d.Get("type").(string) != "SUBORDINATE" { + return fmt.Errorf("`subordinate_config` can only be configured on subordinate CA") + } + + // Activate subordinate CA in `AWAITING_USER_ACTIVATION` state. + if d.Get("state") == "AWAITING_USER_ACTIVATION" { + if _, ok := d.GetOk("pem_ca_certificate"); ok { + // Third party issuer + log.Printf("[DEBUG] Activating CertificateAuthority with third party issuer") + if err := activateSubCAWithThirdPartyIssuer(config, d, project, billingProject, userAgent); err != nil { + return fmt.Errorf("Error activating subordinate CA with third party issuer: %v", err) + } + } else { + // First party issuer + log.Printf("[DEBUG] Activating CertificateAuthority with first party issuer") + if err := activateSubCAWithFirstPartyIssuer(config, d, project, billingProject, userAgent); err != nil { + return fmt.Errorf("Error activating subordinate CA with first party issuer: %v", err) + } + } + log.Printf("[DEBUG] CertificateAuthority activated") + } +} + +log.Printf("[DEBUG] checking desired_state") +if d.HasChange("desired_state") { + // Currently, most CA state update operations are not idempotent. 
+ // Try to change state only if the current `state` does not match the `desired_state`. + if p, ok := d.GetOk("desired_state"); ok && p.(string) != d.Get("state").(string) { + switch p.(string) { + case "ENABLED": + if err := enableCA(config, d, project, billingProject, userAgent); err != nil { + return fmt.Errorf("Error enabling CertificateAuthority: %v", err) + } + case "DISABLED": + if err := disableCA(config, d, project, billingProject, userAgent); err != nil { + return fmt.Errorf("Error disabling CertificateAuthority: %v", err) + } + default: + return fmt.Errorf("Unsupported value in field `desired_state`") + } + } +} diff --git a/mmv1/templates/terraform/pre_update/go/secret_manager_secret.go.tmpl b/mmv1/templates/terraform/pre_update/go/secret_manager_secret.go.tmpl new file mode 100644 index 000000000000..0cd9dec5816d --- /dev/null +++ b/mmv1/templates/terraform/pre_update/go/secret_manager_secret.go.tmpl @@ -0,0 +1,35 @@ +{{- /* + The license inside this block applies to this file + Copyright 2024 Google Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/}} +replicationProp, err := expandSecretManagerSecretReplication(d.Get("replication"), d, config) +if err != nil { + return err +} else if v, ok := d.GetOkExists("replication"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, replicationProp)) { + obj["replication"] = replicationProp +} + +if d.HasChange("replication") { + updateMask = append(updateMask, "replication") +} + +// As the API expects only one of ttl or expireTime +if d.HasChange("ttl") && !d.HasChange("expire_time") { + delete(obj, "expireTime") +} + +// Refreshing updateMask after adding extra schema entries +url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) +if err != nil { + return err +} + +log.Printf("[DEBUG] Update URL %q: %v", d.Id(), url) diff --git a/mmv1/templates/terraform/pre_update/go/shared_reservation_update.go.tmpl b/mmv1/templates/terraform/pre_update/go/shared_reservation_update.go.tmpl new file mode 100644 index 000000000000..fa4b9203fbce --- /dev/null +++ b/mmv1/templates/terraform/pre_update/go/shared_reservation_update.go.tmpl @@ -0,0 +1,11 @@ +if d.HasChange("share_settings") { + url, err = tpgresource.ReplaceVars(d, config, "{{"{{"}}ComputeBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/zones/{{"{{"}}zone{{"}}"}}/reservations/{{"{{"}}name{{"}}"}}") + if err != nil { + return err + } + urlUpdateMask := obj["urlUpdateMask"] + if urlUpdateMask != nil { + url = url + urlUpdateMask.(string) + delete(obj, "urlUpdateMask") + } +} \ No newline at end of file diff --git a/mmv1/templates/terraform/pre_update/go/spanner_database.go.tmpl b/mmv1/templates/terraform/pre_update/go/spanner_database.go.tmpl new file mode 100644 index 000000000000..0d28729a67eb --- /dev/null +++ b/mmv1/templates/terraform/pre_update/go/spanner_database.go.tmpl @@ -0,0 +1,30 @@ +{{- /* + The license inside this block applies to this file + Copyright 2024 Google Inc. 
+ Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/}} + +if obj["statements"] != nil { + if len(obj["statements"].([]string)) == 0 { + // Return early to avoid making an API call that errors, + // due to containing no DDL SQL statements + d.Partial(false) + return resourceSpannerDatabaseRead(d, meta) + } +} + +if resourceSpannerDBVirtualUpdate(d, ResourceSpannerDatabase().Schema) { + if d.Get("deletion_protection") != nil { + if err := d.Set("deletion_protection", d.Get("deletion_protection")); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + } + return nil +} diff --git a/mmv1/templates/terraform/pre_update/go/spanner_instance.go.tmpl b/mmv1/templates/terraform/pre_update/go/spanner_instance.go.tmpl new file mode 100644 index 000000000000..f916db0a5364 --- /dev/null +++ b/mmv1/templates/terraform/pre_update/go/spanner_instance.go.tmpl @@ -0,0 +1,8 @@ +if resourceSpannerInstanceVirtualUpdate(d, ResourceSpannerInstance().Schema) { + if d.Get("force_destroy") != nil { + if err := d.Set("force_destroy", d.Get("force_destroy")); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + } + return nil +} diff --git a/mmv1/templates/terraform/pre_update/go/tagtemplate_fields.go.tmpl b/mmv1/templates/terraform/pre_update/go/tagtemplate_fields.go.tmpl new file mode 100644 index 000000000000..325df245db41 --- /dev/null +++ b/mmv1/templates/terraform/pre_update/go/tagtemplate_fields.go.tmpl @@ -0,0 +1,14 @@ +updateMask := []string{} + +if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") +} + +// updateMask is a URL parameter but not present in the schema, so ReplaceVars +// won't set it +url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) +if err != nil { + return err +} + +if len(updateMask) > 0 { \ No newline at end of file diff --git a/mmv1/templates/terraform/pre_update/go/vertex_ai_index.go.tmpl b/mmv1/templates/terraform/pre_update/go/vertex_ai_index.go.tmpl new file mode 100644 index 000000000000..8f5c37a87477 --- /dev/null +++ b/mmv1/templates/terraform/pre_update/go/vertex_ai_index.go.tmpl @@ -0,0 +1,22 @@ +newUpdateMask := []string{} + +if d.HasChange("metadata.0.contents_delta_uri") { + // Use the current value of isCompleteOverwrite when updating contentsDeltaUri + newUpdateMask = append(newUpdateMask, "metadata.contentsDeltaUri") + newUpdateMask = append(newUpdateMask, "metadata.isCompleteOverwrite") +} + +for _, mask := range updateMask { + // Use granular update masks instead of 'metadata' to avoid the following error: + // 'If `contents_delta_gcs_uri` is set as part of `index.metadata`, then no other Index fields can be also updated as part of the same update call.' 
+ if mask == "metadata" { + continue + } + newUpdateMask = append(newUpdateMask, mask) +} + +// Refreshing updateMask after adding extra schema entries +url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(newUpdateMask, ",")}) +if err != nil { + return err +} diff --git a/mmv1/templates/terraform/pre_update/go/workbench_instance.go.tmpl b/mmv1/templates/terraform/pre_update/go/workbench_instance.go.tmpl new file mode 100644 index 000000000000..847a0bcd1311 --- /dev/null +++ b/mmv1/templates/terraform/pre_update/go/workbench_instance.go.tmpl @@ -0,0 +1,45 @@ +name := d.Get("name").(string) +if d.HasChange("gce_setup.0.machine_type") || d.HasChange("gce_setup.0.accelerator_configs") || d.HasChange("gce_setup.0.shielded_instance_config"){ + state := d.Get("state").(string) + + if state != "STOPPED" { + dRes, err := modifyWorkbenchInstanceState(config, d, project, billingProject, userAgent, "stop") + if err != nil { + return err + } + + if err := waitForWorkbenchOperation(config, d, project, billingProject, userAgent, dRes); err != nil { + return fmt.Errorf("Error stopping Workbench Instance: %s", err) + } + + } else { + log.Printf("[DEBUG] Workbench Instance %q has state %q.", name, state) + } + +} else { + log.Printf("[DEBUG] Workbench Instance %q need not be stopped for the update.", name) +} + +// Build custom mask since the notebooks API does not support gce_setup as a valid mask +newUpdateMask := []string{} +if d.HasChange("gce_setup.0.machine_type") { + newUpdateMask = append(newUpdateMask, "gce_setup.machine_type") +} +if d.HasChange("gce_setup.0.accelerator_configs") { + newUpdateMask = append(newUpdateMask, "gce_setup.accelerator_configs") +} +if d.HasChange("gce_setup.0.shielded_instance_config") { + newUpdateMask = append(newUpdateMask, "gce_setup.shielded_instance_config") +} +if d.HasChange("gce_setup.0.metadata") { + newUpdateMask = append(newUpdateMask, "gceSetup.metadata") +} +if d.HasChange("effective_labels") { + newUpdateMask = append(newUpdateMask, "labels") +} + +// Overwrite the previously set mask. +url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(newUpdateMask, ",")}) +if err != nil { + return err +} diff --git a/mmv1/templates/terraform/state_migrations/go/beyondcorp_app_gateway.go.tmpl b/mmv1/templates/terraform/state_migrations/go/beyondcorp_app_gateway.go.tmpl new file mode 100644 index 000000000000..fd604b2d6042 --- /dev/null +++ b/mmv1/templates/terraform/state_migrations/go/beyondcorp_app_gateway.go.tmpl @@ -0,0 +1,106 @@ +func resourceBeyondcorpAppGatewayResourceV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `ID of the AppGateway.`, + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `An arbitrary user-provided name for the AppGateway.`, + }, + "host_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"HOST_TYPE_UNSPECIFIED", "GCP_REGIONAL_MIG", ""}), + Description: `The type of hosting used by the AppGateway. Default value: "HOST_TYPE_UNSPECIFIED" Possible values: ["HOST_TYPE_UNSPECIFIED", "GCP_REGIONAL_MIG"]`, + Default: "HOST_TYPE_UNSPECIFIED", + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: `Resource labels to represent user provided metadata. 
+ + +**Note**: This field is non-authoritative, and will only manage the labels present in your configuration. +Please refer to the field 'effective_labels' for all of the labels present on the resource.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "region": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The region of the AppGateway.`, + }, + "type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"TYPE_UNSPECIFIED", "TCP_PROXY", ""}), + Description: `The type of network connectivity used by the AppGateway. Default value: "TYPE_UNSPECIFIED" Possible values: ["TYPE_UNSPECIFIED", "TCP_PROXY"]`, + Default: "TYPE_UNSPECIFIED", + }, + "allocated_connections": { + Type: schema.TypeList, + Computed: true, + Description: `A list of connections allocated for the Gateway.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ingress_port": { + Type: schema.TypeInt, + Optional: true, + Description: `The ingress port of an allocated connection.`, + }, + "psc_uri": { + Type: schema.TypeString, + Optional: true, + Description: `The PSC uri of an allocated connection.`, + }, + }, + }, + }, + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + ForceNew: true, + Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `Represents the different states of a AppGateway.`, + }, + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + ForceNew: true, + Description: `The combination of labels configured directly on the resource + and default labels configured on the provider.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "uri": { + Type: schema.TypeString, + Computed: true, + Description: `Server-defined URI for this resource.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func ResourceBeyondcorpAppGatewayUpgradeV0(_ context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { + return tpgresource.TerraformLabelsStateUpgrade(rawState) +} \ No newline at end of file diff --git a/mmv1/templates/terraform/state_migrations/go/big_query_job.go.tmpl b/mmv1/templates/terraform/state_migrations/go/big_query_job.go.tmpl new file mode 100644 index 000000000000..71381431439a --- /dev/null +++ b/mmv1/templates/terraform/state_migrations/go/big_query_job.go.tmpl @@ -0,0 +1,948 @@ +func resourceBigQueryJobResourceV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "copy": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Copies a table.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "source_tables": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: `Source tables to copy.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "table_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkRelativePaths, + Description: `The table. 
Can be specified '{{"{{"}}table_id{{"}}"}}' if 'project_id' and 'dataset_id' are also set, +or of the form 'projects/{{"{{"}}project{{"}}"}}/datasets/{{"{{"}}dataset_id{{"}}"}}/tables/{{"{{"}}table_id{{"}}"}}' if not.`, + }, + "dataset_id": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The ID of the dataset containing this table.`, + }, + "project_id": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The ID of the project containing this table.`, + }, + }, + }, + }, + "create_disposition": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"CREATE_IF_NEEDED", "CREATE_NEVER", ""}), + Description: `Specifies whether the job is allowed to create new tables. The following values are supported: +CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. +CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. +Creation, truncation and append actions occur as one atomic update upon job completion Default value: "CREATE_IF_NEEDED" Possible values: ["CREATE_IF_NEEDED", "CREATE_NEVER"]`, + Default: "CREATE_IF_NEEDED", + }, + "destination_encryption_configuration": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Custom encryption configuration (e.g., Cloud KMS keys)`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. +The BigQuery Service Account associated with your project requires access to this encryption key.`, + }, + "kms_key_version": { + Type: schema.TypeString, + Computed: true, + Description: `Describes the Cloud KMS encryption key version used to protect destination BigQuery table.`, + }, + }, + }, + }, + "destination_table": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The destination table.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "table_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkRelativePaths, + Description: `The table. Can be specified '{{"{{"}}table_id{{"}}"}}' if 'project_id' and 'dataset_id' are also set, +or of the form 'projects/{{"{{"}}project{{"}}"}}/datasets/{{"{{"}}dataset_id{{"}}"}}/tables/{{"{{"}}table_id{{"}}"}}' if not.`, + }, + "dataset_id": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The ID of the dataset containing this table.`, + }, + "project_id": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The ID of the project containing this table.`, + }, + }, + }, + }, + "write_disposition": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"WRITE_TRUNCATE", "WRITE_APPEND", "WRITE_EMPTY", ""}), + Description: `Specifies the action that occurs if the destination table already exists. The following values are supported: +WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. +WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. 
+WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. +Each action is atomic and only occurs if BigQuery is able to complete the job successfully. +Creation, truncation and append actions occur as one atomic update upon job completion. Default value: "WRITE_EMPTY" Possible values: ["WRITE_TRUNCATE", "WRITE_APPEND", "WRITE_EMPTY"]`, + Default: "WRITE_EMPTY", + }, + }, + }, + ExactlyOneOf: []string{"query", "load", "copy", "extract"}, + }, + "extract": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Configures an extract job.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "destination_uris": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: `A list of fully-qualified Google Cloud Storage URIs where the extracted table should be written.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "compression": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The compression type to use for exported files. Possible values include GZIP, DEFLATE, SNAPPY, and NONE. +The default value is NONE. DEFLATE and SNAPPY are only supported for Avro.`, + Default: "NONE", + }, + "destination_format": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The exported file format. Possible values include CSV, NEWLINE_DELIMITED_JSON and AVRO for tables and SAVED_MODEL for models. +The default value for tables is CSV. Tables with nested or repeated fields cannot be exported as CSV. +The default value for models is SAVED_MODEL.`, + }, + "field_delimiter": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `When extracting data in CSV format, this defines the delimiter to use between fields in the exported data. +Default is ','`, + }, + "print_header": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Whether to print out a header row in the results. Default is true.`, + Default: true, + }, + "source_model": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `A reference to the model being exported.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dataset_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The ID of the dataset containing this model.`, + }, + "model_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The ID of the model.`, + }, + "project_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The ID of the project containing this model.`, + }, + }, + }, + ExactlyOneOf: []string{"extract.0.source_table", "extract.0.source_model"}, + }, + "source_table": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `A reference to the table being exported.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "table_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkRelativePaths, + Description: `The table. 
Can be specified '{{"{{"}}table_id{{"}}"}}' if 'project_id' and 'dataset_id' are also set, +or of the form 'projects/{{"{{"}}project{{"}}"}}/datasets/{{"{{"}}dataset_id{{"}}"}}/tables/{{"{{"}}table_id{{"}}"}}' if not.`, + }, + "dataset_id": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The ID of the dataset containing this table.`, + }, + "project_id": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The ID of the project containing this table.`, + }, + }, + }, + ExactlyOneOf: []string{"extract.0.source_table", "extract.0.source_model"}, + }, + "use_avro_logical_types": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Whether to use logical types when extracting to AVRO format.`, + }, + }, + }, + ExactlyOneOf: []string{"query", "load", "copy", "extract"}, + }, + "job_timeout_ms": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: `The labels associated with this job. You can use these to organize and group your jobs. + + +**Note**: This field is non-authoritative, and will only manage the labels present in your configuration. +Please refer to the field 'effective_labels' for all of the labels present on the resource.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "load": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Configures a load job.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "destination_table": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: `The destination table to load the data into.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "table_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkRelativePaths, + Description: `The table. Can be specified '{{"{{"}}table_id{{"}}"}}' if 'project_id' and 'dataset_id' are also set, +or of the form 'projects/{{"{{"}}project{{"}}"}}/datasets/{{"{{"}}dataset_id{{"}}"}}/tables/{{"{{"}}table_id{{"}}"}}' if not.`, + }, + "dataset_id": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The ID of the dataset containing this table.`, + }, + "project_id": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The ID of the project containing this table.`, + }, + }, + }, + }, + "source_uris": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: `The fully-qualified URIs that point to your data in Google Cloud. +For Google Cloud Storage URIs: Each URI can contain one '\*' wildcard character +and it must come after the 'bucket' name. Size limits related to load jobs apply +to external data sources. For Google Cloud Bigtable URIs: Exactly one URI can be +specified and it has be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. +For Google Cloud Datastore backups: Exactly one URI can be specified. 
Also, the '\*' wildcard character is not allowed.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "allow_jagged_rows": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Accept rows that are missing trailing optional columns. The missing values are treated as nulls. +If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, +an invalid error is returned in the job result. The default value is false. Only applicable to CSV, ignored for other formats.`, + Default: false, + }, + "allow_quoted_newlines": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. +The default value is false.`, + Default: false, + }, + "autodetect": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Indicates if we should automatically infer the options and schema for CSV and JSON sources.`, + }, + "create_disposition": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"CREATE_IF_NEEDED", "CREATE_NEVER", ""}), + Description: `Specifies whether the job is allowed to create new tables. The following values are supported: +CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. +CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. +Creation, truncation and append actions occur as one atomic update upon job completion Default value: "CREATE_IF_NEEDED" Possible values: ["CREATE_IF_NEEDED", "CREATE_NEVER"]`, + Default: "CREATE_IF_NEEDED", + }, + "destination_encryption_configuration": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Custom encryption configuration (e.g., Cloud KMS keys)`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. +The BigQuery Service Account associated with your project requires access to this encryption key.`, + }, + "kms_key_version": { + Type: schema.TypeString, + Computed: true, + Description: `Describes the Cloud KMS encryption key version used to protect destination BigQuery table.`, + }, + }, + }, + }, + "encoding": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. +The default value is UTF-8. BigQuery decodes the data after the raw, binary data +has been split using the values of the quote and fieldDelimiter properties.`, + Default: "UTF-8", + }, + "field_delimiter": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character. +To use a character in the range 128-255, you must encode the character as UTF8. BigQuery converts +the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the +data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator. 
+The default value is a comma (',').`, + }, + "ignore_unknown_values": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Indicates if BigQuery should allow extra values that are not represented in the table schema. +If true, the extra values are ignored. If false, records with extra columns are treated as bad records, +and if there are too many bad records, an invalid error is returned in the job result. +The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: +CSV: Trailing columns +JSON: Named values that don't match any column names`, + Default: false, + }, + "json_extension": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `If sourceFormat is set to newline-delimited JSON, indicates whether it should be processed as a JSON variant such as GeoJSON. +For a sourceFormat other than JSON, omit this field. If the sourceFormat is newline-delimited JSON: - for newline-delimited +GeoJSON: set to GEOJSON.`, + }, + "max_bad_records": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: `The maximum number of bad records that BigQuery can ignore when running the job. If the number of bad records exceeds this value, +an invalid error is returned in the job result. The default value is 0, which requires that all records are valid.`, + Default: 0, + }, + "null_marker": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Specifies a string that represents a null value in a CSV file. For example, if you specify "\N", BigQuery interprets "\N" as a null value +when loading a CSV file. The default value is the empty string. If you set this property to a custom value, BigQuery throws an error if an +empty string is present for all data types except for STRING and BYTE. For STRING and BYTE columns, BigQuery interprets the empty string as +an empty value.`, + Default: "", + }, + "parquet_options": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Parquet Options for load and make external tables.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_list_inference": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `If sourceFormat is set to PARQUET, indicates whether to use schema inference specifically for Parquet LIST logical type.`, + AtLeastOneOf: []string{"load.0.parquet_options.0.enum_as_string", "load.0.parquet_options.0.enable_list_inference"}, + }, + "enum_as_string": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `If sourceFormat is set to PARQUET, indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.`, + }, + }, + }, + }, + "projection_fields": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `If sourceFormat is set to "DATASTORE_BACKUP", indicates which entity properties to load into BigQuery from a Cloud Datastore backup. +Property names are case sensitive and must be top-level properties. If no properties are specified, BigQuery loads all properties. +If any named property isn't found in the Cloud Datastore backup, an invalid error is returned in the job result.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "quote": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The value that is used to quote data sections in a CSV file. 
BigQuery converts the string to ISO-8859-1 encoding, +and then uses the first byte of the encoded string to split the data in its raw, binary state. +The default value is a double-quote ('"'). If your data does not contain quoted sections, set the property value to an empty string. +If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true.`, + }, + "schema_update_options": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Allows the schema of the destination table to be updated as a side effect of the load job if a schema is autodetected or +supplied in the job configuration. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; +when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. +For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: +ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. +ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "skip_leading_rows": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + ValidateFunc: validation.IntAtLeast(0), + Description: `The number of rows at the top of a CSV file that BigQuery will skip when loading the data. +The default value is 0. This property is useful if you have header rows in the file that should be skipped. +When autodetect is on, the behavior is the following: +skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, +the row is read as data. Otherwise data is read starting from the second row. +skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. +skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, +row N is just skipped. Otherwise row N is used to extract column names for the detected schema.`, + Default: 0, + }, + "source_format": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The format of the data files. For CSV files, specify "CSV". For datastore backups, specify "DATASTORE_BACKUP". +For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For Avro, specify "AVRO". For parquet, specify "PARQUET". +For orc, specify "ORC". [Beta] For Bigtable, specify "BIGTABLE". +The default value is CSV.`, + Default: "CSV", + }, + "time_partitioning": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Time-based partitioning specification for the destination table.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The only type supported is DAY, which will generate one partition per day. Providing an empty string used to cause an error, +but in OnePlatform the field will be treated as unset.`, + }, + "expiration_ms": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Number of milliseconds for which to keep the storage for a partition. 
A wrapper is used here because 0 is an invalid value.`, + }, + "field": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `If not set, the table is partitioned by pseudo column '_PARTITIONTIME'; if set, the table is partitioned by this field. +The field must be a top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or REQUIRED. +A wrapper is used here because an empty string is an invalid value.`, + }, + }, + }, + }, + "write_disposition": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"WRITE_TRUNCATE", "WRITE_APPEND", "WRITE_EMPTY", ""}), + Description: `Specifies the action that occurs if the destination table already exists. The following values are supported: +WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. +WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. +WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. +Each action is atomic and only occurs if BigQuery is able to complete the job successfully. +Creation, truncation and append actions occur as one atomic update upon job completion. Default value: "WRITE_EMPTY" Possible values: ["WRITE_TRUNCATE", "WRITE_APPEND", "WRITE_EMPTY"]`, + Default: "WRITE_EMPTY", + }, + }, + }, + ExactlyOneOf: []string{"query", "load", "copy", "extract"}, + }, + "query": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Configures a query job.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "query": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL. +*NOTE*: queries containing [DML language](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-manipulation-language) +('DELETE', 'UPDATE', 'MERGE', 'INSERT') must specify 'create_disposition = ""' and 'write_disposition = ""'.`, + }, + "allow_large_results": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `If true and query uses legacy SQL dialect, allows the query to produce arbitrarily large result tables at a slight cost in performance. +Requires destinationTable to be set. For standard SQL queries, this flag is ignored and large results are always allowed. +However, you must still set destinationTable when result size exceeds the allowed maximum response size.`, + }, + "create_disposition": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"CREATE_IF_NEEDED", "CREATE_NEVER", ""}), + Description: `Specifies whether the job is allowed to create new tables. The following values are supported: +CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. +CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. +Creation, truncation and append actions occur as one atomic update upon job completion Default value: "CREATE_IF_NEEDED" Possible values: ["CREATE_IF_NEEDED", "CREATE_NEVER"]`, + Default: "CREATE_IF_NEEDED", + }, + "default_dataset": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Specifies the default dataset to use for unqualified table names in the query. 
Note that this does not alter behavior of unqualified dataset names.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dataset_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkRelativePaths, + Description: `The dataset. Can be specified '{{"{{"}}dataset_id{{"}}"}}' if 'project_id' is also set, +or of the form 'projects/{{"{{"}}project{{"}}"}}/datasets/{{"{{"}}dataset_id{{"}}"}}' if not.`, + }, + "project_id": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The ID of the project containing this table.`, + }, + }, + }, + }, + "destination_encryption_configuration": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Custom encryption configuration (e.g., Cloud KMS keys)`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. +The BigQuery Service Account associated with your project requires access to this encryption key.`, + }, + "kms_key_version": { + Type: schema.TypeString, + Computed: true, + Description: `Describes the Cloud KMS encryption key version used to protect destination BigQuery table.`, + }, + }, + }, + }, + "destination_table": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Describes the table where the query results should be stored. +This property must be set for large results that exceed the maximum response size. +For queries that produce anonymous (cached) results, this field will be populated by BigQuery.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "table_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkRelativePaths, + Description: `The table. Can be specified '{{"{{"}}table_id{{"}}"}}' if 'project_id' and 'dataset_id' are also set, +or of the form 'projects/{{"{{"}}project{{"}}"}}/datasets/{{"{{"}}dataset_id{{"}}"}}/tables/{{"{{"}}table_id{{"}}"}}' if not.`, + }, + "dataset_id": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The ID of the dataset containing this table.`, + }, + "project_id": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The ID of the project containing this table.`, + }, + }, + }, + }, + "flatten_results": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `If true and query uses legacy SQL dialect, flattens all nested and repeated fields in the query results. +allowLargeResults must be true if this is set to false. For standard SQL queries, this flag is ignored and results are never flattened.`, + }, + "maximum_billing_tier": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: `Limits the billing tier for this job. Queries that have resource usage beyond this tier will fail (without incurring a charge). +If unspecified, this will be set to your project default.`, + }, + "maximum_bytes_billed": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Limits the bytes billed for this job. Queries that will have bytes billed beyond this limit will fail (without incurring a charge). 
+If unspecified, this will be set to your project default.`, + }, + "parameter_mode": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Standard SQL only. Set to POSITIONAL to use positional (?) query parameters or to NAMED to use named (@myparam) query parameters in this query.`, + }, + "priority": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"INTERACTIVE", "BATCH", ""}), + Description: `Specifies a priority for the query. Default value: "INTERACTIVE" Possible values: ["INTERACTIVE", "BATCH"]`, + Default: "INTERACTIVE", + }, + "schema_update_options": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Allows the schema of the destination table to be updated as a side effect of the query job. +Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; +when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, +specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. +One or more of the following values are specified: +ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. +ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "script_options": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Options controlling the execution of scripts.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key_result_statement": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"LAST", "FIRST_SELECT", ""}), + Description: `Determines which statement in the script represents the "key result", +used to populate the schema and query results of the script job. Possible values: ["LAST", "FIRST_SELECT"]`, + AtLeastOneOf: []string{"query.0.script_options.0.statement_timeout_ms", "query.0.script_options.0.statement_byte_budget", "query.0.script_options.0.key_result_statement"}, + }, + "statement_byte_budget": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Limit on the number of bytes billed per statement. Exceeding this budget results in an error.`, + AtLeastOneOf: []string{"query.0.script_options.0.statement_timeout_ms", "query.0.script_options.0.statement_byte_budget", "query.0.script_options.0.key_result_statement"}, + }, + "statement_timeout_ms": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Timeout period for each statement in a script.`, + AtLeastOneOf: []string{"query.0.script_options.0.statement_timeout_ms", "query.0.script_options.0.statement_byte_budget", "query.0.script_options.0.key_result_statement"}, + }, + }, + }, + }, + "use_legacy_sql": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Specifies whether to use BigQuery's legacy SQL dialect for this query. The default value is true. +If set to false, the query will use BigQuery's standard SQL.`, + }, + "use_query_cache": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Whether to look for the result in the query cache. The query cache is a best-effort cache that will be flushed whenever +tables in the query are modified. Moreover, the query cache is only available when a query does not have a destination table specified. 
+The default value is true.`, + Default: true, + }, + "user_defined_function_resources": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Describes user-defined function resources used in the query.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "inline_code": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `An inline resource that contains code for a user-defined function (UDF). +Providing a inline code resource is equivalent to providing a URI for a file containing the same code.`, + }, + "resource_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `A code resource to load from a Google Cloud Storage URI (gs://bucket/path).`, + }, + }, + }, + }, + "write_disposition": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"WRITE_TRUNCATE", "WRITE_APPEND", "WRITE_EMPTY", ""}), + Description: `Specifies the action that occurs if the destination table already exists. The following values are supported: +WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. +WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. +WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. +Each action is atomic and only occurs if BigQuery is able to complete the job successfully. +Creation, truncation and append actions occur as one atomic update upon job completion. Default value: "WRITE_EMPTY" Possible values: ["WRITE_TRUNCATE", "WRITE_APPEND", "WRITE_EMPTY"]`, + Default: "WRITE_EMPTY", + }, + }, + }, + ExactlyOneOf: []string{"query", "load", "copy", "extract"}, + }, + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + ForceNew: true, + Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "job_type": { + Type: schema.TypeString, + Computed: true, + Description: `The type of the job.`, + }, + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + ForceNew: true, + Description: `The combination of labels configured directly on the resource + and default labels configured on the provider.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "job_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.`, + }, + "location": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The geographic location of the job. The default value is US.`, + Default: "US", + }, + + "status": { + Type: schema.TypeList, + Computed: true, + Description: `The status of this job. Examine this value when polling an asynchronous job to see if the job is complete.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "error_result": { + Type: schema.TypeList, + Computed: true, + Description: `Final error result of the job. 
If present, indicates that the job has completed and was unsuccessful.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "location": { + Type: schema.TypeString, + Optional: true, + Description: `Specifies where the error occurred, if present.`, + }, + "message": { + Type: schema.TypeString, + Optional: true, + Description: `A human-readable description of the error.`, + }, + "reason": { + Type: schema.TypeString, + Optional: true, + Description: `A short error code that summarizes the error.`, + }, + }, + }, + }, + "errors": { + Type: schema.TypeList, + Computed: true, + Description: `The first errors encountered during the running of the job. The final message +includes the number of errors that caused the process to stop. Errors here do +not necessarily mean that the job has not completed or was unsuccessful.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "location": { + Type: schema.TypeString, + Optional: true, + Description: `Specifies where the error occurred, if present.`, + }, + "message": { + Type: schema.TypeString, + Optional: true, + Description: `A human-readable description of the error.`, + }, + "reason": { + Type: schema.TypeString, + Optional: true, + Description: `A short error code that summarizes the error.`, + }, + }, + }, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `Running state of the job. Valid states include 'PENDING', 'RUNNING', and 'DONE'.`, + }, + }, + }, + }, + "user_email": { + Type: schema.TypeString, + Computed: true, + Description: `Email address of the user who ran the job.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func ResourceBigQueryJobUpgradeV0(_ context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { + return tpgresource.TerraformLabelsStateUpgrade(rawState) +} \ No newline at end of file diff --git a/mmv1/templates/terraform/state_migrations/go/billing_budget.go.tmpl b/mmv1/templates/terraform/state_migrations/go/billing_budget.go.tmpl new file mode 100644 index 000000000000..1487eb03fc2e --- /dev/null +++ b/mmv1/templates/terraform/state_migrations/go/billing_budget.go.tmpl @@ -0,0 +1,251 @@ +func resourceBillingBudgetResourceV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "amount": { + Type: schema.TypeList, + Required: true, + Description: `The budgeted amount for each usage period.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "last_period_amount": { + Type: schema.TypeBool, + Optional: true, + Description: `Configures a budget amount that is automatically set to 100% of +last period's spend. +Boolean. Set value to true to use. Do not set to false, instead +use the 'specified_amount' block.`, + ExactlyOneOf: []string{"amount.0.specified_amount", "amount.0.last_period_amount"}, + }, + "specified_amount": { + Type: schema.TypeList, + Optional: true, + Description: `A specified amount to use as the budget. currencyCode is +optional. If specified, it must match the currency of the +billing account. 
The currencyCode is provided on output.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "currency_code": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `The 3-letter currency code defined in ISO 4217.`, + }, + "nanos": { + Type: schema.TypeInt, + Optional: true, + Description: `Number of nano (10^-9) units of the amount. +The value must be between -999,999,999 and +999,999,999 +inclusive. If units is positive, nanos must be positive or +zero. If units is zero, nanos can be positive, zero, or +negative. If units is negative, nanos must be negative or +zero. For example $-1.75 is represented as units=-1 and +nanos=-750,000,000.`, + }, + "units": { + Type: schema.TypeString, + Optional: true, + Description: `The whole units of the amount. For example if currencyCode +is "USD", then 1 unit is one US dollar.`, + }, + }, + }, + ExactlyOneOf: []string{"amount.0.specified_amount", "amount.0.last_period_amount"}, + }, + }, + }, + }, + "billing_account": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `ID of the billing account to set a budget on.`, + }, + "threshold_rules": { + Type: schema.TypeList, + Required: true, + Description: `Rules that trigger alerts (notifications of thresholds being +crossed) when spend exceeds the specified percentages of the +budget.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "threshold_percent": { + Type: schema.TypeFloat, + Required: true, + Description: `Send an alert when this threshold is exceeded. This is a +1.0-based percentage, so 0.5 = 50%. Must be >= 0.`, + }, + "spend_basis": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"CURRENT_SPEND", "FORECASTED_SPEND", ""}, false), + Description: `The type of basis used to determine if spend has passed +the threshold. Default value: "CURRENT_SPEND" Possible values: ["CURRENT_SPEND", "FORECASTED_SPEND"]`, + Default: "CURRENT_SPEND", + }, + }, + }, + }, + "all_updates_rule": { + Type: schema.TypeList, + Optional: true, + Description: `Defines notifications that are sent on every update to the +billing account's spend, regardless of the thresholds defined +using threshold rules.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disable_default_iam_recipients": { + Type: schema.TypeBool, + Optional: true, + Description: `Boolean. When set to true, disables default notifications sent +when a threshold is exceeded. Default recipients are +those with Billing Account Administrators and Billing +Account Users IAM roles for the target account.`, + Default: false, + }, + "monitoring_notification_channels": { + Type: schema.TypeList, + Optional: true, + Description: `The full resource name of a monitoring notification +channel in the form +projects/{project_id}/notificationChannels/{channel_id}. +A maximum of 5 channels are allowed.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + AtLeastOneOf: []string{"all_updates_rule.0.pubsub_topic", "all_updates_rule.0.monitoring_notification_channels"}, + }, + "pubsub_topic": { + Type: schema.TypeString, + Optional: true, + Description: `The name of the Cloud Pub/Sub topic where budget related +messages will be published, in the form +projects/{project_id}/topics/{topic_id}. 
Updates are sent +at regular intervals to the topic.`, + AtLeastOneOf: []string{"all_updates_rule.0.pubsub_topic", "all_updates_rule.0.monitoring_notification_channels"}, + }, + "schema_version": { + Type: schema.TypeString, + Optional: true, + Description: `The schema version of the notification. Only "1.0" is +accepted. It represents the JSON schema as defined in +https://cloud.google.com/billing/docs/how-to/budgets#notification_format.`, + Default: "1.0", + }, + }, + }, + }, + "budget_filter": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `Filters that define which resources are used to compute the actual +spend against the budget.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "credit_types": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `A set of subaccounts of the form billingAccounts/{account_id}, +specifying that usage from only this set of subaccounts should +be included in the budget. If a subaccount is set to the name of +the parent account, usage from the parent account will be included. +If the field is omitted, the report will include usage from the parent +account and all subaccounts, if they exist.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + AtLeastOneOf: []string{"budget_filter.0.projects", "budget_filter.0.credit_types_treatment", "budget_filter.0.services", "budget_filter.0.subaccounts", "budget_filter.0.labels"}, + }, + "credit_types_treatment": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"INCLUDE_ALL_CREDITS", "EXCLUDE_ALL_CREDITS", "INCLUDE_SPECIFIED_CREDITS", ""}, false), + Description: `Specifies how credits should be treated when determining spend +for threshold calculations. Default value: "INCLUDE_ALL_CREDITS" Possible values: ["INCLUDE_ALL_CREDITS", "EXCLUDE_ALL_CREDITS", "INCLUDE_SPECIFIED_CREDITS"]`, + Default: "INCLUDE_ALL_CREDITS", + AtLeastOneOf: []string{"budget_filter.0.projects", "budget_filter.0.credit_types_treatment", "budget_filter.0.services", "budget_filter.0.subaccounts", "budget_filter.0.labels"}, + }, + "labels": { + Type: schema.TypeMap, + Computed: true, + Optional: true, + Description: `A single label and value pair specifying that usage from only +this set of labeled resources should be included in the budget.`, + Elem: &schema.Schema{Type: schema.TypeString}, + AtLeastOneOf: []string{"budget_filter.0.projects", "budget_filter.0.credit_types_treatment", "budget_filter.0.services", "budget_filter.0.subaccounts", "budget_filter.0.labels"}, + }, + "projects": { + Type: schema.TypeList, + Optional: true, + Description: `A set of projects of the form projects/{project_number}, +specifying that usage from only this set of projects should be +included in the budget. If omitted, the report will include +all usage for the billing account, regardless of which project +the usage occurred on.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + AtLeastOneOf: []string{"budget_filter.0.projects", "budget_filter.0.credit_types_treatment", "budget_filter.0.services", "budget_filter.0.subaccounts", "budget_filter.0.labels"}, + }, + "services": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `A set of services of the form services/{service_id}, +specifying that usage from only this set of services should be +included in the budget. If omitted, the report will include +usage for all the services. 
The service names are available +through the Catalog API: +https://cloud.google.com/billing/v1/how-tos/catalog-api.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + AtLeastOneOf: []string{"budget_filter.0.projects", "budget_filter.0.credit_types_treatment", "budget_filter.0.services", "budget_filter.0.subaccounts", "budget_filter.0.labels"}, + }, + "subaccounts": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `A set of subaccounts of the form billingAccounts/{account_id}, +specifying that usage from only this set of subaccounts should +be included in the budget. If a subaccount is set to the name of +the parent account, usage from the parent account will be included. +If the field is omitted, the report will include usage from the parent +account and all subaccounts, if they exist.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + AtLeastOneOf: []string{"budget_filter.0.projects", "budget_filter.0.credit_types_treatment", "budget_filter.0.services", "budget_filter.0.subaccounts", "budget_filter.0.labels"}, + }, + }, + }, + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: `User data for display name in UI. Must be <= 60 chars.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `Resource name of the budget. The resource name +implies the scope of a budget. Values are of the form +billingAccounts/{billingAccountId}/budgets/{budgetId}.`, + }, + }, + } +} + +func ResourceBillingBudgetUpgradeV0(_ context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { + log.Printf("[DEBUG] Attributes before migration: %#v", rawState) + + rawState["name"] = tpgresource.GetResourceNameFromSelfLink(rawState["name"].(string)) + + log.Printf("[DEBUG] Attributes after migration: %#v", rawState) + return rawState, nil +} diff --git a/mmv1/templates/terraform/state_migrations/go/certificate_manager_certificate.go.tmpl b/mmv1/templates/terraform/state_migrations/go/certificate_manager_certificate.go.tmpl new file mode 100644 index 000000000000..c4bc16128c72 --- /dev/null +++ b/mmv1/templates/terraform/state_migrations/go/certificate_manager_certificate.go.tmpl @@ -0,0 +1,209 @@ +func ResourceCertificateManagerCertificateUpgradeV0(_ context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { + log.Printf("[DEBUG] Attributes before migration: %#v", rawState) + // Version 0 didn't support location. Default it to global. + rawState["location"] = "global" + log.Printf("[DEBUG] Attributes after migration: %#v", rawState) + return rawState, nil +} + +func resourceCertificateManagerCertificateResourceV0() *schema.Resource { + return &schema.Resource{ + Create: resourceCertificateManagerCertificateCreate, + Read: resourceCertificateManagerCertificateRead, + Update: resourceCertificateManagerCertificateUpdate, + Delete: resourceCertificateManagerCertificateDelete, + + Importer: &schema.ResourceImporter{ + State: resourceCertificateManagerCertificateImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `A user-defined name of the certificate. 
Certificate names must be unique +The name must be 1-64 characters long, and match the regular expression [a-zA-Z][a-zA-Z0-9_-]* which means the first character must be a letter, +and all following characters must be a dash, underscore, letter or digit.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `A human-readable description of the resource.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Set of label tags associated with the Certificate resource.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "managed": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Configuration and state of a Managed Certificate. +Certificate Manager provisions and renews Managed Certificates +automatically, for as long as it's authorized to do so.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dns_authorizations": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.ProjectNumberDiffSuppress, + Description: `Authorizations that will be used for performing domain authorization`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "domains": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The domains for which a managed SSL certificate will be generated. +Wildcard domains are only supported with DNS challenge resolution`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "authorization_attempt_info": { + Type: schema.TypeList, + Computed: true, + Description: `Detailed state of the latest authorization attempt for each domain +specified for this Managed Certificate.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "details": { + Type: schema.TypeString, + Computed: true, + Description: `Human readable explanation for reaching the state. Provided to help +address the configuration issues. +Not guaranteed to be stable. For programmatic access use 'failure_reason' field.`, + }, + "domain": { + Type: schema.TypeString, + Computed: true, + Description: `Domain name of the authorization attempt.`, + }, + "failure_reason": { + Type: schema.TypeString, + Computed: true, + Description: `Reason for failure of the authorization attempt for the domain.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `State of the domain for managed certificate issuance.`, + }, + }, + }, + }, + "provisioning_issue": { + Type: schema.TypeList, + Computed: true, + Description: `Information about issues with provisioning this Managed Certificate.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "details": { + Type: schema.TypeString, + Computed: true, + Description: `Human readable explanation about the issue. Provided to help address +the configuration issues. +Not guaranteed to be stable. For programmatic access use 'reason' field.`, + }, + "reason": { + Type: schema.TypeString, + Computed: true, + Description: `Reason for provisioning failures.`, + }, + }, + }, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `A state of this Managed Certificate.`, + }, + }, + }, + ExactlyOneOf: []string{"self_managed", "managed"}, + }, + "scope": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: certManagerDefaultScopeDiffSuppress, + Description: `The scope of the certificate. + +DEFAULT: Certificates with default scope are served from core Google data centers. 
+If unsure, choose this option. + +EDGE_CACHE: Certificates with scope EDGE_CACHE are special-purposed certificates, +served from non-core Google data centers. +Currently allowed only for managed certificates.`, + Default: "DEFAULT", + }, + "self_managed": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Certificate data for a SelfManaged Certificate. +SelfManaged Certificates are uploaded by the user. Updating such +certificates before they expire remains the user's responsibility.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "certificate_pem": { + Type: schema.TypeString, + Optional: true, + Deprecated: "Deprecated in favor of `pem_certificate`", + Description: `**Deprecated** The certificate chain in PEM-encoded form. + +Leaf certificate comes first, followed by intermediate ones if any.`, + Sensitive: true, + ExactlyOneOf: []string{"self_managed.0.certificate_pem", "self_managed.0.pem_certificate"}, + }, + "pem_certificate": { + Type: schema.TypeString, + Optional: true, + Description: `The certificate chain in PEM-encoded form. + +Leaf certificate comes first, followed by intermediate ones if any.`, + ExactlyOneOf: []string{"self_managed.0.certificate_pem", "self_managed.0.pem_certificate"}, + }, + "pem_private_key": { + Type: schema.TypeString, + Optional: true, + Description: `The private key of the leaf certificate in PEM-encoded form.`, + Sensitive: true, + ExactlyOneOf: []string{"self_managed.0.private_key_pem", "self_managed.0.pem_private_key"}, + }, + "private_key_pem": { + Type: schema.TypeString, + Optional: true, + Deprecated: "Deprecated in favor of `pem_private_key`", + Description: `**Deprecated** The private key of the leaf certificate in PEM-encoded form.`, + Sensitive: true, + ExactlyOneOf: []string{"self_managed.0.private_key_pem", "self_managed.0.pem_private_key"}, + }, + }, + }, + ExactlyOneOf: []string{"self_managed", "managed"}, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} \ No newline at end of file diff --git a/mmv1/templates/terraform/state_migrations/go/certificate_manager_certificate_issuance_config.go.tmpl b/mmv1/templates/terraform/state_migrations/go/certificate_manager_certificate_issuance_config.go.tmpl new file mode 100644 index 000000000000..a25e7c5fb69f --- /dev/null +++ b/mmv1/templates/terraform/state_migrations/go/certificate_manager_certificate_issuance_config.go.tmpl @@ -0,0 +1,132 @@ +func resourceCertificateManagerCertificateIssuanceConfigResourceV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "certificate_authority_config": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: `The CA that issues the workload certificate. It includes the CA address, type, authentication to CA service, etc.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "certificate_authority_service_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Defines a CertificateAuthorityServiceConfig.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ca_pool": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareResourceNames, + Description: `A CA pool resource used to issue a certificate. 
+The CA pool string has a relative resource path following the form +"projects/{project}/locations/{location}/caPools/{caPool}".`, + }, + }, + }, + }, + }, + }, + }, + "key_algorithm": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"RSA_2048", "ECDSA_P256"}), + Description: `Key algorithm to use when generating the private key. Possible values: ["RSA_2048", "ECDSA_P256"]`, + }, + "lifetime": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Lifetime of issued certificates. A duration in seconds with up to nine fractional digits, ending with 's'. +Example: "1814400s". Valid values are from 21 days (1814400s) to 30 days (2592000s)`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `A user-defined name of the certificate issuance config. +CertificateIssuanceConfig names must be unique globally.`, + }, + "rotation_window_percentage": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: `It specifies the percentage of elapsed time of the certificate lifetime to wait before renewing the certificate. +Must be a number between 1-99, inclusive. +You must set the rotation window percentage in relation to the certificate lifetime so that certificate renewal occurs at least 7 days after +the certificate has been issued and at least 7 days before it expires.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `One or more paragraphs of text description of a CertificateIssuanceConfig.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: `'Set of label tags associated with the CertificateIssuanceConfig resource. + An object containing a list of "key": value pairs. Example: { "name": "wrench", "count": "3" }. + + +**Note**: This field is non-authoritative, and will only manage the labels present in your configuration. +Please refer to the field 'effective_labels' for all of the labels present on the resource.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "location": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The Certificate Manager location. If not specified, "global" is used.`, + Default: "global", + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `The creation timestamp of a CertificateIssuanceConfig. Timestamp is in RFC3339 UTC "Zulu" format, +accurate to nanoseconds with up to nine fractional digits. +Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + ForceNew: true, + Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + ForceNew: true, + Description: `The combination of labels configured directly on the resource + and default labels configured on the provider.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `The last update timestamp of a CertificateIssuanceConfig. Timestamp is in RFC3339 UTC "Zulu" format, +accurate to nanoseconds with up to nine fractional digits. 
+Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func ResourceCertificateManagerCertificateIssuanceConfigUpgradeV0(_ context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { + return tpgresource.TerraformLabelsStateUpgrade(rawState) +} \ No newline at end of file diff --git a/mmv1/templates/terraform/state_migrations/go/certificate_manager_dns_authorization.go.tmpl b/mmv1/templates/terraform/state_migrations/go/certificate_manager_dns_authorization.go.tmpl new file mode 100644 index 000000000000..b3d29dc64136 --- /dev/null +++ b/mmv1/templates/terraform/state_migrations/go/certificate_manager_dns_authorization.go.tmpl @@ -0,0 +1,91 @@ +func ResourceCertificateManagerDnsAuthorizationUpgradeV0(_ context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { + log.Printf("[DEBUG] Attributes before migration: %#v", rawState) + // Version 0 didn't support location. Default it to global. + rawState["location"] = "global" + log.Printf("[DEBUG] Attributes after migration: %#v", rawState) + return rawState, nil +} + +func resourceCertificateManagerDnsAuthorizationResourceV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "domain": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `A domain which is being authorized. A DnsAuthorization resource covers a +single domain and its wildcard, e.g. authorization for "example.com" can +be used to issue certificates for "example.com" and "*.example.com".`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name of the resource; provided by the client when the resource is created. +The name must be 1-64 characters long, and match the regular expression [a-zA-Z][a-zA-Z0-9_-]* which means the first character must be a letter, +and all following characters must be a dash, underscore, letter or digit.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `A human-readable description of the resource.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Set of label tags associated with the DNS Authorization resource. + +**Note**: This field is non-authoritative, and will only manage the labels present in your configuration. +Please refer to the field 'effective_labels' for all of the labels present on the resource.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "dns_resource_record": { + Type: schema.TypeList, + Computed: true, + Description: `The structure describing the DNS Resource Record that needs to be added +to DNS configuration for the authorization to be usable by +certificate.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "data": { + Type: schema.TypeString, + Computed: true, + Description: `Data of the DNS Resource Record.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `Fully qualified name of the DNS Resource Record. +E.g. 
'_acme-challenge.example.com'.`, + }, + "type": { + Type: schema.TypeString, + Computed: true, + Description: `Type of the DNS Resource Record.`, + }, + }, + }, + }, + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `The combination of labels configured directly on the resource + and default labels configured on the provider.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + } + +} \ No newline at end of file diff --git a/mmv1/templates/terraform/state_migrations/go/cloud_build_trigger.go.tmpl b/mmv1/templates/terraform/state_migrations/go/cloud_build_trigger.go.tmpl new file mode 100644 index 000000000000..2146c27cfe4f --- /dev/null +++ b/mmv1/templates/terraform/state_migrations/go/cloud_build_trigger.go.tmpl @@ -0,0 +1,982 @@ +func ResourceCloudBuildTriggerUpgradeV1(_ context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { + log.Printf("[DEBUG] Attributes before migration: %#v", rawState) + // Versions 0 and 1 didn't support location. Default them to global. + rawState["location"] = "global" + log.Printf("[DEBUG] Attributes after migration: %#v", rawState) + return rawState, nil +} + +func resourceCloudBuildTriggerResourceV1() *schema.Resource { + // Cloud Build Triggers started with V1 since its beginnings. + return resourceCloudBuildTriggerResourceV0() +} + +func ResourceCloudBuildTriggerUpgradeV0(_ context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { + // Do nothing as V0 and V1 are exactly the same. + return rawState, nil +} + +func resourceCloudBuildTriggerResourceV0() *schema.Resource { + return &schema.Resource{ + Create: resourceCloudBuildTriggerCreate, + Read: resourceCloudBuildTriggerRead, + Update: resourceCloudBuildTriggerUpdate, + Delete: resourceCloudBuildTriggerDelete, + + Importer: &schema.ResourceImporter{ + State: resourceCloudBuildTriggerImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + SchemaVersion: 1, + CustomizeDiff: stepTimeoutCustomizeDiff, + + Schema: map[string]*schema.Schema{ + "approval_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `Configuration for manual approval to start a build invocation of this BuildTrigger. +Builds created by this trigger will require approval before they execute. +Any user with a Cloud Build Approver role for the project can approve a build.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "approval_required": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether or not approval is needed. If this is set on a build, it will become pending when run, +and will need to be explicitly approved to start.`, + Default: false, + }, + }, + }, + }, + "build": { + Type: schema.TypeList, + Optional: true, + Description: `Contents of the build template. 
Either a filename or build template must be provided.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "step": { + Type: schema.TypeList, + Required: true, + Description: `The operations to be performed on the workspace.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `The name of the container image that will run this particular build step. +If the image is available in the host's Docker daemon's cache, it will be +run directly. If not, the host will attempt to pull the image first, using +the builder service account's credentials if necessary. +The Docker daemon's cache will already have the latest versions of all of +the officially supported build steps (see https://github.com/GoogleCloudPlatform/cloud-builders +for images and examples). +The Docker daemon will also have cached many of the layers for some popular +images, like "ubuntu", "debian", but they will be refreshed at the time +you attempt to use them. +If you built an image in a previous build step, it will be stored in the +host's Docker daemon's cache and is available to use as the name for a +later build step.`, + }, + "args": { + Type: schema.TypeList, + Optional: true, + Description: `A list of arguments that will be presented to the step when it is started. +If the image used to run the step's container has an entrypoint, the args +are used as arguments to that entrypoint. If the image does not define an +entrypoint, the first element in args is used as the entrypoint, and the +remainder will be used as arguments.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "dir": { + Type: schema.TypeString, + Optional: true, + Description: `Working directory to use when running this step's container. +If this value is a relative path, it is relative to the build's working +directory. If this value is absolute, it may be outside the build's working +directory, in which case the contents of the path may not be persisted +across build step executions, unless a 'volume' for that path is specified. +If the build specifies a 'RepoSource' with 'dir' and a step with a +'dir', +which specifies an absolute path, the 'RepoSource' 'dir' is ignored +for the step's execution.`, + }, + "entrypoint": { + Type: schema.TypeString, + Optional: true, + Description: `Entrypoint to be used instead of the build step image's +default entrypoint. +If unset, the image's default entrypoint is used`, + }, + "env": { + Type: schema.TypeList, + Optional: true, + Description: `A list of environment variable definitions to be used when +running a step. +The elements are of the form "KEY=VALUE" for the environment variable +"KEY" being given the value "VALUE".`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "id": { + Type: schema.TypeString, + Optional: true, + Description: `Unique identifier for this build step, used in 'wait_for' to +reference this build step as a dependency.`, + }, + "secret_env": { + Type: schema.TypeList, + Optional: true, + Description: `A list of environment variables which are encrypted using +a Cloud Key +Management Service crypto key. These values must be specified in +the build's 'Secret'.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "timeout": { + Type: schema.TypeString, + Optional: true, + Description: `Time limit for executing this build step. 
If not defined, +the step has no +time limit and will be allowed to continue to run until either it +completes or the build itself times out.`, + }, + "timing": { + Type: schema.TypeString, + Optional: true, + Description: `Output only. Stores timing information for executing this +build step.`, + }, + "volumes": { + Type: schema.TypeList, + Optional: true, + Description: `List of volumes to mount into the build step. +Each volume is created as an empty volume prior to execution of the +build step. Upon completion of the build, volumes and their contents +are discarded. +Using a named volume in only one step is not valid as it is +indicative of a build request with an incorrect configuration.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name of the volume to mount. +Volume names must be unique per build step and must be valid names for +Docker volumes. Each named volume must be used by at least two build steps.`, + }, + "path": { + Type: schema.TypeString, + Required: true, + Description: `Path at which to mount the volume. +Paths must be absolute and cannot conflict with other volume paths on +the same build step or with certain reserved volume paths.`, + }, + }, + }, + }, + "wait_for": { + Type: schema.TypeList, + Optional: true, + Description: `The ID(s) of the step(s) that this build step depends on. +This build step will not start until all the build steps in 'wait_for' +have completed successfully. If 'wait_for' is empty, this build step +will start when all previous build steps in the 'Build.Steps' list +have completed successfully.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "artifacts": { + Type: schema.TypeList, + Optional: true, + Description: `Artifacts produced by the build that should be uploaded upon successful completion of all build steps.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "images": { + Type: schema.TypeList, + Optional: true, + Description: `A list of images to be pushed upon the successful completion of all build steps. +The images will be pushed using the builder service account's credentials. +The digests of the pushed images will be stored in the Build resource's results field. +If any of the images fail to be pushed, the build is marked FAILURE.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "objects": { + Type: schema.TypeList, + Optional: true, + Description: `A list of objects to be uploaded to Cloud Storage upon successful completion of all build steps. +Files in the workspace matching specified paths globs will be uploaded to the +Cloud Storage location using the builder service account's credentials. +The location and generation of the uploaded objects will be stored in the Build resource's results field. +If any objects fail to be pushed, the build is marked FAILURE.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "location": { + Type: schema.TypeString, + Optional: true, + Description: `Cloud Storage bucket and optional object path, in the form "gs://bucket/path/to/somewhere/". 
+Files in the workspace matching any path pattern will be uploaded to Cloud Storage with +this location as a prefix.`, + }, + "paths": { + Type: schema.TypeList, + Optional: true, + Description: `Path globs used to match files in the build's workspace.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "timing": { + Type: schema.TypeList, + Computed: true, + Description: `Output only. Stores timing information for pushing all artifact objects.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "end_time": { + Type: schema.TypeString, + Optional: true, + Description: `End of time span. +A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to +nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + "start_time": { + Type: schema.TypeString, + Optional: true, + Description: `Start of time span. +A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to +nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "available_secrets": { + Type: schema.TypeList, + Optional: true, + Description: `Secrets and secret environment variables.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secret_manager": { + Type: schema.TypeList, + Required: true, + Description: `Pairs a secret environment variable with a SecretVersion in Secret Manager.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "env": { + Type: schema.TypeString, + Required: true, + Description: `Environment variable name to associate with the secret. Secret environment +variables must be unique across all of a build's secrets, and must be used +by at least one build step.`, + }, + "version_name": { + Type: schema.TypeString, + Required: true, + Description: `Resource name of the SecretVersion. In format: projects/*/secrets/*/versions/*`, + }, + }, + }, + }, + }, + }, + }, + "images": { + Type: schema.TypeList, + Optional: true, + Description: `A list of images to be pushed upon the successful completion of all build steps. +The images are pushed using the builder service account's credentials. +The digests of the pushed images will be stored in the Build resource's results field. +If any of the images fail to be pushed, the build status is marked FAILURE.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "logs_bucket": { + Type: schema.TypeString, + Optional: true, + Description: `Google Cloud Storage bucket where logs should be written. +Logs file names will be of the format ${logsBucket}/log-${build_id}.txt.`, + }, + "options": { + Type: schema.TypeList, + Optional: true, + Description: `Special options for this build.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disk_size_gb": { + Type: schema.TypeInt, + Optional: true, + Description: `Requested disk size for the VM that runs the build. Note that this is NOT "disk free"; +some of the space will be used by the operating system and build utilities. +Also note that this is the minimum disk size that will be allocated for the build -- +the build may run with a larger disk than requested. 
At present, the maximum disk size +is 1000GB; builds that request more than the maximum are rejected with an error.`, + }, + "dynamic_substitutions": { + Type: schema.TypeBool, + Optional: true, + Description: `Option to specify whether or not to apply bash style string operations to the substitutions. +NOTE this is always enabled for triggered builds and cannot be overridden in the build configuration file.`, + }, + "env": { + Type: schema.TypeList, + Optional: true, + Description: `A list of global environment variable definitions that will exist for all build steps +in this build. If a variable is defined in both globally and in a build step, +the variable will use the build step value. +The elements are of the form "KEY=VALUE" for the environment variable "KEY" being given the value "VALUE".`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "log_streaming_option": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"STREAM_DEFAULT", "STREAM_ON", "STREAM_OFF", ""}), + Description: `Option to define build log streaming behavior to Google Cloud Storage. Possible values: ["STREAM_DEFAULT", "STREAM_ON", "STREAM_OFF"]`, + }, + "logging": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"LOGGING_UNSPECIFIED", "LEGACY", "GCS_ONLY", "STACKDRIVER_ONLY", "CLOUD_LOGGING_ONLY", "NONE", ""}), + Description: `Option to specify the logging mode, which determines if and where build logs are stored. Possible values: ["LOGGING_UNSPECIFIED", "LEGACY", "GCS_ONLY", "STACKDRIVER_ONLY", "CLOUD_LOGGING_ONLY", "NONE"]`, + }, + "machine_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"UNSPECIFIED", "N1_HIGHCPU_8", "N1_HIGHCPU_32", "E2_HIGHCPU_8", "E2_HIGHCPU_32", ""}), + Description: `Compute Engine machine type on which to run the build. Possible values: ["UNSPECIFIED", "N1_HIGHCPU_8", "N1_HIGHCPU_32", "E2_HIGHCPU_8", "E2_HIGHCPU_32"]`, + }, + "requested_verify_option": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"NOT_VERIFIED", "VERIFIED", ""}), + Description: `Requested verifiability options. Possible values: ["NOT_VERIFIED", "VERIFIED"]`, + }, + "secret_env": { + Type: schema.TypeList, + Optional: true, + Description: `A list of global environment variables, which are encrypted using a Cloud Key Management +Service crypto key. These values must be specified in the build's Secret. These variables +will be available to all build steps in this build.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "source_provenance_hash": { + Type: schema.TypeList, + Optional: true, + Description: `Requested hash for SourceProvenance. Possible values: ["NONE", "SHA256", "MD5"]`, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: verify.ValidateEnum([]string{"NONE", "SHA256", "MD5"}), + }, + }, + "substitution_option": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"MUST_MATCH", "ALLOW_LOOSE", ""}), + Description: `Option to specify behavior when there is an error in the substitution checks. +NOTE this is always set to ALLOW_LOOSE for triggered builds and cannot be overridden +in the build configuration file. 
Possible values: ["MUST_MATCH", "ALLOW_LOOSE"]`, + }, + "volumes": { + Type: schema.TypeList, + Optional: true, + Description: `Global list of volumes to mount for ALL build steps +Each volume is created as an empty volume prior to starting the build process. +Upon completion of the build, volumes and their contents are discarded. Global +volume names and paths cannot conflict with the volumes defined a build step. +Using a global volume in a build with only one step is not valid as it is indicative +of a build request with an incorrect configuration.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + Description: `Name of the volume to mount. +Volume names must be unique per build step and must be valid names for Docker volumes. +Each named volume must be used by at least two build steps.`, + }, + "path": { + Type: schema.TypeString, + Optional: true, + Description: `Path at which to mount the volume. +Paths must be absolute and cannot conflict with other volume paths on the same +build step or with certain reserved volume paths.`, + }, + }, + }, + }, + "worker_pool": { + Type: schema.TypeString, + Optional: true, + Description: `Option to specify a WorkerPool for the build. Format projects/{project}/workerPools/{workerPool} +This field is experimental.`, + }, + }, + }, + }, + "queue_ttl": { + Type: schema.TypeString, + Optional: true, + Description: `TTL in queue for this build. If provided and the build is enqueued longer than this value, +the build will expire and the build status will be EXPIRED. +The TTL starts ticking from createTime. +A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".`, + }, + "secret": { + Type: schema.TypeList, + Optional: true, + Description: `Secrets to decrypt using Cloud Key Management Service.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_name": { + Type: schema.TypeString, + Required: true, + Description: `Cloud KMS key name to use to decrypt these envs.`, + }, + "secret_env": { + Type: schema.TypeMap, + Optional: true, + Description: `Map of environment variable name to its encrypted value. +Secret environment variables must be unique across all of a build's secrets, +and must be used by at least one build step. Values can be at most 64 KB in size. +There can be at most 100 secret values across all of a build's secrets.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + "source": { + Type: schema.TypeList, + Optional: true, + Description: `The location of the source files to build. +One of 'storageSource' or 'repoSource' must be provided.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "repo_source": { + Type: schema.TypeList, + Optional: true, + Description: `Location of the source in a Google Cloud Source Repository.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "repo_name": { + Type: schema.TypeString, + Required: true, + Description: `Name of the Cloud Source Repository.`, + }, + "branch_name": { + Type: schema.TypeString, + Optional: true, + Description: `Regex matching branches to build. Exactly one a of branch name, tag, or commit SHA must be provided. 
+The syntax of the regular expressions accepted is the syntax accepted by RE2 and +described at https://github.com/google/re2/wiki/Syntax`, + ExactlyOneOf: []string{"build.0.source.0.repo_source.0.branch_name", "build.0.source.0.repo_source.0.commit_sha", "build.0.source.0.repo_source.0.tag_name"}, + }, + "commit_sha": { + Type: schema.TypeString, + Optional: true, + Description: `Explicit commit SHA to build. Exactly one a of branch name, tag, or commit SHA must be provided.`, + ExactlyOneOf: []string{"build.0.source.0.repo_source.0.branch_name", "build.0.source.0.repo_source.0.commit_sha", "build.0.source.0.repo_source.0.tag_name"}, + }, + "dir": { + Type: schema.TypeString, + Optional: true, + Description: `Directory, relative to the source root, in which to run the build. +This must be a relative path. If a step's dir is specified and is an absolute path, +this value is ignored for that step's execution.`, + }, + "invert_regex": { + Type: schema.TypeBool, + Optional: true, + Description: `Only trigger a build if the revision regex does NOT match the revision regex.`, + }, + "project_id": { + Type: schema.TypeString, + Optional: true, + Description: `ID of the project that owns the Cloud Source Repository. +If omitted, the project ID requesting the build is assumed.`, + }, + "substitutions": { + Type: schema.TypeMap, + Optional: true, + Description: `Substitutions to use in a triggered build. Should only be used with triggers.run`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "tag_name": { + Type: schema.TypeString, + Optional: true, + Description: `Regex matching tags to build. Exactly one a of branch name, tag, or commit SHA must be provided. +The syntax of the regular expressions accepted is the syntax accepted by RE2 and +described at https://github.com/google/re2/wiki/Syntax`, + ExactlyOneOf: []string{"build.0.source.0.repo_source.0.branch_name", "build.0.source.0.repo_source.0.commit_sha", "build.0.source.0.repo_source.0.tag_name"}, + }, + }, + }, + }, + "storage_source": { + Type: schema.TypeList, + Optional: true, + Description: `Location of the source in an archive file in Google Cloud Storage.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Required: true, + Description: `Google Cloud Storage bucket containing the source.`, + }, + "object": { + Type: schema.TypeString, + Required: true, + Description: `Google Cloud Storage object containing the source. +This object must be a gzipped archive file (.tar.gz) containing source to build.`, + }, + "generation": { + Type: schema.TypeString, + Optional: true, + Description: `Google Cloud Storage generation for the object. +If the generation is omitted, the latest generation will be used`, + }, + }, + }, + }, + }, + }, + }, + "substitutions": { + Type: schema.TypeMap, + Optional: true, + Description: `Substitutions data for Build resource.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "tags": { + Type: schema.TypeList, + Optional: true, + Description: `Tags for annotation of a Build. These are not docker tags.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "timeout": { + Type: schema.TypeString, + Optional: true, + Description: `Amount of time that this build should be allowed to run, to second granularity. +If this amount of time elapses, work on the build will cease and the build status will be TIMEOUT. +This timeout must be equal to or greater than the sum of the timeouts for build steps within the build. 
+The expected format is the number of seconds followed by s. +Default time is ten minutes (600s).`, + Default: "600s", + }, + }, + }, + ExactlyOneOf: []string{"filename", "build", "git_file_source"}, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `Human-readable description of the trigger.`, + }, + "disabled": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether the trigger is disabled or not. If true, the trigger will never result in a build.`, + }, + "filename": { + Type: schema.TypeString, + Optional: true, + Description: `Path, from the source root, to a file whose contents is used for the template. +Either a filename or build template must be provided. Set this only when using trigger_template or github. +When using Pub/Sub, Webhook or Manual set the file name using git_file_source instead.`, + ExactlyOneOf: []string{"filename", "build", "git_file_source"}, + }, + "filter": { + Type: schema.TypeString, + Optional: true, + Description: `A Common Expression Language string. Used only with Pub/Sub and Webhook.`, + }, + "git_file_source": { + Type: schema.TypeList, + Optional: true, + Description: `The file source describing the local or remote Build template.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "path": { + Type: schema.TypeString, + Required: true, + Description: `The path of the file, with the repo root as the root of the path.`, + }, + "repo_type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"UNKNOWN", "CLOUD_SOURCE_REPOSITORIES", "GITHUB", "BITBUCKET"}), + Description: `The type of the repo, since it may not be explicit from the repo field (e.g from a URL). +Values can be UNKNOWN, CLOUD_SOURCE_REPOSITORIES, GITHUB Possible values: ["UNKNOWN", "CLOUD_SOURCE_REPOSITORIES", "GITHUB", "BITBUCKET"]`, + }, + "revision": { + Type: schema.TypeString, + Optional: true, + Description: `The branch, tag, arbitrary ref, or SHA version of the repo to use when resolving the +filename (optional). This field respects the same syntax/resolution as described here: https://git-scm.com/docs/gitrevisions +If unspecified, the revision from which the trigger invocation originated is assumed to be the revision from which to read the specified path.`, + }, + "uri": { + Type: schema.TypeString, + Optional: true, + Description: `The URI of the repo (optional). If unspecified, the repo from which the trigger +invocation originated is assumed to be the repo from which to read the specified path.`, + }, + }, + }, + ExactlyOneOf: []string{"filename", "git_file_source", "build"}, + }, + "github": { + Type: schema.TypeList, + Optional: true, + Description: `Describes the configuration of a trigger that creates a build whenever a GitHub event is received. +One of 'trigger_template', 'github', 'pubsub_config' or 'webhook_config' must be provided.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + Description: `Name of the repository. For example: The name for +https://github.com/googlecloudplatform/cloud-builders is "cloud-builders".`, + }, + "owner": { + Type: schema.TypeString, + Optional: true, + Description: `Owner of the repository. For example: The owner for +https://github.com/googlecloudplatform/cloud-builders is "googlecloudplatform".`, + }, + "pull_request": { + Type: schema.TypeList, + Optional: true, + Description: `filter to match changes in pull requests. 
Specify only one of 'pull_request' or 'push'.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "branch": { + Type: schema.TypeString, + Required: true, + Description: `Regex of branches to match.`, + }, + "comment_control": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"COMMENTS_DISABLED", "COMMENTS_ENABLED", "COMMENTS_ENABLED_FOR_EXTERNAL_CONTRIBUTORS_ONLY", ""}), + Description: `Whether to block builds on a "/gcbrun" comment from a repository owner or collaborator. Possible values: ["COMMENTS_DISABLED", "COMMENTS_ENABLED", "COMMENTS_ENABLED_FOR_EXTERNAL_CONTRIBUTORS_ONLY"]`, + }, + "invert_regex": { + Type: schema.TypeBool, + Optional: true, + Description: `If true, branches that do NOT match the git_ref will trigger a build.`, + }, + }, + }, + ExactlyOneOf: []string{"github.0.pull_request", "github.0.push"}, + }, + "push": { + Type: schema.TypeList, + Optional: true, + Description: `filter to match changes in refs, like branches or tags. Specify only one of 'pull_request' or 'push'.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "branch": { + Type: schema.TypeString, + Optional: true, + Description: `Regex of branches to match. Specify only one of branch or tag.`, + ExactlyOneOf: []string{"github.0.push.0.branch", "github.0.push.0.tag"}, + }, + "invert_regex": { + Type: schema.TypeBool, + Optional: true, + Description: `When true, only trigger a build if the revision regex does NOT match the git_ref regex.`, + }, + "tag": { + Type: schema.TypeString, + Optional: true, + Description: `Regex of tags to match. Specify only one of branch or tag.`, + ExactlyOneOf: []string{"github.0.push.0.branch", "github.0.push.0.tag"}, + }, + }, + }, + ExactlyOneOf: []string{"github.0.pull_request", "github.0.push"}, + }, + }, + }, + AtLeastOneOf: []string{"trigger_template", "github", "pubsub_config", "webhook_config", "source_to_build"}, + }, + "ignored_files": { + Type: schema.TypeList, + Optional: true, + Description: `ignoredFiles and includedFiles are file glob matches using https://golang.org/pkg/path/filepath/#Match +extended with support for '**'. +If ignoredFiles and changed files are both empty, then they are not +used to determine whether or not to trigger a build. +If ignoredFiles is not empty, then we ignore any files that match any +of the ignored_file globs. If the change has no files that are outside +of the ignoredFiles globs, then we do not trigger a build.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "include_build_logs": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"INCLUDE_BUILD_LOGS_UNSPECIFIED", "INCLUDE_BUILD_LOGS_WITH_STATUS", ""}), + Description: `Build logs will be sent back to GitHub as part of the checkrun +result. Values can be INCLUDE_BUILD_LOGS_UNSPECIFIED or +INCLUDE_BUILD_LOGS_WITH_STATUS Possible values: ["INCLUDE_BUILD_LOGS_UNSPECIFIED", "INCLUDE_BUILD_LOGS_WITH_STATUS"]`, + }, + "included_files": { + Type: schema.TypeList, + Optional: true, + Description: `ignoredFiles and includedFiles are file glob matches using https://golang.org/pkg/path/filepath/#Match +extended with support for '**'. +If any of the files altered in the commit pass the ignoredFiles filter +and includedFiles is empty, then as far as this filter is concerned, we +should trigger the build. 
+If any of the files altered in the commit pass the ignoredFiles filter +and includedFiles is not empty, then we make sure that at least one of +those files matches a includedFiles glob. If not, then we do not trigger +a build.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `Name of the trigger. Must be unique within the project.`, + }, + "pubsub_config": { + Type: schema.TypeList, + Optional: true, + Description: `PubsubConfig describes the configuration of a trigger that creates +a build whenever a Pub/Sub message is published. +One of 'trigger_template', 'github', 'pubsub_config' 'webhook_config' or 'source_to_build' must be provided.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "topic": { + Type: schema.TypeString, + Required: true, + Description: `The name of the topic from which this subscription is receiving messages.`, + }, + "service_account_email": { + Type: schema.TypeString, + Optional: true, + Description: `Service account that will make the push request.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `Potential issues with the underlying Pub/Sub subscription configuration. +Only populated on get requests.`, + }, + "subscription": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. Name of the subscription.`, + }, + }, + }, + AtLeastOneOf: []string{"trigger_template", "github", "pubsub_config", "webhook_config", "source_to_build"}, + }, + "service_account": { + Type: schema.TypeString, + Optional: true, + Description: `The service account used for all user-controlled operations including +triggers.patch, triggers.run, builds.create, and builds.cancel. +If no service account is set, then the standard Cloud Build service account +([PROJECT_NUM]@system.gserviceaccount.com) will be used instead. +Format: projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT_ID_OR_EMAIL}`, + }, + "source_to_build": { + Type: schema.TypeList, + Optional: true, + Description: `The repo and ref of the repository from which to build. +This field is used only for those triggers that do not respond to SCM events. +Triggers that respond to such events build source at whatever commit caused the event. +This field is currently only used by Webhook, Pub/Sub, Manual, and Cron triggers. +One of 'trigger_template', 'github', 'pubsub_config' 'webhook_config' or 'source_to_build' must be provided.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ref": { + Type: schema.TypeString, + Required: true, + Description: `The branch or tag to use. Must start with "refs/" (required).`, + }, + "repo_type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"UNKNOWN", "CLOUD_SOURCE_REPOSITORIES", "GITHUB", "BITBUCKET"}), + Description: `The type of the repo, since it may not be explicit from the repo field (e.g from a URL). 
+Values can be UNKNOWN, CLOUD_SOURCE_REPOSITORIES, GITHUB, BITBUCKET Possible values: ["UNKNOWN", "CLOUD_SOURCE_REPOSITORIES", "GITHUB", "BITBUCKET"]`, + }, + "uri": { + Type: schema.TypeString, + Required: true, + Description: `The URI of the repo (required).`, + }, + }, + }, + AtLeastOneOf: []string{"trigger_template", "github", "pubsub_config", "webhook_config", "source_to_build"}, + }, + "substitutions": { + Type: schema.TypeMap, + Optional: true, + Description: `Substitutions data for Build resource.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "tags": { + Type: schema.TypeList, + Optional: true, + Description: `Tags for annotation of a BuildTrigger`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "trigger_template": { + Type: schema.TypeList, + Optional: true, + Description: `Template describing the types of source changes to trigger a build. +Branch and tag names in trigger templates are interpreted as regular +expressions. Any branch or tag change that matches that regular +expression will trigger a build. +One of 'trigger_template', 'github', 'pubsub_config', 'webhook_config' or 'source_to_build' must be provided.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "branch_name": { + Type: schema.TypeString, + Optional: true, + Description: `Name of the branch to build. Exactly one a of branch name, tag, or commit SHA must be provided. +This field is a regular expression.`, + ExactlyOneOf: []string{"trigger_template.0.branch_name", "trigger_template.0.tag_name", "trigger_template.0.commit_sha"}, + }, + "commit_sha": { + Type: schema.TypeString, + Optional: true, + Description: `Explicit commit SHA to build. Exactly one of a branch name, tag, or commit SHA must be provided.`, + ExactlyOneOf: []string{"trigger_template.0.branch_name", "trigger_template.0.tag_name", "trigger_template.0.commit_sha"}, + }, + "dir": { + Type: schema.TypeString, + Optional: true, + Description: `Directory, relative to the source root, in which to run the build. +This must be a relative path. If a step's dir is specified and +is an absolute path, this value is ignored for that step's +execution.`, + }, + "invert_regex": { + Type: schema.TypeBool, + Optional: true, + Description: `Only trigger a build if the revision regex does NOT match the revision regex.`, + }, + "project_id": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `ID of the project that owns the Cloud Source Repository. If +omitted, the project ID requesting the build is assumed.`, + }, + "repo_name": { + Type: schema.TypeString, + Optional: true, + Description: `Name of the Cloud Source Repository. If omitted, the name "default" is assumed.`, + Default: "default", + }, + "tag_name": { + Type: schema.TypeString, + Optional: true, + Description: `Name of the tag to build. Exactly one of a branch name, tag, or commit SHA must be provided. +This field is a regular expression.`, + ExactlyOneOf: []string{"trigger_template.0.branch_name", "trigger_template.0.tag_name", "trigger_template.0.commit_sha"}, + }, + }, + }, + AtLeastOneOf: []string{"trigger_template", "github", "pubsub_config", "webhook_config", "source_to_build"}, + }, + "webhook_config": { + Type: schema.TypeList, + Optional: true, + Description: `WebhookConfig describes the configuration of a trigger that creates +a build whenever a webhook is sent to a trigger's webhook URL. 
+One of 'trigger_template', 'github', 'pubsub_config' 'webhook_config' or 'source_to_build' must be provided.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secret": { + Type: schema.TypeString, + Required: true, + Description: `Resource name for the secret required as a URL parameter.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `Potential issues with the underlying Pub/Sub subscription configuration. +Only populated on get requests.`, + }, + }, + }, + AtLeastOneOf: []string{"trigger_template", "github", "pubsub_config", "webhook_config", "source_to_build"}, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Time when the trigger was created.`, + }, + "trigger_id": { + Type: schema.TypeString, + Computed: true, + Description: `The unique identifier for the trigger.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} \ No newline at end of file diff --git a/mmv1/templates/terraform/state_migrations/go/cloud_run_domain_mapping.go.tmpl b/mmv1/templates/terraform/state_migrations/go/cloud_run_domain_mapping.go.tmpl new file mode 100644 index 000000000000..8dd3a6aa3559 --- /dev/null +++ b/mmv1/templates/terraform/state_migrations/go/cloud_run_domain_mapping.go.tmpl @@ -0,0 +1,281 @@ +var domainMappingGoogleProvidedLocationLabel = "cloud.googleapis.com/location" +var domainMappingGoogleProvidedOverrideLabel = "run.googleapis.com/overrideAt" + +var domainMappingGoogleProvidedLabels = []string{ + domainMappingGoogleProvidedLocationLabel, + domainMappingGoogleProvidedOverrideLabel, +} + +func DomainMappingLabelDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + // Suppress diffs for the labels provided by Google + for _, label := range domainMappingGoogleProvidedLabels { + if strings.Contains(k, label) && new == "" { + return true + } + } + + // Let diff be determined by labels (above) + if strings.Contains(k, "labels.%") { + return true + } + + // For other keys, don't suppress diff. + return false +} + +func resourceCloudRunDomainMappingResourceV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The location of the cloud run instance. eg us-central1`, + }, + "metadata": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: `Metadata associated with this DomainMapping.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "namespace": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `In Cloud Run the namespace must be equal to either the +project ID or project number.`, + }, + "annotations": { + Type: schema.TypeMap, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: cloudrunAnnotationDiffSuppress, + Description: `Annotations is a key value map stored with a resource that +may be set by external tools to store and retrieve arbitrary metadata. More +info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations + +**Note**: The Cloud Run API may add additional annotations that were not provided in your config. 
+If terraform plan shows a diff where a server-side annotation is added, you can add it to your config +or apply the lifecycle.ignore_changes rule to the metadata.0.annotations field.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "labels": { + Type: schema.TypeMap, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: DomainMappingLabelDiffSuppress, + Description: `Map of string keys and values that can be used to organize and categorize +(scope and select) objects. May match selectors of replication controllers +and routes. +More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "generation": { + Type: schema.TypeInt, + Computed: true, + Description: `A sequence number representing a specific generation of the desired state.`, + }, + "resource_version": { + Type: schema.TypeString, + Computed: true, + Description: `An opaque value that represents the internal version of this object that +can be used by clients to determine when objects have changed. May be used +for optimistic concurrency, change detection, and the watch operation on a +resource or set of resources. They may only be valid for a +particular resource or set of resources. + +More info: +https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency`, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + Description: `SelfLink is a URL representing this object.`, + }, + "uid": { + Type: schema.TypeString, + Computed: true, + Description: `UID is a unique id generated by the server on successful creation of a resource and is not +allowed to change on PUT operations. + +More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids`, + }, + }, + }, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name should be a [verified](https://support.google.com/webmasters/answer/9008080) domain`, + }, + "spec": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: `The spec for this DomainMapping.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "route_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The name of the Cloud Run Service that this DomainMapping applies to. +The route must exist.`, + }, + "certificate_mode": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"NONE", "AUTOMATIC", ""}), + Description: `The mode of the certificate. Default value: "AUTOMATIC" Possible values: ["NONE", "AUTOMATIC"]`, + Default: "AUTOMATIC", + }, + "force_override": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `If set, the mapping will override any mapping set before this spec was set. +It is recommended that the user leaves this empty to receive an error +warning about a potential conflict and only set it once the respective UI +has given such a warning.`, + }, + }, + }, + }, + "status": { + Type: schema.TypeList, + Computed: true, + Description: `The current status of the DomainMapping.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "resource_records": { + Type: schema.TypeList, + Optional: true, + Description: `The resource records required to configure this domain mapping. 
These +records must be added to the domain's DNS configuration in order to +serve the application via this domain mapping.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"A", "AAAA", "CNAME", ""}), + Description: `Resource record type. Example: 'AAAA'. Possible values: ["A", "AAAA", "CNAME"]`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `Relative name of the object affected by this record. Only applicable for +'CNAME' records. Example: 'www'.`, + }, + "rrdata": { + Type: schema.TypeString, + Computed: true, + Description: `Data for this record. Values vary by record type, as defined in RFC 1035 +(section 5) and RFC 1034 (section 3.6.1).`, + }, + }, + }, + }, + "conditions": { + Type: schema.TypeList, + Computed: true, + Description: `Array of observed DomainMappingConditions, indicating the current state +of the DomainMapping.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "message": { + Type: schema.TypeString, + Computed: true, + Description: `Human readable message indicating details about the current status.`, + }, + "reason": { + Type: schema.TypeString, + Computed: true, + Description: `One-word CamelCase reason for the condition's current status.`, + }, + "status": { + Type: schema.TypeString, + Computed: true, + Description: `Status of the condition, one of True, False, Unknown.`, + }, + "type": { + Type: schema.TypeString, + Computed: true, + Description: `Type of domain mapping condition.`, + }, + }, + }, + }, + "mapped_route_name": { + Type: schema.TypeString, + Computed: true, + Description: `The name of the route that the mapping currently points to.`, + }, + "observed_generation": { + Type: schema.TypeInt, + Computed: true, + Description: `ObservedGeneration is the 'Generation' of the DomainMapping that +was last processed by the controller.`, + }, + }, + }, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func ResourceCloudRunDomainMappingUpgradeV0(_ context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { + log.Printf("[DEBUG] Attributes before migration: %#v", rawState) + + if rawState["metadata"] != nil { + rawMetadatas := rawState["metadata"].([]interface{}) + if len(rawMetadatas) > 0 && rawMetadatas[0] != nil { + // Upgrade labels fields + rawMetadata := rawMetadatas[0].(map[string]interface{}) + + rawLabels := rawMetadata["labels"] + rawTerraformLabels := rawMetadata["terraform_labels"] + if rawLabels != nil { + labels := make(map[string]interface{}) + effectiveLabels := make(map[string]interface{}) + + for k, v := range rawLabels.(map[string]interface{}) { + effectiveLabels[k] = v + + if !strings.Contains(k, domainMappingGoogleProvidedLocationLabel) && !strings.Contains(k, domainMappingGoogleProvidedOverrideLabel) { + labels[k] = v + } + } + + rawMetadata["labels"] = labels + rawMetadata["effective_labels"] = effectiveLabels + + if rawTerraformLabels == nil { + rawMetadata["terraform_labels"] = labels + } + } + + upgradeAnnotations(rawMetadata) + + rawState["metadata"] = []interface{}{rawMetadata} + } + } + + log.Printf("[DEBUG] Attributes after migration: %#v", rawState) + return rawState, nil +} diff --git a/mmv1/templates/terraform/state_migrations/go/cloud_run_service.go.tmpl b/mmv1/templates/terraform/state_migrations/go/cloud_run_service.go.tmpl new file 
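A minimal, self-contained sketch of what ResourceCloudRunDomainMappingUpgradeV0 above does to the labels portion of raw state: every key is copied into effective_labels, the Google-provided domain mapping labels are filtered out of the user-facing labels map, and terraform_labels is backfilled when it was not present in state. It re-implements the filtering inline instead of calling the template's function, and the sample label values are invented for illustration.

package main

import (
	"fmt"
	"strings"
)

func main() {
	// State as written before the migration: user labels and Google-provided
	// labels are mixed together under "labels".
	rawLabels := map[string]interface{}{
		"env":                           "prod",                 // user label (sample value)
		"cloud.googleapis.com/location": "us-central1",          // Google-provided
		"run.googleapis.com/overrideAt": "2024-01-01T00:00:00Z", // Google-provided (sample value)
	}

	googleProvided := []string{
		"cloud.googleapis.com/location",
		"run.googleapis.com/overrideAt",
	}

	labels := map[string]interface{}{}          // user-managed view after the upgrade
	effectiveLabels := map[string]interface{}{} // everything present on the resource

	for k, v := range rawLabels {
		effectiveLabels[k] = v
		keep := true
		for _, g := range googleProvided {
			if strings.Contains(k, g) {
				keep = false
				break
			}
		}
		if keep {
			labels[k] = v
		}
	}

	// The real upgrade only does this when terraform_labels was absent in state.
	terraformLabels := labels

	fmt.Println("labels:          ", labels)
	fmt.Println("effective_labels:", effectiveLabels)
	fmt.Println("terraform_labels:", terraformLabels)
}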
mode 100644 index 000000000000..f5d3d215d3e2 --- /dev/null +++ b/mmv1/templates/terraform/state_migrations/go/cloud_run_service.go.tmpl @@ -0,0 +1,1099 @@ +var cloudRunGoogleProvidedAnnotations = regexp.MustCompile(`serving\.knative\.dev/(?:(?:creator)|(?:lastModifier))$|run\.googleapis\.com/(?:(?:ingress-status)|(?:operation-id))$|cloud\.googleapis\.com/(?:(?:location))`) + +func cloudrunAnnotationDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + // Suppress diffs for the annotations provided by Google + if cloudRunGoogleProvidedAnnotations.MatchString(k) && new == "" { + return true + } + + if strings.HasSuffix(k, "run.googleapis.com/ingress") { + return old == "all" && new == "" + } + + // Let diff be determined by annotations (above) + if strings.Contains(k, "annotations.%") { + return true + } + + // For other keys, don't suppress diff. + return false +} + +var cloudRunGoogleProvidedLabels = regexp.MustCompile(`cloud\.googleapis\.com/(?:(?:location))`) + +func cloudrunLabelDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + // Suppress diffs for the labels provided by Google + if cloudRunGoogleProvidedLabels.MatchString(k) && new == "" { + return true + } + + // Let diff be determined by labels (above) + if strings.Contains(k, "labels.%") { + return true + } + + // For other keys, don't suppress diff. + return false +} + +func resourceCloudRunServiceResourceV1() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The location of the cloud run instance. eg us-central1`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name must be unique within a Google Cloud project and region. +Is required when creating resources. Name is primarily intended +for creation idempotence and configuration definition. Cannot be updated. +More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names`, + }, + "template": { + Type: schema.TypeList, + Optional: true, + Description: `template holds the latest specification for the Revision to +be stamped out. The template references the container image, and may also +include labels and annotations that should be attached to the Revision. +To correlate a Revision, and/or to force a Revision to be created when the +spec doesn't otherwise change, a nonce label may be provided in the +template metadata. For more details, see: +https://github.com/knative/serving/blob/main/docs/client-conventions.md#associate-modifications-with-revisions + +Cloud Run does not currently support referencing a build that is +responsible for materializing the container image from source.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "spec": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `RevisionSpec holds the desired state of the Revision (from the client).`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "containers": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `Containers defines the unit of execution for this Revision.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "image": { + Type: schema.TypeString, + Required: true, + Description: `Docker image name. 
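For review purposes, a self-contained sketch of how the suppress logic above behaves for a few representative diff keys. It repeats the regular expression and branch order of cloudrunAnnotationDiffSuppress rather than calling it, and drops the *schema.ResourceData parameter, which that function does not use; the keys and values are illustrative.

package main

import (
	"fmt"
	"regexp"
	"strings"
)

var googleProvided = regexp.MustCompile(`serving\.knative\.dev/(?:(?:creator)|(?:lastModifier))$|run\.googleapis\.com/(?:(?:ingress-status)|(?:operation-id))$|cloud\.googleapis\.com/(?:(?:location))`)

// suppress mirrors the template's cloudrunAnnotationDiffSuppress without the
// unused schema.ResourceData argument.
func suppress(k, old, new string) bool {
	if googleProvided.MatchString(k) && new == "" {
		return true // Google-added annotation absent from config: hide the diff
	}
	if strings.HasSuffix(k, "run.googleapis.com/ingress") {
		return old == "all" && new == "" // default ingress value: hide the diff
	}
	if strings.Contains(k, "annotations.%") {
		return true // the map size diff follows the per-key decisions above
	}
	return false
}

func main() {
	cases := []struct{ k, old, new string }{
		{"metadata.0.annotations.serving.knative.dev/creator", "someone@example.com", ""},
		{"metadata.0.annotations.run.googleapis.com/ingress", "all", ""},
		{"metadata.0.annotations.run.googleapis.com/ingress", "internal", ""},
		{"metadata.0.annotations.my-key", "a", "b"},
	}
	for _, c := range cases {
		fmt.Printf("%-60s suppressed=%v\n", c.k, suppress(c.k, c.old, c.new))
	}
}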
This is most often a reference to a container located +in the container registry, such as gcr.io/cloudrun/hello`, + }, + "args": { + Type: schema.TypeList, + Optional: true, + Description: `Arguments to the entrypoint. +The docker image's CMD is used if this is not provided.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "command": { + Type: schema.TypeList, + Optional: true, + Description: `Entrypoint array. Not executed within a shell. +The docker image's ENTRYPOINT is used if this is not provided.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "env": { + Type: schema.TypeSet, + Optional: true, + Description: `List of environment variables to set in the container.`, + Elem: cloudrunServiceSpecTemplateSpecContainersContainersEnvSchema(), + // Default schema.HashSchema is used. + }, + "env_from": { + Type: schema.TypeList, + Optional: true, + Deprecated: "`env_from` is deprecated and will be removed in a future major release. This field is not supported by the Cloud Run API.", + ForceNew: true, + Description: `List of sources to populate environment variables in the container. +All invalid keys will be reported as an event when the container is starting. +When a key exists in multiple sources, the value associated with the last source will +take precedence. Values defined by an Env with a duplicate key will take +precedence.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "config_map_ref": { + Type: schema.TypeList, + Optional: true, + Description: `The ConfigMap to select from.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "local_object_reference": { + Type: schema.TypeList, + Optional: true, + Description: `The ConfigMap to select from.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name of the referent.`, + }, + }, + }, + }, + "optional": { + Type: schema.TypeBool, + Optional: true, + Description: `Specify whether the ConfigMap must be defined`, + }, + }, + }, + }, + "prefix": { + Type: schema.TypeString, + Optional: true, + Description: `An optional identifier to prepend to each key in the ConfigMap.`, + }, + "secret_ref": { + Type: schema.TypeList, + Optional: true, + Description: `The Secret to select from.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "local_object_reference": { + Type: schema.TypeList, + Optional: true, + Description: `The Secret to select from.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name of the referent.`, + }, + }, + }, + }, + "optional": { + Type: schema.TypeBool, + Optional: true, + Description: `Specify whether the Secret must be defined`, + }, + }, + }, + }, + }, + }, + }, + "liveness_probe": { + Type: schema.TypeList, + Optional: true, + Description: `Periodic probe of container liveness. Container will be restarted if the probe fails.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "failure_threshold": { + Type: schema.TypeInt, + Optional: true, + Description: `Minimum consecutive failures for the probe to be considered failed after +having succeeded. Defaults to 3. 
Minimum value is 1.`, + Default: 3, + }, + "grpc": { + Type: schema.TypeList, + Optional: true, + Description: `GRPC specifies an action involving a GRPC port.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "port": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `Port number to access on the container. Number must be in the range 1 to 65535. +If not specified, defaults to the same value as container.ports[0].containerPort.`, + }, + "service": { + Type: schema.TypeString, + Optional: true, + Description: `The name of the service to place in the gRPC HealthCheckRequest +(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). +If this is not specified, the default behavior is defined by gRPC.`, + }, + }, + }, + ExactlyOneOf: []string{}, + }, + "http_get": { + Type: schema.TypeList, + Optional: true, + Description: `HttpGet specifies the http request to perform.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "http_headers": { + Type: schema.TypeList, + Optional: true, + Description: `Custom headers to set in the request. HTTP allows repeated headers.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `The header field name.`, + }, + "value": { + Type: schema.TypeString, + Optional: true, + Description: `The header field value.`, + Default: "", + }, + }, + }, + }, + "path": { + Type: schema.TypeString, + Optional: true, + Description: `Path to access on the HTTP server. If set, it should not be empty string.`, + Default: "/", + }, + "port": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `Port number to access on the container. Number must be in the range 1 to 65535. +If not specified, defaults to the same value as container.ports[0].containerPort.`, + }, + }, + }, + ExactlyOneOf: []string{}, + }, + "initial_delay_seconds": { + Type: schema.TypeInt, + Optional: true, + Description: `Number of seconds after the container has started before the probe is +initiated. +Defaults to 0 seconds. Minimum value is 0. Maximum value is 3600.`, + Default: 0, + }, + "period_seconds": { + Type: schema.TypeInt, + Optional: true, + Description: `How often (in seconds) to perform the probe. +Default to 10 seconds. Minimum value is 1. Maximum value is 3600.`, + Default: 10, + }, + "timeout_seconds": { + Type: schema.TypeInt, + Optional: true, + Description: `Number of seconds after which the probe times out. +Defaults to 1 second. Minimum value is 1. Maximum value is 3600. +Must be smaller than period_seconds.`, + Default: 1, + }, + }, + }, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `Name of the container`, + }, + "ports": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `List of open ports in the container.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "container_port": { + Type: schema.TypeInt, + Optional: true, + Description: `Port number the container listens on. This must be a valid port number (between 1 and 65535). Defaults to "8080".`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `If specified, used to specify which protocol to use. Allowed values are "http1" (HTTP/1) and "h2c" (HTTP/2 end-to-end). Defaults to "http1".`, + }, + "protocol": { + Type: schema.TypeString, + Optional: true, + Description: `Protocol for port. 
Must be "TCP". Defaults to "TCP".`, + }, + }, + }, + }, + "resources": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `Compute Resources required by this container. Used to set values such as max memory`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "limits": { + Type: schema.TypeMap, + Computed: true, + Optional: true, + Description: `Limits describes the maximum amount of compute resources allowed. +The values of the map is string form of the 'quantity' k8s type: +https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "requests": { + Type: schema.TypeMap, + Optional: true, + Description: `Requests describes the minimum amount of compute resources required. +If Requests is omitted for a container, it defaults to Limits if that is +explicitly specified, otherwise to an implementation-defined value. +The values of the map is string form of the 'quantity' k8s type: +https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + "startup_probe": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `Startup probe of application within the container. +All other probes are disabled if a startup probe is provided, until it +succeeds. Container will not be added to service endpoints if the probe fails.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "failure_threshold": { + Type: schema.TypeInt, + Optional: true, + Description: `Minimum consecutive failures for the probe to be considered failed after +having succeeded. Defaults to 3. Minimum value is 1.`, + Default: 3, + }, + "grpc": { + Type: schema.TypeList, + Optional: true, + Description: `GRPC specifies an action involving a GRPC port.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "port": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `Port number to access on the container. Number must be in the range 1 to 65535. +If not specified, defaults to the same value as container.ports[0].containerPort.`, + }, + "service": { + Type: schema.TypeString, + Optional: true, + Description: `The name of the service to place in the gRPC HealthCheckRequest +(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). +If this is not specified, the default behavior is defined by gRPC.`, + }, + }, + }, + ExactlyOneOf: []string{}, + }, + "http_get": { + Type: schema.TypeList, + Optional: true, + Description: `HttpGet specifies the http request to perform.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "http_headers": { + Type: schema.TypeList, + Optional: true, + Description: `Custom headers to set in the request. HTTP allows repeated headers.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `The header field name.`, + }, + "value": { + Type: schema.TypeString, + Optional: true, + Description: `The header field value.`, + Default: "", + }, + }, + }, + }, + "path": { + Type: schema.TypeString, + Optional: true, + Description: `Path to access on the HTTP server. 
If set, it should not be empty string.`, + Default: "/", + }, + "port": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `Port number to access on the container. Number must be in the range 1 to 65535. +If not specified, defaults to the same value as container.ports[0].containerPort.`, + }, + }, + }, + ExactlyOneOf: []string{}, + }, + "initial_delay_seconds": { + Type: schema.TypeInt, + Optional: true, + Description: `Number of seconds after the container has started before the probe is +initiated. +Defaults to 0 seconds. Minimum value is 0. Maximum value is 240.`, + Default: 0, + }, + "period_seconds": { + Type: schema.TypeInt, + Optional: true, + Description: `How often (in seconds) to perform the probe. +Default to 10 seconds. Minimum value is 1. Maximum value is 240.`, + Default: 10, + }, + "tcp_socket": { + Type: schema.TypeList, + Optional: true, + Description: `TcpSocket specifies an action involving a TCP port.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "port": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `Port number to access on the container. Number must be in the range 1 to 65535. +If not specified, defaults to the same value as container.ports[0].containerPort.`, + }, + }, + }, + ExactlyOneOf: []string{}, + }, + "timeout_seconds": { + Type: schema.TypeInt, + Optional: true, + Description: `Number of seconds after which the probe times out. +Defaults to 1 second. Minimum value is 1. Maximum value is 3600. +Must be smaller than periodSeconds.`, + Default: 1, + }, + }, + }, + }, + "volume_mounts": { + Type: schema.TypeList, + Optional: true, + Description: `Volume to mount into the container's filesystem. +Only supports SecretVolumeSources.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "mount_path": { + Type: schema.TypeString, + Required: true, + Description: `Path within the container at which the volume should be mounted. Must +not contain ':'.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + Description: `This must match the Name of a Volume.`, + }, + }, + }, + }, + "working_dir": { + Type: schema.TypeString, + Optional: true, + Deprecated: "`working_dir` is deprecated and will be removed in a future major release. This field is not supported by the Cloud Run API.", + ForceNew: true, + Description: `Container's working directory. +If not specified, the container runtime's default will be used, which +might be configured in the container image.`, + }, + }, + }, + }, + "container_concurrency": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `ContainerConcurrency specifies the maximum allowed in-flight (concurrent) +requests per container of the Revision. Values are: +- '0' thread-safe, the system should manage the max concurrency. This is + the default value. +- '1' not-thread-safe. Single concurrency +- '2-N' thread-safe, max concurrency of N`, + }, + "service_account_name": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `Email address of the IAM service account associated with the revision of the +service. The service account represents the identity of the running revision, +and determines what permissions the revision has. 
If not provided, the revision +will use the project's default service account.`, + }, + "timeout_seconds": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `TimeoutSeconds holds the max duration the instance is allowed for responding to a request.`, + }, + "volumes": { + Type: schema.TypeList, + Optional: true, + Description: `Volume represents a named volume in a container.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Volume's name.`, + }, +{{- if ne $.TargetVersionName "ga" }} + "empty_dir": { + Type: schema.TypeList, + Optional: true, + Description: `Ephemeral storage which can be backed by real disks (HD, SSD), network storage or memory (i.e. tmpfs). For now only in memory (tmpfs) is supported. It is ephemeral in the sense that when the sandbox is taken down, the data is destroyed with it (it does not persist across sandbox runs).`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "medium": { + Type: schema.TypeString, + Optional: true, + Description: `The medium on which the data is stored. The default is "" which means to use the node's default medium. Must be an empty string (default) or Memory.`, + }, + "size_limit": { + Type: schema.TypeString, + Optional: true, + Description: `Limit on the storage usable by this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. This field's values are of the 'Quantity' k8s type: https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/quantity/. The default is nil which means that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir.`, + }, + }, + }, + }, +{{- end }} + "secret": { + Type: schema.TypeList, + Optional: true, + Description: `The secret's value will be presented as the content of a file whose +name is defined in the item path. If no items are defined, the name of +the file is the secret_name.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secret_name": { + Type: schema.TypeString, + Required: true, + Description: `The name of the secret in Cloud Secret Manager. By default, the secret +is assumed to be in the same project. +If the secret is in another project, you must define an alias. +An alias definition has the form: +{alias}:projects/{project-id|project-number}/secrets/{secret-name}. +If multiple alias definitions are needed, they must be separated by +commas. +The alias definitions must be set on the run.googleapis.com/secrets +annotation.`, + }, + "default_mode": { + Type: schema.TypeInt, + Optional: true, + Description: `Mode bits to use on created files by default. Must be a value between 0000 +and 0777. Defaults to 0644. Directories within the path are not affected by +this setting. This might be in conflict with other options that affect the +file mode, like fsGroup, and the result can be other mode bits set.`, + }, + "items": { + Type: schema.TypeList, + Optional: true, + Description: `If unspecified, the volume will expose a file whose name is the +secret_name. +If specified, the key will be used as the version to fetch from Cloud +Secret Manager and the path will be the name of the file exposed in the +volume. 
When items are defined, they must specify a key and a path.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + Description: `The Cloud Secret Manager secret version. +Can be 'latest' for the latest value or an integer for a specific version.`, + }, + "path": { + Type: schema.TypeString, + Required: true, + Description: `The relative path of the file to map the key to. +May not be an absolute path. +May not contain the path element '..'. +May not start with the string '..'.`, + }, + "mode": { + Type: schema.TypeInt, + Optional: true, + Description: `Mode bits to use on this file, must be a value between 0000 and 0777. If +not specified, the volume defaultMode will be used. This might be in +conflict with other options that affect the file mode, like fsGroup, and +the result can be other mode bits set.`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "serving_state": { + Type: schema.TypeString, + Computed: true, + Deprecated: "`serving_state` is deprecated and will be removed in a future major release. This field is not supported by the Cloud Run API.", + Description: `ServingState holds a value describing the state the resources +are in for this Revision. +It is expected +that the system will manipulate this based on routability and load.`, + }, + }, + }, + }, + "metadata": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `Optional metadata for this Revision, including labels and annotations. +Name will be generated by the Configuration. To set minimum instances +for this revision, use the "autoscaling.knative.dev/minScale" annotation +key. To set maximum instances for this revision, use the +"autoscaling.knative.dev/maxScale" annotation key. To set Cloud SQL +connections for the revision, use the "run.googleapis.com/cloudsql-instances" +annotation key.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "annotations": { + Type: schema.TypeMap, + Computed: true, + Optional: true, + DiffSuppressFunc: cloudrunTemplateAnnotationDiffSuppress, + Description: `Annotations is a key value map stored with a resource that +may be set by external tools to store and retrieve arbitrary metadata. More +info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations + +**Note**: The Cloud Run API may add additional annotations that were not provided in your config. +If terraform plan shows a diff where a server-side annotation is added, you can add it to your config +or apply the lifecycle.ignore_changes rule to the metadata.0.annotations field. + +Annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted. Use the following annotation +keys to configure features on a Revision template: + +- 'autoscaling.knative.dev/maxScale' sets the [maximum number of container + instances](https://cloud.google.com/sdk/gcloud/reference/run/deploy#--max-instances) of the Revision to run. +- 'autoscaling.knative.dev/minScale' sets the [minimum number of container + instances](https://cloud.google.com/sdk/gcloud/reference/run/deploy#--min-instances) of the Revision to run. +- 'run.googleapis.com/client-name' sets the client name calling the Cloud Run API. +- 'run.googleapis.com/cloudsql-instances' sets the [Cloud SQL + instances](https://cloud.google.com/sdk/gcloud/reference/run/deploy#--add-cloudsql-instances) the Revision connects to. 
+- 'run.googleapis.com/cpu-throttling' sets whether to throttle the CPU when the container is not actively serving + requests. See https://cloud.google.com/sdk/gcloud/reference/run/deploy#--[no-]cpu-throttling. +- 'run.googleapis.com/encryption-key-shutdown-hours' sets the number of hours to wait before an automatic shutdown + server after CMEK key revocation is detected. +- 'run.googleapis.com/encryption-key' sets the [CMEK key](https://cloud.google.com/run/docs/securing/using-cmek) + reference to encrypt the container with. +- 'run.googleapis.com/execution-environment' sets the [execution + environment](https://cloud.google.com/sdk/gcloud/reference/run/deploy#--execution-environment) + where the application will run. +- 'run.googleapis.com/post-key-revocation-action-type' sets the + [action type](https://cloud.google.com/sdk/gcloud/reference/run/deploy#--post-key-revocation-action-type) + after CMEK key revocation. +- 'run.googleapis.com/secrets' sets a list of key-value pairs to set as + [secrets](https://cloud.google.com/run/docs/configuring/secrets#yaml). +- 'run.googleapis.com/sessionAffinity' sets whether to enable + [session affinity](https://cloud.google.com/sdk/gcloud/reference/beta/run/deploy#--[no-]session-affinity) + for connections to the Revision. +- 'run.googleapis.com/startup-cpu-boost' sets whether to allocate extra CPU to containers on startup. + See https://cloud.google.com/sdk/gcloud/reference/run/deploy#--[no-]cpu-boost. +- 'run.googleapis.com/vpc-access-connector' sets a [VPC connector](https://cloud.google.com/run/docs/configuring/connecting-vpc#terraform_1) + for the Revision. +- 'run.googleapis.com/vpc-access-egress' sets the outbound traffic to send through the VPC connector for this resource. + See https://cloud.google.com/sdk/gcloud/reference/run/deploy#--vpc-egress.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "labels": { + Type: schema.TypeMap, + Computed: true, + Optional: true, + DiffSuppressFunc: cloudrunTemplateLabelDiffSuppress, + Description: `Map of string keys and values that can be used to organize and categorize +(scope and select) objects.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `Name must be unique within a Google Cloud project and region. +Is required when creating resources. Name is primarily intended +for creation idempotence and configuration definition. Cannot be updated.`, + }, + "namespace": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `In Cloud Run the namespace must be equal to either the +project ID or project number. It will default to the resource's project.`, + }, + "generation": { + Type: schema.TypeInt, + Computed: true, + Description: `A sequence number representing a specific generation of the desired state.`, + }, + "resource_version": { + Type: schema.TypeString, + Computed: true, + Description: `An opaque value that represents the internal version of this object that +can be used by clients to determine when objects have changed. May be used +for optimistic concurrency, change detection, and the watch operation on a +resource or set of resources. 
They may only be valid for a +particular resource or set of resources.`, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + Description: `SelfLink is a URL representing this object.`, + }, + "uid": { + Type: schema.TypeString, + Computed: true, + Description: `UID is a unique id generated by the server on successful creation of a resource and is not +allowed to change on PUT operations.`, + }, + }, + }, + }, + }, + }, + }, + "traffic": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `Traffic specifies how to distribute traffic over a collection of Knative Revisions +and Configurations`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "percent": { + Type: schema.TypeInt, + Required: true, + Description: `Percent specifies percent of the traffic to this Revision or Configuration.`, + }, + "latest_revision": { + Type: schema.TypeBool, + Optional: true, + Description: `LatestRevision may be optionally provided to indicate that the latest ready +Revision of the Configuration should be used for this traffic target. When +provided LatestRevision must be true if RevisionName is empty; it must be +false when RevisionName is non-empty.`, + }, + "revision_name": { + Type: schema.TypeString, + Optional: true, + Description: `RevisionName of a specific revision to which to send this portion of traffic.`, + }, + "tag": { + Type: schema.TypeString, + Optional: true, + Description: `Tag is optionally used to expose a dedicated url for referencing this target exclusively.`, + }, + "url": { + Type: schema.TypeString, + Computed: true, + Description: `URL displays the URL for accessing tagged traffic targets. URL is displayed in status, +and is disallowed on spec. URL must contain a scheme (e.g. http://) and a hostname, +but may not contain anything else (e.g. basic auth, url path, etc.)`, + }, + }, + }, + }, + + "metadata": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `Metadata associated with this Service, including name, namespace, labels, +and annotations.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "annotations": { + Type: schema.TypeMap, + Computed: true, + Optional: true, + DiffSuppressFunc: cloudrunAnnotationDiffSuppress, + Description: `Annotations is a key value map stored with a resource that +may be set by external tools to store and retrieve arbitrary metadata. More +info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations + +**Note**: The Cloud Run API may add additional annotations that were not provided in your config. +If terraform plan shows a diff where a server-side annotation is added, you can add it to your config +or apply the lifecycle.ignore_changes rule to the metadata.0.annotations field. + +Annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted. Use the following annotation +keys to configure features on a Service: + +- 'run.googleapis.com/binary-authorization-breakglass' sets the [Binary Authorization breakglass](https://cloud.google.com/sdk/gcloud/reference/run/deploy#--breakglass). +- 'run.googleapis.com/binary-authorization' sets the [Binary Authorization](https://cloud.google.com/sdk/gcloud/reference/run/deploy#--binary-authorization). +- 'run.googleapis.com/client-name' sets the client name calling the Cloud Run API. 
+- 'run.googleapis.com/custom-audiences' sets the [custom audiences](https://cloud.google.com/sdk/gcloud/reference/alpha/run/deploy#--add-custom-audiences) + that can be used in the audience field of ID token for authenticated requests. +- 'run.googleapis.com/description' sets a user defined description for the Service. +- 'run.googleapis.com/ingress' sets the [ingress settings](https://cloud.google.com/sdk/gcloud/reference/run/deploy#--ingress) + for the Service. For example, '"run.googleapis.com/ingress" = "all"'. +- 'run.googleapis.com/launch-stage' sets the [launch stage](https://cloud.google.com/run/docs/troubleshooting#launch-stage-validation) + when a preview feature is used. For example, '"run.googleapis.com/launch-stage": "BETA"'`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "labels": { + Type: schema.TypeMap, + Computed: true, + Optional: true, + DiffSuppressFunc: cloudrunLabelDiffSuppress, + Description: `Map of string keys and values that can be used to organize and categorize +(scope and select) objects. May match selectors of replication controllers +and routes.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "namespace": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `In Cloud Run the namespace must be equal to either the +project ID or project number.`, + }, + "generation": { + Type: schema.TypeInt, + Computed: true, + Description: `A sequence number representing a specific generation of the desired state.`, + }, + "resource_version": { + Type: schema.TypeString, + Computed: true, + Description: `An opaque value that represents the internal version of this object that +can be used by clients to determine when objects have changed. May be used +for optimistic concurrency, change detection, and the watch operation on a +resource or set of resources. They may only be valid for a +particular resource or set of resources.`, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + Description: `SelfLink is a URL representing this object.`, + }, + "uid": { + Type: schema.TypeString, + Computed: true, + Description: `UID is a unique id generated by the server on successful creation of a resource and is not +allowed to change on PUT operations.`, + }, + }, + }, + }, + "status": { + Type: schema.TypeList, + Computed: true, + Description: `The current status of the Service.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "conditions": { + Type: schema.TypeList, + Computed: true, + Description: `Array of observed Service Conditions, indicating the current ready state of the service.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "message": { + Type: schema.TypeString, + Computed: true, + Description: `Human readable message indicating details about the current status.`, + }, + "reason": { + Type: schema.TypeString, + Computed: true, + Description: `One-word CamelCase reason for the condition's current status.`, + }, + "status": { + Type: schema.TypeString, + Computed: true, + Description: `Status of the condition, one of True, False, Unknown.`, + }, + "type": { + Type: schema.TypeString, + Computed: true, + Description: `Type of domain mapping condition.`, + }, + }, + }, + }, + "latest_created_revision_name": { + Type: schema.TypeString, + Computed: true, + Description: `From ConfigurationStatus. LatestCreatedRevisionName is the last revision that was created +from this Service's Configuration. 
It might not be ready yet, for that use +LatestReadyRevisionName.`, + }, + "latest_ready_revision_name": { + Type: schema.TypeString, + Computed: true, + Description: `From ConfigurationStatus. LatestReadyRevisionName holds the name of the latest Revision +stamped out from this Service's Configuration that has had its "Ready" condition become +"True".`, + }, + "observed_generation": { + Type: schema.TypeInt, + Computed: true, + Description: `ObservedGeneration is the 'Generation' of the Route that was last processed by the +controller. + +Clients polling for completed reconciliation should poll until observedGeneration = +metadata.generation and the Ready condition's status is True or False.`, + }, + "traffic": { + Type: schema.TypeList, + Computed: true, + Description: `Traffic specifies how to distribute traffic over a collection of Knative Revisions +and Configurations`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "latest_revision": { + Type: schema.TypeBool, + Computed: true, + Description: `LatestRevision may be optionally provided to indicate that the latest ready +Revision of the Configuration should be used for this traffic target. When +provided LatestRevision must be true if RevisionName is empty; it must be +false when RevisionName is non-empty.`, + }, + "percent": { + Type: schema.TypeInt, + Computed: true, + Description: `Percent specifies percent of the traffic to this Revision or Configuration.`, + }, + "revision_name": { + Type: schema.TypeString, + Computed: true, + Description: `RevisionName of a specific revision to which to send this portion of traffic.`, + }, + "tag": { + Type: schema.TypeString, + Computed: true, + Description: `Tag is optionally used to expose a dedicated url for referencing this target exclusively.`, + }, + "url": { + Type: schema.TypeString, + Computed: true, + Description: `URL displays the URL for accessing tagged traffic targets. URL is displayed in status, +and is disallowed on spec. URL must contain a scheme (e.g. http://) and a hostname, +but may not contain anything else (e.g. basic auth, url path, etc.)`, + }, + }, + }, + }, + "url": { + Type: schema.TypeString, + Computed: true, + Description: `From RouteStatus. URL holds the url that will distribute traffic over the provided traffic +targets. It generally has the form +https://{route-hash}-{project-hash}-{cluster-level-suffix}.a.run.app`, + }, + }, + }, + }, + "autogenerate_revision_name": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: `If set to 'true', the revision name (template.metadata.name) will be omitted and +autogenerated by Cloud Run. This cannot be set to 'true' while 'template.metadata.name' +is also set. 
+(For legacy support, if 'template.metadata.name' is unset in state while +this field is set to false, the revision name will still autogenerate.)`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func upgradeAnnotations(rawMetadata map[string]interface{}) { + rawAnnotations := rawMetadata["annotations"] + if rawAnnotations != nil { + annotations := make(map[string]interface{}) + effectiveAnnotations := make(map[string]interface{}) + + for k, v := range rawAnnotations.(map[string]interface{}) { + effectiveAnnotations[k] = v + + if !(cloudRunGoogleProvidedAnnotations.MatchString(k) || (strings.HasSuffix(k, "run.googleapis.com/ingress") && v == "all")) { + annotations[k] = v + } + } + + rawMetadata["annotations"] = annotations + rawMetadata["effective_annotations"] = effectiveAnnotations + } +} + +func ResourceCloudRunServiceUpgradeV1(_ context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { + log.Printf("[DEBUG] Attributes before migration: %#v", rawState) + + if rawState["metadata"] != nil { + rawMetadatas := rawState["metadata"].([]interface{}) + + // Upgrade labels fields + if len(rawMetadatas) > 0 && rawMetadatas[0] != nil { + rawMetadata := rawMetadatas[0].(map[string]interface{}) + + rawLabels := rawMetadata["labels"] + if rawLabels != nil { + labels := make(map[string]interface{}) + effectiveLabels := make(map[string]interface{}) + + for k, v := range rawLabels.(map[string]interface{}) { + effectiveLabels[k] = v + + if !cloudRunGoogleProvidedLabels.MatchString(k) { + labels[k] = v + } + } + + rawMetadata["labels"] = labels + rawMetadata["effective_labels"] = effectiveLabels + } + + upgradeAnnotations(rawMetadata) + + rawState["metadata"] = []interface{}{rawMetadata} + } + } + + log.Printf("[DEBUG] Attributes after migration: %#v", rawState) + return rawState, nil +} diff --git a/mmv1/templates/terraform/state_migrations/go/datastream_private_connection.go.tmpl b/mmv1/templates/terraform/state_migrations/go/datastream_private_connection.go.tmpl new file mode 100644 index 000000000000..4beedd6ac2bd --- /dev/null +++ b/mmv1/templates/terraform/state_migrations/go/datastream_private_connection.go.tmpl @@ -0,0 +1,115 @@ +func resourceDatastreamPrivateConnectionResourceV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "display_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Display name.`, + }, + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the location this private connection is located in.`, + }, + "private_connection_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The private connectivity identifier.`, + }, + "vpc_peering_config": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: `The VPC Peering configuration is used to create VPC peering +between Datastream and the consumer's VPC.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "subnet": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `A free subnet for peering. (CIDR of /29)`, + }, + "vpc": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Fully qualified name of the VPC that Datastream will peer to. 
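Like the domain mapping migration earlier in this diff, upgradeAnnotations above splits the raw annotation map into a user-managed view and an effective view; the differences are that Google-provided keys are matched with the regular expression defined at the top of this template, and that a stored ingress value of "all" is also treated as server-managed. The sketch below re-implements that split inline on invented input; it is not part of the patch.

package main

import (
	"fmt"
	"regexp"
	"strings"
)

var googleProvidedAnnotations = regexp.MustCompile(`serving\.knative\.dev/(?:(?:creator)|(?:lastModifier))$|run\.googleapis\.com/(?:(?:ingress-status)|(?:operation-id))$|cloud\.googleapis\.com/(?:(?:location))`)

func main() {
	rawAnnotations := map[string]interface{}{
		"serving.knative.dev/creator":    "someone@example.com", // sample value
		"run.googleapis.com/ingress":     "all",
		"run.googleapis.com/client-name": "terraform",
	}

	annotations := map[string]interface{}{}          // what "annotations" holds after the upgrade
	effectiveAnnotations := map[string]interface{}{} // what "effective_annotations" holds

	for k, v := range rawAnnotations {
		effectiveAnnotations[k] = v
		if googleProvidedAnnotations.MatchString(k) || (strings.HasSuffix(k, "run.googleapis.com/ingress") && v == "all") {
			continue // server-managed: kept only in effective_annotations
		}
		annotations[k] = v
	}

	fmt.Println("annotations:          ", annotations)
	fmt.Println("effective_annotations:", effectiveAnnotations)
}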
+Format: projects/{project}/global/{networks}/{name}`, + }, + }, + }, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: `Labels. + +**Note**: This field is non-authoritative, and will only manage the labels present in your configuration. +Please refer to the field 'effective_labels' for all of the labels present on the resource.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + ForceNew: true, + Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "error": { + Type: schema.TypeList, + Computed: true, + Description: `The PrivateConnection error in case of failure.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "details": { + Type: schema.TypeMap, + Optional: true, + Description: `A list of messages that carry the error details.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "message": { + Type: schema.TypeString, + Optional: true, + Description: `A message containing more information about the error that occurred.`, + }, + }, + }, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource's name.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `State of the PrivateConnection.`, + }, + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + ForceNew: true, + Description: `The combination of labels configured directly on the resource + and default labels configured on the provider.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func ResourceDatastreamPrivateConnectionUpgradeV0(_ context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { + return tpgresource.TerraformLabelsStateUpgrade(rawState) +} \ No newline at end of file diff --git a/mmv1/templates/terraform/state_migrations/go/filestore_instance.go.tmpl b/mmv1/templates/terraform/state_migrations/go/filestore_instance.go.tmpl new file mode 100644 index 000000000000..597bf6f1c304 --- /dev/null +++ b/mmv1/templates/terraform/state_migrations/go/filestore_instance.go.tmpl @@ -0,0 +1,188 @@ +func resourceFilestoreInstanceResourceV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "file_shares": { + Type: schema.TypeList, + Required: true, + Description: `File system shares on the instance. For this version, only a +single file share is supported.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "capacity_gb": { + Type: schema.TypeInt, + Required: true, + Description: `File share capacity in GiB. This must be at least 1024 GiB +for the standard tier, or 2560 GiB for the premium tier.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the fileshare (16 characters or less)`, + }, + "nfs_export_options": { + Type: schema.TypeList, + Optional: true, + Description: `Nfs Export Options. 
There is a limit of 10 export options per file share.`, + MaxItems: 10, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "access_mode": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"READ_ONLY", "READ_WRITE", ""}, false), + Description: `Either READ_ONLY, for allowing only read requests on the exported directory, +or READ_WRITE, for allowing both read and write requests. The default is READ_WRITE. Default value: "READ_WRITE" Possible values: ["READ_ONLY", "READ_WRITE"]`, + Default: "READ_WRITE", + }, + "anon_gid": { + Type: schema.TypeInt, + Optional: true, + Description: `An integer representing the anonymous group id with a default value of 65534. +Anon_gid may only be set with squashMode of ROOT_SQUASH. An error will be returned +if this field is specified for other squashMode settings.`, + }, + "anon_uid": { + Type: schema.TypeInt, + Optional: true, + Description: `An integer representing the anonymous user id with a default value of 65534. +Anon_uid may only be set with squashMode of ROOT_SQUASH. An error will be returned +if this field is specified for other squashMode settings.`, + }, + "ip_ranges": { + Type: schema.TypeList, + Optional: true, + Description: `List of either IPv4 addresses, or ranges in CIDR notation which may mount the file share. +Overlapping IP ranges are not allowed, both within and across NfsExportOptions. An error will be returned. +The limit is 64 IP ranges/addresses for each FileShareConfig among all NfsExportOptions.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "squash_mode": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"NO_ROOT_SQUASH", "ROOT_SQUASH", ""}, false), + Description: `Either NO_ROOT_SQUASH, for allowing root access on the exported directory, or ROOT_SQUASH, +for not allowing root access. The default is NO_ROOT_SQUASH. Default value: "NO_ROOT_SQUASH" Possible values: ["NO_ROOT_SQUASH", "ROOT_SQUASH"]`, + Default: "NO_ROOT_SQUASH", + }, + }, + }, + }, + }, + }, + }, + "name": { + Type: schema.TypeString, + Required: true, + Description: `The resource name of the instance.`, + }, + "networks": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: `VPC networks to which the instance is connected. For this version, +only a single network is supported.`, + MinItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "modes": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: `IP versions for which the instance has +IP addresses assigned. Possible values: ["ADDRESS_MODE_UNSPECIFIED", "MODE_IPV4", "MODE_IPV6"]`, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"ADDRESS_MODE_UNSPECIFIED", "MODE_IPV4", "MODE_IPV6"}, false), + }, + }, + "network": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the GCE VPC network to which the +instance is connected.`, + }, + "connect_mode": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{"DIRECT_PEERING", "PRIVATE_SERVICE_ACCESS", ""}, false), + Description: `The network connect mode of the Filestore instance. +If not provided, the connect mode defaults to +DIRECT_PEERING. 
Default value: "DIRECT_PEERING" Possible values: ["DIRECT_PEERING", "PRIVATE_SERVICE_ACCESS"]`, + Default: "DIRECT_PEERING", + }, + "reserved_ip_range": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `A /29 CIDR block that identifies the range of IP +addresses reserved for this instance.`, + }, + "ip_addresses": { + Type: schema.TypeList, + Computed: true, + Description: `A list of IPv4 or IPv6 addresses.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "tier": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{"TIER_UNSPECIFIED", "STANDARD", "PREMIUM", "BASIC_HDD", "BASIC_SSD", "HIGH_SCALE_SSD", "ZONAL"}, false), + Description: `The service tier of the instance. Possible values: ["TIER_UNSPECIFIED", "STANDARD", "PREMIUM", "BASIC_HDD", "BASIC_SSD", "HIGH_SCALE_SSD", "ZONAL"]`, + }, + "zone": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the Filestore zone of the instance.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `A description of the instance.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Resource labels to represent user-provided metadata.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Creation timestamp in RFC3339 text format.`, + }, + "etag": { + Type: schema.TypeString, + Computed: true, + Description: `Server-specified ETag for the instance resource to prevent +simultaneous updates from overwriting each other.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + } +} + +func ResourceFilestoreInstanceUpgradeV0(_ context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { + log.Printf("[DEBUG] Attributes before migration: %#v", rawState) + + rawState["location"] = rawState["zone"] + log.Printf("[DEBUG] Attributes after migration: %#v", rawState) + return rawState, nil +} diff --git a/mmv1/templates/terraform/state_migrations/go/gke_hub_membership.go.tmpl b/mmv1/templates/terraform/state_migrations/go/gke_hub_membership.go.tmpl new file mode 100644 index 000000000000..b7c1415f5f0a --- /dev/null +++ b/mmv1/templates/terraform/state_migrations/go/gke_hub_membership.go.tmpl @@ -0,0 +1,91 @@ +func resourceGKEHubMembershipResourceV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "membership_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The client-provided identifier of the membership.`, + }, + "authority": { + Type: schema.TypeList, + Optional: true, + Description: `Authority encodes how Google will recognize identities from this Membership. +See the workload identity documentation for more details: +https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "issuer": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `A JSON Web Token (JWT) issuer URI. 'issuer' must start with 'https://' and // be a valid +with length <2000 characters. For example: 'https://container.googleapis.com/v1/projects/my-project/locations/us-west1/clusters/my-cluster' (must be 'locations' rather than 'zones'). 
If the cluster is provisioned with Terraform, this is '"https://container.googleapis.com/v1/${google_container_cluster.my-cluster.id}"'.`, + }, + }, + }, + }, + "endpoint": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `If this Membership is a Kubernetes API server hosted on GKE, this is a self link to its GCP resource.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "gke_cluster": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `If this Membership is a Kubernetes API server hosted on GKE, this is a self link to its GCP resource.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "resource_link": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: suppressGkeHubEndpointSelfLinkDiff, + Description: `Self-link of the GCP resource for the GKE cluster. +For example: '//container.googleapis.com/projects/my-project/zones/us-west1-a/clusters/my-cluster'. +It can be at the most 1000 characters in length. If the cluster is provisioned with Terraform, +this can be '"//container.googleapis.com/${google_container_cluster.my-cluster.id}"' or +'google_container_cluster.my-cluster.id'.`, + }, + }, + }, + }, + }, + }, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Labels to apply to this membership.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The unique identifier of the membership.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + } +} + +func ResourceGKEHubMembershipUpgradeV0(_ context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { + log.Printf("[DEBUG] Attributes before migration: %#v", rawState) + + // Version 0 didn't support location. Default it to global. 
+ rawState["location"] = "global" + + log.Printf("[DEBUG] Attributes after migration: %#v", rawState) + return rawState, nil +} diff --git a/mmv1/templates/terraform/state_migrations/go/kms_crypto_key.go.tmpl b/mmv1/templates/terraform/state_migrations/go/kms_crypto_key.go.tmpl new file mode 100644 index 000000000000..9d344cb1f5b9 --- /dev/null +++ b/mmv1/templates/terraform/state_migrations/go/kms_crypto_key.go.tmpl @@ -0,0 +1,40 @@ +func resourceKMSCryptoKeyResourceV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "key_ring": { + Type: schema.TypeString, + Required: true, + }, + "rotation_period": { + Type: schema.TypeString, + Optional: true, + }, + "version_template": { + Type: schema.TypeList, + Optional: true, + }, + "self_link": { + Type: schema.TypeString, + }, + }, + } +} + +func ResourceKMSCryptoKeyUpgradeV0(_ context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { + log.Printf("[DEBUG] Attributes before migration: %#v", rawState) + + config := meta.(*transport_tpg.Config) + keyRingId := rawState["key_ring"].(string) + parsed, err := parseKmsKeyRingId(keyRingId, config) + if err != nil { + return nil, err + } + rawState["key_ring"] = parsed.KeyRingId() + + log.Printf("[DEBUG] Attributes after migration: %#v", rawState) + return rawState, nil +} diff --git a/mmv1/templates/terraform/state_migrations/go/ml_engine_model.go.tmpl b/mmv1/templates/terraform/state_migrations/go/ml_engine_model.go.tmpl new file mode 100644 index 000000000000..4d353f6913d0 --- /dev/null +++ b/mmv1/templates/terraform/state_migrations/go/ml_engine_model.go.tmpl @@ -0,0 +1,95 @@ +func resourceMLEngineModelResourceV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name specified for the model.`, + }, + "default_version": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The default version of the model. This version will be used to handle +prediction requests that do not specify a version.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name specified for the version when it was created.`, + }, + }, + }, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The description specified for the model when it was created.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: `One or more labels that you can add, to organize your models. + +**Note**: This field is non-authoritative, and will only manage the labels present in your configuration. 
+Please refer to the field 'effective_labels' for all of the labels present on the resource.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "online_prediction_console_logging": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `If true, online prediction nodes send stderr and stdout streams to Stackdriver Logging`, + }, + "online_prediction_logging": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `If true, online prediction access logs are sent to StackDriver Logging.`, + }, + "regions": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The list of regions where the model is going to be deployed. +Currently only one region per model is supported`, + MaxItems: 1, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + ForceNew: true, + Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + ForceNew: true, + Description: `The combination of labels configured directly on the resource + and default labels configured on the provider.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func ResourceMLEngineModelUpgradeV0(_ context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { + return tpgresource.TerraformLabelsStateUpgrade(rawState) +} \ No newline at end of file diff --git a/mmv1/templates/terraform/state_migrations/go/monitoring_monitored_project.go.tmpl b/mmv1/templates/terraform/state_migrations/go/monitoring_monitored_project.go.tmpl new file mode 100644 index 000000000000..9dc51b7fd2f7 --- /dev/null +++ b/mmv1/templates/terraform/state_migrations/go/monitoring_monitored_project.go.tmpl @@ -0,0 +1,35 @@ +func resourceMonitoringMonitoredProjectResourceV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "metrics_scope": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareResourceNames, + Description: `Required. The resource name of the existing Metrics Scope that will monitor this project. Example: locations/global/metricsScopes/{SCOPING_PROJECT_ID_OR_NUMBER}`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareResourceNames, + Description: `Immutable. The resource name of the 'MonitoredProject'. On input, the resource name includes the scoping project ID and monitored project ID. On output, it contains the equivalent project numbers. Example: 'locations/global/metricsScopes/{SCOPING_PROJECT_ID_OR_NUMBER}/projects/{MONITORED_PROJECT_ID_OR_NUMBER}'`, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. 
The time when this 'MonitoredProject' was created.`, + }, + }, + UseJSONNumber: true, + } +} + +func ResourceMonitoringMonitoredProjectUpgradeV0(_ context.Context, rawState map[string]any, meta any) (map[string]any, error) { + log.Printf("[DEBUG] Attributes before migration: %#v", rawState) + + rawState["id"] = strings.TrimPrefix(rawState["id"].(string), "v1/") + + log.Printf("[DEBUG] Attributes after migration: %#v", rawState) + return rawState, nil +} diff --git a/mmv1/templates/terraform/state_migrations/go/network_services_service_binding.go.tmpl b/mmv1/templates/terraform/state_migrations/go/network_services_service_binding.go.tmpl new file mode 100644 index 000000000000..f9368b715908 --- /dev/null +++ b/mmv1/templates/terraform/state_migrations/go/network_services_service_binding.go.tmpl @@ -0,0 +1,71 @@ +func resourceNetworkServicesServiceBindingResourceV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name of the ServiceBinding resource.`, + }, + "service": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The full Service Directory Service name of the format +projects/*/locations/*/namespaces/*/services/*`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `A free-text description of the resource. Max length 1024 characters.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: `Set of label tags associated with the ServiceBinding resource. + +**Note**: This field is non-authoritative, and will only manage the labels present in your configuration. +Please refer to the field 'effective_labels' for all of the labels present on the resource.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Time the ServiceBinding was created in UTC.`, + }, + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + ForceNew: true, + Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + ForceNew: true, + Description: `The combination of labels configured directly on the resource + and default labels configured on the provider.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `Time the ServiceBinding was updated in UTC.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func ResourceNetworkServicesServiceBindingUpgradeV0(_ context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { + return tpgresource.TerraformLabelsStateUpgrade(rawState) +} \ No newline at end of file diff --git a/mmv1/templates/terraform/state_migrations/go/notebooks_instance.go.tmpl b/mmv1/templates/terraform/state_migrations/go/notebooks_instance.go.tmpl new file mode 100644 index 000000000000..069dacdd5216 --- /dev/null +++ b/mmv1/templates/terraform/state_migrations/go/notebooks_instance.go.tmpl @@ -0,0 +1,407 @@ +const notebooksInstanceGoogleProvidedLabel = "goog-caip-notebook" + +func NotebooksInstanceLabelDiffSuppress(k, old, new string, d 
*schema.ResourceData) bool { + // Suppress diffs for the label provided by Google + if strings.Contains(k, notebooksInstanceGoogleProvidedLabel) && new == "" { + return true + } + + // Let diff be determined by labels (above) + if strings.Contains(k, "labels.%") { + return true + } + + // For other keys, don't suppress diff. + return false +} + +func resourceNotebooksInstanceResourceV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `A reference to the zone where the machine resides.`, + }, + "machine_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `A reference to a machine type which defines VM kind.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name specified for the Notebook instance.`, + }, + "accelerator_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The hardware accelerator used on this instance. If you use accelerators, +make sure that your configuration has enough vCPUs and memory to support the +machineType you have selected.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "core_count": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: `Count of cores of this accelerator.`, + }, + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"ACCELERATOR_TYPE_UNSPECIFIED", "NVIDIA_TESLA_K80", "NVIDIA_TESLA_P100", "NVIDIA_TESLA_V100", "NVIDIA_TESLA_P4", "NVIDIA_TESLA_T4", "NVIDIA_TESLA_T4_VWS", "NVIDIA_TESLA_P100_VWS", "NVIDIA_TESLA_P4_VWS", "NVIDIA_TESLA_A100", "TPU_V2", "TPU_V3"}), + Description: `Type of this accelerator. Possible values: ["ACCELERATOR_TYPE_UNSPECIFIED", "NVIDIA_TESLA_K80", "NVIDIA_TESLA_P100", "NVIDIA_TESLA_V100", "NVIDIA_TESLA_P4", "NVIDIA_TESLA_T4", "NVIDIA_TESLA_T4_VWS", "NVIDIA_TESLA_P100_VWS", "NVIDIA_TESLA_P4_VWS", "NVIDIA_TESLA_A100", "TPU_V2", "TPU_V3"]`, + }, + }, + }, + }, + "boot_disk_size_gb": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: `The size of the boot disk in GB attached to this instance, +up to a maximum of 64000 GB (64 TB). The minimum recommended value is 100 GB. +If not specified, this defaults to 100.`, + }, + "boot_disk_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"DISK_TYPE_UNSPECIFIED", "PD_STANDARD", "PD_SSD", "PD_BALANCED", "PD_EXTREME", ""}), + Description: `Possible disk types for notebook instances. Possible values: ["DISK_TYPE_UNSPECIFIED", "PD_STANDARD", "PD_SSD", "PD_BALANCED", "PD_EXTREME"]`, + }, + "container_image": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Use a container image to start the notebook instance.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "repository": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The path to the container image repository. +For example: gcr.io/{project_id}/{imageName}`, + }, + "tag": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The tag of the container image. 
If not specified, this defaults to the latest tag.`, + }, + }, + }, + ExactlyOneOf: []string{"vm_image", "container_image"}, + }, + "custom_gpu_driver_path": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Specify a custom Cloud Storage path where the GPU driver is stored. +If not specified, we'll automatically choose from official GPU drivers.`, + }, + "data_disk_size_gb": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: `The size of the data disk in GB attached to this instance, +up to a maximum of 64000 GB (64 TB). +You can choose the size of the data disk based on how big your notebooks and data are. +If not specified, this defaults to 100.`, + }, + "data_disk_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"DISK_TYPE_UNSPECIFIED", "PD_STANDARD", "PD_SSD", "PD_BALANCED", "PD_EXTREME", ""}), + Description: `Possible disk types for notebook instances. Possible values: ["DISK_TYPE_UNSPECIFIED", "PD_STANDARD", "PD_SSD", "PD_BALANCED", "PD_EXTREME"]`, + }, + "disk_encryption": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"DISK_ENCRYPTION_UNSPECIFIED", "GMEK", "CMEK", ""}), + DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress("DISK_ENCRYPTION_UNSPECIFIED"), + Description: `Disk encryption method used on the boot and data disks, defaults to GMEK. Possible values: ["DISK_ENCRYPTION_UNSPECIFIED", "GMEK", "CMEK"]`, + }, + "install_gpu_driver": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Whether the end user authorizes Google Cloud to install GPU driver +on this instance. If this field is empty or set to false, the GPU driver +won't be installed. Only applicable to instances with GPUs.`, + }, + "instance_owners": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The list of owners of this instance after creation. +Format: alias@example.com. +Currently supports one owner only. +If not specified, all of the service account users of +your VM instance's service account can use the instance.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "kms_key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The KMS key used to encrypt the disks, only applicable if diskEncryption is CMEK. +Format: projects/{project_id}/locations/{location}/keyRings/{key_ring_id}/cryptoKeys/{key_id}`, + }, + "labels": { + Type: schema.TypeMap, + Computed: true, + Optional: true, + DiffSuppressFunc: NotebooksInstanceLabelDiffSuppress, + Description: `Labels to apply to this instance. These can be later modified by the setLabels method. +An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "metadata": { + Type: schema.TypeMap, + Optional: true, + Description: `Custom metadata to apply to this instance. +An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "network": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The name of the VPC that this instance is in. 
+Format: projects/{project_id}/global/networks/{network_id}`, + }, + "nic_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"UNSPECIFIED_NIC_TYPE", "VIRTIO_NET", "GVNIC", ""}), + Description: `The type of vNIC driver. Possible values: ["UNSPECIFIED_NIC_TYPE", "VIRTIO_NET", "GVNIC"]`, + }, + "no_proxy_access": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `The notebook instance will not register with the proxy..`, + }, + "no_public_ip": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `No public IP will be assigned to this instance.`, + }, + "no_remove_data_disk": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `If true, the data disk will not be auto deleted when deleting the instance.`, + }, + "post_startup_script": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Path to a Bash script that automatically runs after a +notebook instance fully boots up. The path must be a URL +or Cloud Storage path (gs://path-to-file/file-name).`, + }, + "reservation_affinity": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Reservation Affinity for consuming Zonal reservation.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "consume_reservation_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"NO_RESERVATION", "ANY_RESERVATION", "SPECIFIC_RESERVATION"}), + Description: `The type of Compute Reservation. Possible values: ["NO_RESERVATION", "ANY_RESERVATION", "SPECIFIC_RESERVATION"]`, + }, + "key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Corresponds to the label key of reservation resource.`, + }, + "values": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Corresponds to the label values of reservation resource.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "service_account": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The service account on this instance, giving access to other +Google Cloud services. You can use any service account within +the same project, but you must have the service account user +permission to use the instance. If not specified, +the Compute Engine default service account is used.`, + }, + "service_account_scopes": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Optional. The URIs of service account scopes to be included in Compute Engine instances. +If not specified, the following scopes are defined: +- https://www.googleapis.com/auth/cloud-platform +- https://www.googleapis.com/auth/userinfo.email`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "shielded_instance_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `A set of Shielded Instance options. Check [Images using supported Shielded VM features] +Not all combinations are valid`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_integrity_monitoring": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Defines whether the instance has integrity monitoring enabled. Enables monitoring and attestation of the +boot integrity of the instance. 
The attestation is performed against the integrity policy baseline. +This baseline is initially derived from the implicitly trusted boot image when the instance is created. +Enabled by default.`, + Default: true, + }, + "enable_secure_boot": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Defines whether the instance has Secure Boot enabled. Secure Boot helps ensure that the system only runs +authentic software by verifying the digital signature of all boot components, and halting the boot process +if signature verification fails. +Disabled by default.`, + }, + "enable_vtpm": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Defines whether the instance has the vTPM enabled. +Enabled by default.`, + Default: true, + }, + }, + }, + }, + "subnet": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The name of the subnet that this instance is in. +Format: projects/{project_id}/regions/{region}/subnetworks/{subnetwork_id}`, + }, + "tags": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The Compute Engine tags to add to instance.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "vm_image": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Use a Compute Engine VM image to start the notebook instance.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the Google Cloud project that this VM image belongs to. +Format: projects/{project_id}`, + }, + "image_family": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Use this VM image family to find the image; the newest image in this family will be used.`, + }, + "image_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Use VM image name to find the image.`, + }, + }, + }, + ExactlyOneOf: []string{"vm_image", "container_image"}, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `Instance creation time`, + }, + "proxy_uri": { + Type: schema.TypeString, + Computed: true, + Description: `The proxy endpoint that is used to access the Jupyter notebook. +Only returned when the resource is in a 'PROVISIONED' state. 
If +needed you can utilize 'terraform apply -refresh-only' to await +the population of this value.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `The state of this instance.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `Instance update time.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func ResourceNotebooksInstanceUpgradeV0(_ context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { + return tpgresource.LabelsStateUpgrade(rawState, notebooksInstanceGoogleProvidedLabel) +} \ No newline at end of file diff --git a/mmv1/templates/terraform/state_migrations/go/workflows_workflow.go.tmpl b/mmv1/templates/terraform/state_migrations/go/workflows_workflow.go.tmpl new file mode 100644 index 000000000000..89e14b615b3e --- /dev/null +++ b/mmv1/templates/terraform/state_migrations/go/workflows_workflow.go.tmpl @@ -0,0 +1,90 @@ +func resourceWorkflowsWorkflowResourceV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "description": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `Description of the workflow provided by the user. Must be at most 1000 unicode characters long.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `A set of key/value label pairs to assign to this Workflow.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Name of the Workflow.`, + }, + "region": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The region of the workflow.`, + }, + "service_account": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `Name of the service account associated with the latest workflow version. This service +account represents the identity of the workflow and determines what permissions the workflow has. + +Format: projects/{project}/serviceAccounts/{account}.`, + }, + "source_contents": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Workflow code to be executed. The size limit is 32KB.`, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `The timestamp of when the workflow was created in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.`, + }, + "revision_id": { + Type: schema.TypeString, + Computed: true, + Description: `The revision of the workflow. 
A new one is generated if the service account or source contents is changed.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `State of the workflow deployment.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `The timestamp of when the workflow was last updated in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.`, + }, + "name_prefix": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"name"}, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + } +} + +func ResourceWorkflowsWorkflowUpgradeV0(_ context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { + log.Printf("[DEBUG] Attributes before migration: %#v", rawState) + + rawState["name"] = tpgresource.GetResourceNameFromSelfLink(rawState["name"].(string)) + + log.Printf("[DEBUG] Attributes after migration: %#v", rawState) + return rawState, nil +} diff --git a/mmv1/templates/terraform/update_encoder/go/active_directory_domain_trust.go.tmpl b/mmv1/templates/terraform/update_encoder/go/active_directory_domain_trust.go.tmpl new file mode 100644 index 000000000000..a5201bb806dc --- /dev/null +++ b/mmv1/templates/terraform/update_encoder/go/active_directory_domain_trust.go.tmpl @@ -0,0 +1,17 @@ +{{- /* + The license inside this block applies to this file + Copyright 2024 Google Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/}} +wrappedReq := map[string]interface{}{ + "targetDomainName": obj["targetDomainName"], + "targetDnsIpAddresses": obj["targetDnsIpAddresses"], +} +return wrappedReq, nil diff --git a/mmv1/templates/terraform/update_encoder/go/cloud_scheduler.go.tmpl b/mmv1/templates/terraform/update_encoder/go/cloud_scheduler.go.tmpl new file mode 100644 index 000000000000..8a9b4e7e9da9 --- /dev/null +++ b/mmv1/templates/terraform/update_encoder/go/cloud_scheduler.go.tmpl @@ -0,0 +1,2 @@ +delete(obj, "paused") // Field doesn't exist in API +return obj, nil \ No newline at end of file diff --git a/mmv1/templates/terraform/update_encoder/go/compute_network.go.tmpl b/mmv1/templates/terraform/update_encoder/go/compute_network.go.tmpl new file mode 100644 index 000000000000..084107f2772e --- /dev/null +++ b/mmv1/templates/terraform/update_encoder/go/compute_network.go.tmpl @@ -0,0 +1,2 @@ +delete(obj, "numeric_id") // Field doesn't exist in the API +return obj, nil diff --git a/mmv1/templates/terraform/update_encoder/go/compute_per_instance_config.go.tmpl b/mmv1/templates/terraform/update_encoder/go/compute_per_instance_config.go.tmpl new file mode 100644 index 000000000000..abd89bda14c3 --- /dev/null +++ b/mmv1/templates/terraform/update_encoder/go/compute_per_instance_config.go.tmpl @@ -0,0 +1,17 @@ +{{- /* + The license inside this block applies to this file + Copyright 2024 Google Inc. 
+ Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/}} +// updates and creates use different wrapping object names +wrappedReq := map[string]interface{}{ + "perInstanceConfigs": []interface{}{obj}, +} +return wrappedReq, nil diff --git a/mmv1/templates/terraform/update_encoder/go/compute_service_attachment.go.tmpl b/mmv1/templates/terraform/update_encoder/go/compute_service_attachment.go.tmpl new file mode 100644 index 000000000000..b6a29616fe9d --- /dev/null +++ b/mmv1/templates/terraform/update_encoder/go/compute_service_attachment.go.tmpl @@ -0,0 +1,27 @@ +{{- /* + The license inside this block applies to this file + Copyright 2024 Google Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/}} + +// need to send value in PATCH due to validation bug on api b/198329756 +nameProp := d.Get("name") +if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp +} + + +// need to send value in PATCH due to validation bug on api b/198308475 +enableProxyProtocolProp := d.Get("enable_proxy_protocol") +if v, ok := d.GetOkExists("enable_proxy_protocol"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enableProxyProtocolProp)) { + obj["enableProxyProtocol"] = enableProxyProtocolProp +} + +return obj, nil diff --git a/mmv1/templates/terraform/update_encoder/go/containeranalysis_occurrence.go.tmpl b/mmv1/templates/terraform/update_encoder/go/containeranalysis_occurrence.go.tmpl new file mode 100644 index 000000000000..2dae6c9a6d1c --- /dev/null +++ b/mmv1/templates/terraform/update_encoder/go/containeranalysis_occurrence.go.tmpl @@ -0,0 +1,21 @@ +{{- /* + The license inside this block applies to this file + Copyright 2024 Google Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/}} +// Note is required, even for PATCH +noteNameProp, err := expandContainerAnalysisOccurrenceNoteName(d.Get("note_name"), d, meta.(*transport_tpg.Config)) +if err != nil { + return nil, err +} else if v, ok := d.GetOkExists("note_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(noteNameProp)) && (ok || !reflect.DeepEqual(v, noteNameProp)) { + obj["noteName"] = noteNameProp +} + +return resource{{$.ResourceName}}Encoder(d, meta, obj) diff --git a/mmv1/templates/terraform/update_encoder/go/dlp_stored_info_type.go.tmpl b/mmv1/templates/terraform/update_encoder/go/dlp_stored_info_type.go.tmpl new file mode 100644 index 000000000000..b2342f86df0f --- /dev/null +++ b/mmv1/templates/terraform/update_encoder/go/dlp_stored_info_type.go.tmpl @@ -0,0 +1,15 @@ +{{- /* + The license inside this block applies to this file + Copyright 2024 Google Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/}} + newObj := make(map[string]interface{}) + newObj["config"] = obj + return newObj, nil diff --git a/mmv1/templates/terraform/update_encoder/go/hyper_disk.go.tmpl b/mmv1/templates/terraform/update_encoder/go/hyper_disk.go.tmpl new file mode 100644 index 000000000000..f787b39b63e6 --- /dev/null +++ b/mmv1/templates/terraform/update_encoder/go/hyper_disk.go.tmpl @@ -0,0 +1,8 @@ + +if (d.HasChange("provisioned_iops") && strings.Contains(d.Get("type").(string), "hyperdisk")) || (d.HasChange("provisioned_throughput") && strings.Contains(d.Get("type").(string), "hyperdisk")) { + nameProp := d.Get("name") + if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } +} +return obj, nil diff --git a/mmv1/templates/terraform/update_encoder/go/kms_crypto_key.go.tmpl b/mmv1/templates/terraform/update_encoder/go/kms_crypto_key.go.tmpl new file mode 100644 index 000000000000..85b6b99e797c --- /dev/null +++ b/mmv1/templates/terraform/update_encoder/go/kms_crypto_key.go.tmpl @@ -0,0 +1,25 @@ +{{- /* + The license inside this block applies to this file + Copyright 2024 Google Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/}} + // if rotationPeriod is changed, nextRotationTime must also be set. 
+ if d.HasChange("rotation_period") && d.Get("rotation_period") != "" { + rotationPeriod := d.Get("rotation_period").(string) + nextRotation, err := kmsCryptoKeyNextRotation(time.Now(), rotationPeriod) + + if err != nil { + return nil, fmt.Errorf("Error setting CryptoKey rotation period: %s", err.Error()) + } + + obj["nextRotationTime"] = nextRotation + } + + return obj, nil diff --git a/mmv1/templates/terraform/update_encoder/go/managed_dns_zone.go.tmpl b/mmv1/templates/terraform/update_encoder/go/managed_dns_zone.go.tmpl new file mode 100644 index 000000000000..5a10048b0811 --- /dev/null +++ b/mmv1/templates/terraform/update_encoder/go/managed_dns_zone.go.tmpl @@ -0,0 +1,11 @@ + // The upstream update method (https://cloud.google.com/dns/docs/reference/v1/managedZones/update) + // requires the full ManagedZones object, therefore, we need to keep some input only values in the struct + // and then reuse it in the update + nameServers, ok := d.GetOkExists("name_servers") + if !ok { + nameServers = []string{} + } + obj["nameServers"] = nameServers + obj["id"] = d.Get("managed_zone_id") + obj["creationTime"] = d.Get("creation_time") + return obj, nil \ No newline at end of file diff --git a/mmv1/templates/terraform/update_encoder/go/org_policy_custom_constraint.go.tmpl b/mmv1/templates/terraform/update_encoder/go/org_policy_custom_constraint.go.tmpl new file mode 100644 index 000000000000..fa265685fea9 --- /dev/null +++ b/mmv1/templates/terraform/update_encoder/go/org_policy_custom_constraint.go.tmpl @@ -0,0 +1,7 @@ +// need to send resource_types in all PATCH requests +resourceTypesProp := d.Get("resource_types") +if v, ok := d.GetOkExists("resource_types"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, resourceTypesProp)) { + obj["resourceTypes"] = resourceTypesProp +} + +return obj, nil diff --git a/mmv1/templates/terraform/update_encoder/go/private_cloud.go.tmpl b/mmv1/templates/terraform/update_encoder/go/private_cloud.go.tmpl new file mode 100644 index 000000000000..10f85bd78b90 --- /dev/null +++ b/mmv1/templates/terraform/update_encoder/go/private_cloud.go.tmpl @@ -0,0 +1,2 @@ +delete(obj, "managementCluster") +return obj, nil \ No newline at end of file diff --git a/mmv1/templates/terraform/update_encoder/go/pubsub_schema.tmpl b/mmv1/templates/terraform/update_encoder/go/pubsub_schema.tmpl new file mode 100644 index 000000000000..a9b473951ab1 --- /dev/null +++ b/mmv1/templates/terraform/update_encoder/go/pubsub_schema.tmpl @@ -0,0 +1,17 @@ +{{- /* + The license inside this block applies to this file + Copyright 2024 Google Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/}} + newObj := make(map[string]interface{}) + newObj["name"] = d.Id() + obj["name"] = d.Id() + newObj["schema"] = obj + return newObj, nil diff --git a/mmv1/templates/terraform/update_encoder/go/pubsub_subscription.tmpl b/mmv1/templates/terraform/update_encoder/go/pubsub_subscription.tmpl new file mode 100644 index 000000000000..50914402b631 --- /dev/null +++ b/mmv1/templates/terraform/update_encoder/go/pubsub_subscription.tmpl @@ -0,0 +1,15 @@ +{{- /* + The license inside this block applies to this file + Copyright 2024 Google Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/}} + newObj := make(map[string]interface{}) + newObj["subscription"] = obj +return newObj, nil diff --git a/mmv1/templates/terraform/update_encoder/go/pubsub_topic.tmpl b/mmv1/templates/terraform/update_encoder/go/pubsub_topic.tmpl new file mode 100644 index 000000000000..79ef82409fac --- /dev/null +++ b/mmv1/templates/terraform/update_encoder/go/pubsub_topic.tmpl @@ -0,0 +1,15 @@ +{{- /* + The license inside this block applies to this file + Copyright 2024 Google Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/}} + newObj := make(map[string]interface{}) + newObj["topic"] = obj +return newObj, nil diff --git a/mmv1/templates/terraform/update_encoder/go/reservation.go.tmpl b/mmv1/templates/terraform/update_encoder/go/reservation.go.tmpl new file mode 100644 index 000000000000..38dc7e36589e --- /dev/null +++ b/mmv1/templates/terraform/update_encoder/go/reservation.go.tmpl @@ -0,0 +1,109 @@ +{{- /* + The license inside this block applies to this file + Copyright 2024 Google Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/}} + newObj := make(map[string]interface{}) + config := meta.(*transport_tpg.Config) + maskId := "" + firstProject := true + urlUpdateMask := "" + + if d.HasChange("share_settings") { + // Get name. 
+ nameProp, err := expandComputeReservationName(d.Get("name"), d, config) + if err != nil { + return nil, fmt.Errorf("Invalid value for name: %s", err) + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + newObj["name"] = nameProp + } + // Get zone. + zoneProp, err := expandComputeReservationZone(d.Get("zone"), d, config) + if err != nil { + return nil, fmt.Errorf("Invalid value for zone: %s", err) + } else if v, ok := d.GetOkExists("zone"); !tpgresource.IsEmptyValue(reflect.ValueOf(zoneProp)) && (ok || !reflect.DeepEqual(v, zoneProp)) { + newObj["zone"] = zoneProp + } + transformed := make(map[string]interface{}) + // Set shareType. + transformed["shareType"] = "SPECIFIC_PROJECTS" + // Set project_map. + projectMap := make(map[string]interface{}) + old, new := d.GetChange("share_settings") + oldMap := old.([]interface{})[0].(map[string]interface{})["project_map"] + newMap := new.([]interface{})[0].(map[string]interface{})["project_map"] + before := oldMap.(*schema.Set) + after := newMap.(*schema.Set) + + for _, raw := range after.Difference(before).List() { + original := raw.(map[string]interface{}) + singleProject := make(map[string]interface{}) + // set up project_map. + transformedProjectId := original["project_id"] + if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + singleProject["projectId"] = transformedProjectId + } + transformedId, err := tpgresource.ExpandString(original["id"], d, config) + if err != nil { + return nil, fmt.Errorf("Invalid value for id: %s", err) + } + projectMap[transformedId] = singleProject + // add added projects to updateMask + if firstProject != true { + maskId = fmt.Sprintf("%s%s", "&paths=shareSettings.projectMap.", original["project_id"]) + } else { + maskId = fmt.Sprintf("%s%s", "?paths=shareSettings.projectMap.", original["project_id"]) + firstProject = false + } + decodedPath, _ := url.QueryUnescape(maskId) + urlUpdateMask = urlUpdateMask + decodedPath + } + transformed["projectMap"] = projectMap + newObj["shareSettings"] = transformed + + // add removed projects to updateMask + firstProject = true + for _, raw := range before.Difference(after).List() { + original := raw.(map[string]interface{}) + // To remove a project we need project number. + projectId := fmt.Sprintf("%s", original["project_id"]) + projectIdOrNum := projectId + _, err := strconv.Atoi(projectId) + // convert id to number. + if err != nil { + config := meta.(*transport_tpg.Config) + project, err := config.NewResourceManagerClient(config.UserAgent).Projects.Get(projectId).Do() + if err != nil { + return nil, fmt.Errorf("Invalid value for projectId: %s", err) + } + projectNum := project.ProjectNumber + projectIdOrNum = fmt.Sprintf("%d", projectNum) + } + if firstProject != true { + maskId = fmt.Sprintf("%s%s", "&paths=shareSettings.projectMap.", projectIdOrNum) + } else { + maskId = fmt.Sprintf("%s%s", "?paths=shareSettings.projectMap.", projectIdOrNum) + firstProject = false + } + decodedPath, _ := url.QueryUnescape(maskId) + urlUpdateMask = urlUpdateMask + decodedPath + } + newObj["urlUpdateMask"] = urlUpdateMask + } + + // Resize. 
+ if obj["specificReservation"] != nil { + count := obj["specificReservation"].(map[string]interface{})["count"] + if count != nil { + newObj["specificSkuCount"] = obj["specificReservation"].(map[string]interface{})["count"] + } + } + + return newObj, nil diff --git a/mmv1/templates/terraform/update_encoder/go/source_repo_repository.tmpl b/mmv1/templates/terraform/update_encoder/go/source_repo_repository.tmpl new file mode 100644 index 000000000000..39dc2948f13f --- /dev/null +++ b/mmv1/templates/terraform/update_encoder/go/source_repo_repository.tmpl @@ -0,0 +1,26 @@ +{{- /* + The license inside this block applies to this file + Copyright 2024 Google Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/}} + // Add "topic" field using pubsubConfig map key + pubsubConfigsVal := obj["pubsubConfigs"] + if pubsubConfigsVal != nil { + pubsubConfigs := pubsubConfigsVal.(map[string]interface{}) + for key := range pubsubConfigs { + config := pubsubConfigs[key].(map[string]interface{}) + config["topic"] = key + } + } + + // Nest request body in "repo" field + newObj := make(map[string]interface{}) + newObj["repo"] = obj + return newObj, nil diff --git a/mmv1/templates/terraform/update_encoder/go/spanner_database.go.tmpl b/mmv1/templates/terraform/update_encoder/go/spanner_database.go.tmpl new file mode 100644 index 000000000000..d7a891090f87 --- /dev/null +++ b/mmv1/templates/terraform/update_encoder/go/spanner_database.go.tmpl @@ -0,0 +1,43 @@ +{{- /* + The license inside this block applies to this file + Copyright 2024 Google Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/}} + +if obj["versionRetentionPeriod"] != nil || obj["extraStatements"] != nil { + old, new := d.GetChange("ddl") + oldDdls := old.([]interface{}) + newDdls := new.([]interface{}) + updateDdls := []string{} + + //Only new ddl statments to be add to update call + for i := len(oldDdls); i < len(newDdls); i++ { + if newDdls[i] != nil { + updateDdls = append(updateDdls, newDdls[i].(string)) + } + } + + //Add statement to update version_retention_period property, if needed + if d.HasChange("version_retention_period") { + dbName := d.Get("name") + retentionDdl := fmt.Sprintf("ALTER DATABASE `%s` SET OPTIONS (version_retention_period=\"%s\")", dbName, obj["versionRetentionPeriod"]) + if dialect, ok := d.GetOk("database_dialect"); ok && dialect == "POSTGRESQL" { + retentionDdl = fmt.Sprintf("ALTER DATABASE \"%s\" SET spanner.version_retention_period TO \"%s\"", dbName, obj["versionRetentionPeriod"]) + } + updateDdls = append(updateDdls, retentionDdl) + } + + obj["statements"] = updateDdls + delete(obj, "name") + delete(obj, "versionRetentionPeriod") + delete(obj, "instance") + delete(obj, "extraStatements") +} +return obj, nil \ No newline at end of file diff --git a/mmv1/templates/terraform/update_encoder/go/ssl_policy.tmpl b/mmv1/templates/terraform/update_encoder/go/ssl_policy.tmpl new file mode 100644 index 000000000000..fbb86c5af5ec --- /dev/null +++ b/mmv1/templates/terraform/update_encoder/go/ssl_policy.tmpl @@ -0,0 +1,22 @@ +{{- /* + The license inside this block applies to this file + Copyright 2024 Google Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/}} +// TODO(https://github.com/GoogleCloudPlatform/magic-modules/issues/184): Handle fingerprint consistently +obj["fingerprint"] = d.Get("fingerprint") + +// TODO(https://github.com/GoogleCloudPlatform/magic-modules/issues/183): Can we generalize this +// Send a null fields if customFeatures is empty. +if v, ok := obj["customFeatures"]; ok && len(v.([]interface{})) == 0 { + obj["customFeatures"] = nil +} + +return obj, nil
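
Note on how the state_migrations templates above are consumed (this note is not part of the diff): below is a minimal sketch, assuming the usual terraform-plugin-sdk v2 wiring, of how a generated V0 schema and its upgrade function are registered on the current resource via SchemaVersion and StateUpgraders. The Filestore pair is used purely as an illustration; the surrounding resource function, package name, and comments are assumptions, not generated output.

// Sketch only: wiring a generated V0 schema + upgrade func into the live resource.
package filestore // illustrative package name

import (
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

func ResourceFilestoreInstance() *schema.Resource {
	return &schema.Resource{
		// Bumped past the V0 schema defined by the template above.
		SchemaVersion: 1,
		StateUpgraders: []schema.StateUpgrader{
			{
				Type:    resourceFilestoreInstanceResourceV0().CoreConfigSchema().ImpliedType(),
				Upgrade: ResourceFilestoreInstanceUpgradeV0, // backfills "location" from "zone"
				Version: 0,
			},
		},
		// ... current schema, CRUD functions, and timeouts elided ...
	}
}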
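
Similarly, a hedged sketch of what the smaller update_encoder templates amount to once rendered, using the pubsub_subscription wrapper as the example. The function name and signature here are assumptions based on the hook pattern these templates plug into (the generated Update method passes the request body through the encoder before issuing the PATCH); only the function body mirrors the template above.

// Illustrative rendering only; not literal generator output.
package pubsub // illustrative package name

import (
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

func resourcePubsubSubscriptionUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) {
	// Wrap the computed request body so the API receives {"subscription": {...}},
	// mirroring the template body above.
	newObj := make(map[string]interface{})
	newObj["subscription"] = obj
	return newObj, nil
}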